index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/KeySetReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Sets;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.KeyValue;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.options.GetOption;
import io.etcd.jetcd.options.WatchOption;
import io.etcd.jetcd.watch.WatchResponse;
import java.util.Collections;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.metadata.etcd.EtcdWatchClient;
import org.apache.bookkeeper.metadata.etcd.EtcdWatcher;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * A helper class to read a set of keys and watch them.
 *
 * <p>The reader caches the latest observed key set together with the etcd revision it
 * was read at, and keeps the cache up-to-date by watching the key range. Registered
 * consumers are notified whenever the cached set advances to a newer revision.
 */
@Slf4j
public class KeySetReader<T> implements BiConsumer<WatchResponse, Throwable>, AutoCloseable {

    private final Client client;
    // whether this reader created (and hence must close) the watch client
    private final boolean ownWatchClient;
    private final EtcdWatchClient watchClient;
    // decodes a raw etcd key into the domain type T
    private final Function<ByteSequence, T> encoder;
    private final ByteSequence beginKey;
    // exclusive end of the watched range; null means a single-key read/watch
    private final ByteSequence endKey;
    // the cached key set; synchronized wrapper + `synchronized` methods guard updates
    private final Set<ByteSequence> keys;
    private final CopyOnWriteArraySet<Consumer<Versioned<Set<T>>>> consumers =
        new CopyOnWriteArraySet<>();
    // etcd revision of the cached key set; -1 means "never read"
    private volatile long revision = -1L;
    // future of the active watcher; null when not watching; guarded by `this`
    private CompletableFuture<EtcdWatcher> watchFuture = null;
    // non-null once closeAsync() has been called; guarded by `this`
    private CompletableFuture<Void> closeFuture = null;

    /**
     * Create a reader that owns its own watch client.
     *
     * <p>The internally created {@link EtcdWatchClient} is closed by {@link #closeAsync()}.
     */
    public KeySetReader(Client client,
                        Function<ByteSequence, T> encoder,
                        ByteSequence beginKey,
                        ByteSequence endKey) {
        // fix: mark the internally created watch client as owned so closeAsync()
        // releases it; previously ownWatchClient was always false and the client leaked
        this(client, new EtcdWatchClient(client), true, encoder, beginKey, endKey);
    }

    /**
     * Create a reader using an externally managed watch client.
     *
     * <p>The caller remains responsible for closing {@code watchClient}.
     */
    public KeySetReader(Client client,
                        EtcdWatchClient watchClient,
                        Function<ByteSequence, T> encoder,
                        ByteSequence beginKey,
                        ByteSequence endKey) {
        this(client, watchClient, false, encoder, beginKey, endKey);
    }

    private KeySetReader(Client client,
                         EtcdWatchClient watchClient,
                         boolean ownWatchClient,
                         Function<ByteSequence, T> encoder,
                         ByteSequence beginKey,
                         ByteSequence endKey) {
        this.client = client;
        this.watchClient = watchClient;
        this.ownWatchClient = ownWatchClient;
        this.encoder = encoder;
        this.beginKey = beginKey;
        this.endKey = endKey;
        this.keys = Collections.synchronizedSet(Sets.newHashSet());
    }

    /**
     * Read the current key set from etcd (keys-only) and refresh the local cache.
     *
     * <p>If the response carries a newer revision than the cache, the cache is replaced
     * and all registered consumers are notified after the return value is captured.
     *
     * @return a future of the versioned key set (version = etcd revision)
     */
    public CompletableFuture<Versioned<Set<T>>> read() {
        GetOption.Builder optionBuilder = GetOption.newBuilder()
            .withKeysOnly(true);
        if (null != endKey) {
            optionBuilder.withRange(endKey);
        }
        return client.getKVClient().get(
            beginKey,
            optionBuilder.build()
        ).thenApply(getResp -> {
            boolean updated = updateLocalValue(getResp);
            Versioned<Set<T>> localValue = getLocalValue();
            try {
                return localValue;
            } finally {
                // notify after capturing the return value so consumers observe the
                // same versioned snapshot that the caller receives
                if (updated) {
                    notifyConsumers(localValue);
                }
            }
        });
    }

    // current cached revision; exposed for tests
    @VisibleForTesting
    long getRevision() {
        return revision;
    }

    private void notifyConsumers(Versioned<Set<T>> localValue) {
        consumers.forEach(consumer -> consumer.accept(localValue));
    }

    /**
     * Replace the cached key set with the response contents if the response is newer.
     *
     * @return true if the cache was updated, false if the response was stale
     */
    private synchronized boolean updateLocalValue(GetResponse response) {
        if (revision < response.getHeader().getRevision()) {
            revision = response.getHeader().getRevision();
            keys.clear();
            for (KeyValue kv : response.getKvs()) {
                ByteSequence key = kv.getKey();
                keys.add(key);
            }
            return true;
        } else {
            return false;
        }
    }

    /**
     * Apply a watch response to the cached key set.
     *
     * @return the refreshed versioned key set, or null if the reader is closed or the
     *         response does not advance the cached revision
     */
    private synchronized Versioned<Set<T>> processWatchResponse(WatchResponse response) {
        if (null != closeFuture) {
            // the reader is already closing; ignore late watch events
            return null;
        }
        if (log.isDebugEnabled()) {
            log.debug("Received watch response : revision = {}, {} events = {}",
                response.getHeader().getRevision(), response.getEvents().size(), response.getEvents());
        }
        if (response.getHeader().getRevision() <= revision) {
            // stale events : the cache is already at (or past) this revision
            return null;
        }
        revision = response.getHeader().getRevision();
        response.getEvents().forEach(event -> {
            switch (event.getEventType()) {
                case PUT:
                    keys.add(event.getKeyValue().getKey());
                    break;
                case DELETE:
                    keys.remove(event.getKeyValue().getKey());
                    break;
                default:
                    // ignore
                    break;
            }
        });
        return getLocalValue();
    }

    /**
     * Snapshot the cached key set, decoded via the encoder and tagged with its revision.
     */
    @VisibleForTesting
    synchronized Versioned<Set<T>> getLocalValue() {
        return new Versioned<>(
            keys.stream().map(encoder).collect(Collectors.toSet()),
            new LongVersion(revision)
        );
    }

    // return the cached value, or perform the first read if nothing was cached yet
    private CompletableFuture<Versioned<Set<T>>> getOrRead() {
        boolean shouldRead = false;
        synchronized (this) {
            if (revision < 0L) {
                // the value is never cached.
                shouldRead = true;
            }
        }
        if (shouldRead) {
            return read();
        } else {
            return FutureUtils.value(getLocalValue());
        }
    }

    @VisibleForTesting
    synchronized boolean isWatcherSet() {
        return null != watchFuture;
    }

    /**
     * Register a consumer and ensure the key range is being read and watched.
     *
     * <p>If the consumer is already registered, or a watcher is already established,
     * this only returns the cached (or freshly read) key set.
     */
    public CompletableFuture<Versioned<Set<T>>> readAndWatch(Consumer<Versioned<Set<T>>> consumer) {
        if (!consumers.add(consumer) || isWatcherSet()) {
            return getOrRead();
        }
        return getOrRead()
            .thenCompose(versionedKeys -> {
                long revision = ((LongVersion) versionedKeys.getVersion()).getLongVersion();
                return watch(revision).thenApply(ignored -> versionedKeys);
            });
    }

    /**
     * Deregister a consumer; when the last consumer is removed the watcher is closed.
     */
    public CompletableFuture<Void> unwatch(Consumer<Versioned<Set<T>>> consumer) {
        if (consumers.remove(consumer) && consumers.isEmpty()) {
            return closeOrRewatch(false);
        } else {
            return FutureUtils.Void();
        }
    }

    /**
     * Start watching the key range from the given revision, reusing an existing
     * watcher future if one is already set.
     */
    private synchronized CompletableFuture<EtcdWatcher> watch(long revision) {
        if (null != watchFuture) {
            return watchFuture;
        }
        WatchOption.Builder optionBuilder = WatchOption.newBuilder()
            .withRevision(revision);
        if (null != endKey) {
            optionBuilder.withRange(endKey);
        }
        watchFuture = watchClient.watch(beginKey, optionBuilder.build(), this);
        return watchFuture.whenComplete((watcher, cause) -> {
            if (null != cause) {
                // the watch failed to establish : clear the field so a later attempt retries
                synchronized (KeySetReader.this) {
                    watchFuture = null;
                }
            }
        });
    }

    /**
     * Close the current watcher, optionally establishing a fresh watch first.
     */
    private CompletableFuture<Void> closeOrRewatch(boolean rewatch) {
        CompletableFuture<EtcdWatcher> oldWatcherFuture;
        synchronized (this) {
            oldWatcherFuture = watchFuture;
            // fix: clear the field *before* re-watching; otherwise watch() would observe
            // the old (about to be closed) future and return it instead of starting a
            // new watch, leaving the reader unwatched after an error-triggered rewatch.
            watchFuture = null;
            if (rewatch && null == closeFuture) {
                watchFuture = watch(revision);
            }
        }
        if (null != oldWatcherFuture) {
            return oldWatcherFuture.thenCompose(EtcdWatcher::closeAsync);
        } else {
            return FutureUtils.Void();
        }
    }

    /**
     * Watch callback : apply the response on success, otherwise close and re-watch.
     */
    @Override
    public void accept(WatchResponse watchResponse, Throwable throwable) {
        if (null == throwable) {
            Versioned<Set<T>> localValue = processWatchResponse(watchResponse);
            if (null != localValue) {
                notifyConsumers(localValue);
            }
        } else {
            closeOrRewatch(true);
        }
    }

    /**
     * Close the reader : stop watching and, if this reader owns the watch client,
     * close it as well. Idempotent.
     */
    public CompletableFuture<Void> closeAsync() {
        CompletableFuture<Void> future;
        synchronized (this) {
            if (null == closeFuture) {
                closeFuture = closeOrRewatch(false).thenCompose(ignored -> {
                    if (ownWatchClient) {
                        return watchClient.closeAsync();
                    } else {
                        return FutureUtils.Void();
                    }
                });
            }
            future = closeFuture;
        }
        return future;
    }

    @Override
    public void close() {
        try {
            FutureUtils.result(closeAsync());
        } catch (Exception e) {
            log.warn("Encountered exceptions on closing key reader : {}", e.getMessage());
        }
    }
}
| 600 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/KeyStream.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.primitives.UnsignedBytes;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.KV;
import io.etcd.jetcd.KeyValue;
import io.etcd.jetcd.options.GetOption;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
/**
 * Read a range of key/value pairs in a streaming way.
 *
 * <p>Keys in {@code [startKey, endKey)} are fetched in ascending order in batches of
 * {@code batchSize}. Each batch restarts from the last key returned by the previous
 * batch (etcd range reads include the start key), so the first key of every batch
 * after the first is a duplicate and is dropped from the result.
 */
@Slf4j
public class KeyStream<T> {

    private final KV kvClient;
    private final ByteSequence startKey;
    private final ByteSequence endKey;
    // decodes a raw etcd key into the domain type T
    private final Function<ByteSequence, T> encoder;
    private final int batchSize;
    // key to start the next batch from; guarded by `this`
    private ByteSequence nextKey;
    // last key returned by the previous batch (null before the first batch); guarded by `this`
    private ByteSequence lastKey = null;
    // whether etcd reported more keys past the last batch; guarded by `this`
    private boolean hasMore = true;

    /**
     * Create a key stream with the default batch size of 100.
     */
    public KeyStream(KV kvClient,
                     ByteSequence startKey,
                     ByteSequence endKey,
                     Function<ByteSequence, T> encoder) {
        this(kvClient, startKey, endKey, encoder, 100);
    }

    public KeyStream(KV kvClient,
                     ByteSequence startKey,
                     ByteSequence endKey,
                     Function<ByteSequence, T> encoder,
                     int batchSize) {
        this.kvClient = kvClient;
        this.startKey = startKey;
        this.endKey = endKey;
        this.nextKey = startKey;
        this.encoder = encoder;
        this.batchSize = batchSize;
    }

    /**
     * Read the next batch of keys.
     *
     * @return a future of the decoded keys; an empty list once the range is exhausted
     */
    public CompletableFuture<List<T>> readNext() {
        ByteSequence beginKey;
        int batchSize = this.batchSize;
        synchronized (this) {
            if (!hasMore) {
                return FutureUtils.value(Collections.emptyList());
            }
            beginKey = nextKey;
            if (null != lastKey) {
                // read one more in since we are including last key.
                batchSize += 1;
            }
        }
        if (log.isTraceEnabled()) {
            log.trace("Read keys between {} and {}", beginKey.toString(UTF_8), endKey.toString(UTF_8));
        }
        return kvClient.get(
            beginKey,
            GetOption.newBuilder()
                .withRange(endKey)
                .withKeysOnly(true)
                .withLimit(batchSize)
                .withSortField(GetOption.SortTarget.KEY)
                .withSortOrder(GetOption.SortOrder.ASCEND)
                .build()
        ).thenApply(getResp -> {
            List<KeyValue> kvs = getResp.getKvs();
            ByteSequence lkey;
            synchronized (KeyStream.this) {
                hasMore = getResp.isMore();
                lkey = lastKey;
                if (kvs.size() > 0) {
                    lastKey = nextKey = kvs.get(kvs.size() - 1).getKey();
                }
            }
            // drop the leading key when it duplicates the previous batch's last key.
            // fix: skip it in the stream pipeline instead of calling `kvs.remove(0)` --
            // the list returned by GetResponse#getKvs belongs to the response and may
            // be unmodifiable; mutating it risks UnsupportedOperationException and
            // corrupts the response's cached view.
            final boolean skipFirst = null != lkey
                && !kvs.isEmpty()
                && UnsignedBytes.lexicographicalComparator().compare(
                    lkey.getBytes(),
                    kvs.get(0).getKey().getBytes()) == 0;
            return kvs.stream()
                .skip(skipFirst ? 1 : 0)
                .map(kv -> encoder.apply(kv.getKey()))
                .collect(Collectors.toList());
        });
    }
}
| 601 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/ValueStream.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import com.google.common.annotations.VisibleForTesting;
import io.etcd.jetcd.ByteSequence;
import io.etcd.jetcd.Client;
import io.etcd.jetcd.common.exception.ClosedClientException;
import io.etcd.jetcd.kv.GetResponse;
import io.etcd.jetcd.options.WatchOption;
import io.etcd.jetcd.watch.WatchResponse;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.common.concurrent.FutureUtils;
import org.apache.bookkeeper.metadata.etcd.EtcdWatchClient;
import org.apache.bookkeeper.metadata.etcd.EtcdWatcher;
import org.apache.bookkeeper.versioning.LongVersion;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * A helper class to read the stream of values of a given key.
 *
 * <p>The stream caches the latest observed value together with the etcd revision it was
 * read at, and keeps the cache fresh by watching the key. Registered consumers are
 * notified whenever the cached value advances to a newer revision.
 */
@Slf4j
public class ValueStream<T> implements BiConsumer<WatchResponse, Throwable>, AutoCloseable {

    private final Client client;
    // whether this stream created (and hence must close) the watch client
    private final boolean ownWatchClient;
    private final EtcdWatchClient watchClient;
    // decodes a raw etcd value into the domain type T
    private final Function<ByteSequence, T> encoder;
    private final ByteSequence key;
    // registered consumers, keyed by the raw consumer; guarded by `consumers`
    private final Map<Consumer<Versioned<T>>, RevisionedConsumer<T>> consumers =
        new HashMap<>();
    // latest cached value; null when the key is absent
    private volatile T localValue = null;
    // etcd revision of the cached value; -1 means "never read"
    private volatile long revision = -1L;
    // future of the active watcher; null when not watching; guarded by `this`
    private CompletableFuture<EtcdWatcher> watchFuture = null;
    // non-null once closeAsync() has been called; guarded by `this`
    private CompletableFuture<Void> closeFuture = null;

    /**
     * Create a value stream that owns its own watch client.
     *
     * <p>The internally created {@link EtcdWatchClient} is closed by {@link #closeAsync()}.
     */
    public ValueStream(Client client,
                       Function<ByteSequence, T> encoder,
                       ByteSequence key) {
        // fix: mark the internally created watch client as owned so closeAsync()
        // releases it; previously ownWatchClient was always false and the client leaked
        this(client, new EtcdWatchClient(client), true, encoder, key);
    }

    /**
     * Create a value stream using an externally managed watch client.
     *
     * <p>The caller remains responsible for closing {@code watchClient}.
     */
    public ValueStream(Client client,
                       EtcdWatchClient watchClient,
                       Function<ByteSequence, T> encoder,
                       ByteSequence key) {
        this(client, watchClient, false, encoder, key);
    }

    private ValueStream(Client client,
                        EtcdWatchClient watchClient,
                        boolean ownWatchClient,
                        Function<ByteSequence, T> encoder,
                        ByteSequence key) {
        this.client = client;
        this.watchClient = watchClient;
        this.ownWatchClient = ownWatchClient;
        this.encoder = encoder;
        this.key = key;
    }

    /**
     * Read the current value from etcd and refresh the local cache.
     *
     * <p>If the response carries a newer revision than the cache, the cache is replaced
     * and all registered consumers are notified after the return value is captured.
     *
     * @return a future of the versioned value (version = etcd revision)
     */
    public CompletableFuture<Versioned<T>> read() {
        return client.getKVClient().get(
            key
        ).thenApply(getResp -> {
            boolean updated = updateLocalValue(getResp);
            Versioned<T> localValue = getLocalValue();
            try {
                return localValue;
            } finally {
                // notify after capturing the return value so consumers observe the
                // same versioned snapshot that the caller receives
                if (updated) {
                    notifyConsumers(localValue);
                }
            }
        });
    }

    @VisibleForTesting
    public int getNumConsumers() {
        synchronized (consumers) {
            return consumers.size();
        }
    }

    private void notifyConsumers(Versioned<T> localValue) {
        synchronized (consumers) {
            consumers.values().forEach(consumer -> consumer.accept(localValue));
        }
    }

    /**
     * Replace the cached value with the response contents if the response is newer.
     *
     * @return true if the cache was updated, false if the response was stale
     */
    private synchronized boolean updateLocalValue(GetResponse response) {
        if (revision < response.getHeader().getRevision()) {
            revision = response.getHeader().getRevision();
            if (response.getCount() > 0) {
                localValue = encoder.apply(response.getKvs().get(0).getValue());
            } else {
                // the key does not exist at this revision
                localValue = null;
            }
            return true;
        } else {
            return false;
        }
    }

    /**
     * Apply a watch response to the cached value.
     *
     * @return the refreshed versioned value, or null if the stream is closed or the
     *         response does not advance the cached revision
     */
    private synchronized Versioned<T> processWatchResponse(WatchResponse response) {
        if (null != closeFuture) {
            // the stream is already closing; ignore late watch events
            return null;
        }
        if (log.isDebugEnabled()) {
            log.debug("Received watch response : revision = {}, {} events = {}",
                response.getHeader().getRevision(), response.getEvents().size(), response.getEvents());
        }
        if (response.getHeader().getRevision() <= revision) {
            // stale events : the cache is already at (or past) this revision
            return null;
        }
        revision = response.getHeader().getRevision();
        response.getEvents().forEach(event -> {
            switch (event.getEventType()) {
                case PUT:
                    this.localValue = encoder.apply(event.getKeyValue().getValue());
                    break;
                case DELETE:
                    this.localValue = null;
                    break;
                default:
                    // ignore
                    break;
            }
        });
        return getLocalValue();
    }

    /**
     * Snapshot the cached value tagged with its revision.
     */
    @VisibleForTesting
    synchronized Versioned<T> getLocalValue() {
        return new Versioned<>(
            localValue,
            new LongVersion(revision)
        );
    }

    // return the cached value, or perform the first read if nothing was cached yet
    private CompletableFuture<Versioned<T>> getOrRead() {
        boolean shouldRead = false;
        synchronized (this) {
            if (revision < 0L) {
                // the value is never cached.
                shouldRead = true;
            }
        }
        if (shouldRead) {
            return read();
        } else {
            return FutureUtils.value(getLocalValue());
        }
    }

    @VisibleForTesting
    synchronized boolean isWatcherSet() {
        return null != watchFuture;
    }

    private synchronized CompletableFuture<EtcdWatcher> getWatchFuture() {
        return this.watchFuture;
    }

    /**
     * Block (by polling) until a watcher has been established; exposed for tests.
     */
    @VisibleForTesting
    public CompletableFuture<EtcdWatcher> waitUntilWatched() {
        CompletableFuture<EtcdWatcher> wf;
        while ((wf = getWatchFuture()) == null) {
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
                if (log.isDebugEnabled()) {
                    log.debug("Interrupted at waiting until the key is watched", e);
                }
            }
        }
        return wf;
    }

    /**
     * Register a consumer and ensure the key is being read and watched.
     *
     * <p>If the consumer is already registered this only returns the cached (or
     * freshly read) value. A new consumer additionally receives the initial value
     * through {@link #notifyConsumers(Versioned)} before the watch is established.
     */
    public CompletableFuture<Versioned<T>> readAndWatch(Consumer<Versioned<T>> consumer) {
        final RevisionedConsumer<T> revisionedConsumer = new RevisionedConsumer<>(consumer);
        final boolean consumerExisted;
        synchronized (consumers) {
            consumerExisted = (null != consumers.put(consumer, revisionedConsumer));
        }
        if (consumerExisted) {
            return getOrRead();
        }
        return getOrRead()
            .thenCompose(versionedVal -> {
                long revision = ((LongVersion) versionedVal.getVersion()).getLongVersion();
                synchronized (this) {
                    notifyConsumers(versionedVal);
                }
                return watchIfNeeded(revision).thenApply(ignored -> versionedVal);
            });
    }

    /**
     * Deregister a consumer.
     *
     * @return a future of true when the last consumer was removed (and the watcher
     *         closed), false otherwise
     */
    public CompletableFuture<Boolean> unwatch(Consumer<Versioned<T>> consumer) {
        boolean lastConsumer;
        synchronized (consumers) {
            lastConsumer = (null != consumers.remove(consumer) && consumers.isEmpty());
        }
        if (lastConsumer) {
            return closeOrRewatch(false).thenApply(ignored -> true);
        } else {
            return FutureUtils.value(false);
        }
    }

    // establish a watch unless one is already pending/active
    private synchronized CompletableFuture<EtcdWatcher> watchIfNeeded(long revision) {
        if (null != watchFuture) {
            return watchFuture;
        }
        watchFuture = watch(revision);
        return watchFuture;
    }

    private CompletableFuture<EtcdWatcher> watch(long revision) {
        WatchOption.Builder optionBuilder = WatchOption.newBuilder()
            .withRevision(revision);
        return watchClient.watch(key, optionBuilder.build(), this)
            .whenComplete((watcher, cause) -> {
                if (null != cause) {
                    // the watch failed to establish : clear the field so a later attempt retries
                    synchronized (ValueStream.this) {
                        watchFuture = null;
                    }
                }
            });
    }

    /**
     * Close the current watcher, optionally establishing a fresh watch first.
     */
    private CompletableFuture<Void> closeOrRewatch(boolean rewatch) {
        CompletableFuture<EtcdWatcher> oldWatcherFuture;
        synchronized (this) {
            oldWatcherFuture = watchFuture;
            if (rewatch && null == closeFuture) {
                watchFuture = watch(revision);
            } else {
                watchFuture = null;
            }
        }
        if (null != oldWatcherFuture) {
            return oldWatcherFuture.thenCompose(EtcdWatcher::closeAsync);
        } else {
            return FutureUtils.Void();
        }
    }

    /**
     * Watch callback : apply the response on success, otherwise close and possibly
     * re-watch.
     */
    @Override
    public void accept(WatchResponse watchResponse, Throwable throwable) {
        if (null == throwable) {
            if (log.isDebugEnabled()) {
                log.debug("Received watch response : revision = {}, {} events = {}",
                    watchResponse.getHeader().getRevision(),
                    watchResponse.getEvents().size(),
                    watchResponse.getEvents());
            }
            synchronized (this) {
                Versioned<T> localValue = processWatchResponse(watchResponse);
                if (null != localValue) {
                    notifyConsumers(localValue);
                }
            }
        } else {
            // rewatch if it is not a `ClosedClientException`
            closeOrRewatch(!(throwable instanceof ClosedClientException));
        }
    }

    /**
     * Close the stream : stop watching and, if this stream owns the watch client,
     * close it as well. Idempotent.
     */
    public CompletableFuture<Void> closeAsync() {
        CompletableFuture<Void> future;
        synchronized (this) {
            if (null == closeFuture) {
                closeFuture = closeOrRewatch(false).thenCompose(ignored -> {
                    if (ownWatchClient) {
                        return watchClient.closeAsync();
                    } else {
                        return FutureUtils.Void();
                    }
                });
            }
            future = closeFuture;
        }
        return future;
    }

    @Override
    public void close() {
        try {
            FutureUtils.result(closeAsync());
        } catch (Exception e) {
            log.warn("Encountered exceptions on closing key reader : {}", e.getMessage());
        }
    }
}
| 602 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/RevisionedConsumer.java | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.metadata.etcd.helpers;
import java.util.function.Consumer;
import org.apache.bookkeeper.versioning.Version;
import org.apache.bookkeeper.versioning.Version.Occurred;
import org.apache.bookkeeper.versioning.Versioned;
/**
 * A consumer that caches the last seen version and suppresses stale updates.
 *
 * <p>Wraps a delegate consumer and only forwards a versioned value when its version
 * occurred strictly after the last forwarded one. Equality and hash code delegate to
 * the wrapped consumer so a {@code RevisionedConsumer} can be looked up in a map by
 * either the wrapper or the raw consumer; note that this makes {@link #equals(Object)}
 * intentionally asymmetric against plain consumers.
 */
public class RevisionedConsumer<T> implements Consumer<Versioned<T>> {

    protected final Consumer<Versioned<T>> consumer;
    // version of the last value forwarded to the delegate; updates guarded by `this`
    protected volatile Version localVersion = null;

    protected RevisionedConsumer(Consumer<Versioned<T>> consumer) {
        this.consumer = consumer;
    }

    /**
     * Forward {@code versionedVal} to the delegate unless an equal-or-newer version
     * has already been seen.
     */
    @Override
    public void accept(Versioned<T> versionedVal) {
        synchronized (this) {
            if (localVersion != null
                && Occurred.BEFORE != localVersion.compare(versionedVal.getVersion())) {
                // already saw this version (or a newer one) : drop the stale update
                return;
            }
            localVersion = versionedVal.getVersion();
        }
        // invoke the delegate outside the lock
        consumer.accept(versionedVal);
    }

    @Override
    public int hashCode() {
        return consumer.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof Consumer)) {
            return false;
        }
        if (obj instanceof RevisionedConsumer) {
            // fix: wildcard cast instead of a raw type
            return consumer.equals(((RevisionedConsumer<?>) obj).consumer);
        } else {
            return consumer.equals(obj);
        }
    }

    @Override
    public String toString() {
        return consumer.toString();
    }
}
| 603 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/org/apache/bookkeeper/metadata/etcd/helpers/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Etcd helper classes.
*/
package org.apache.bookkeeper.metadata.etcd.helpers; | 604 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd/jetcd/EtcdClientUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.etcd.jetcd;
import java.lang.reflect.Field;
/**
 * Utils to access fields in Etcd client.
 */
class EtcdClientUtils {

    /**
     * Read the value of a (possibly private) instance field via reflection.
     *
     * <p>Unlike a single {@link Class#getDeclaredField(String)} lookup, this walks up
     * the class hierarchy so fields declared on a superclass are found as well.
     *
     * @param obj object to read the field from
     * @param fieldName name of the field
     * @return the field value, cast to the caller's expected type
     * @throws NoSuchFieldException if no class in the hierarchy declares the field
     * @throws IllegalAccessException if the field cannot be made accessible
     */
    @SuppressWarnings("unchecked")
    static <T> T getField(Object obj, String fieldName)
            throws NoSuchFieldException, IllegalAccessException {
        for (Class<?> cls = obj.getClass(); cls != null; cls = cls.getSuperclass()) {
            try {
                Field field = cls.getDeclaredField(fieldName);
                field.setAccessible(true);
                return (T) field.get(obj);
            } catch (NoSuchFieldException e) {
                // not declared on this class; keep looking up the hierarchy
            }
        }
        throw new NoSuchFieldException(fieldName);
    }
}
| 605 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd/jetcd/EtcdConnectionManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.etcd.jetcd;
import io.etcd.jetcd.api.WatchGrpc;
import lombok.extern.slf4j.Slf4j;
/**
* Keep a reference to etcd internal connection manager.
*/
@Slf4j
public class EtcdConnectionManager {
private final ClientImpl client;
private ClientConnectionManager connMgr;
public EtcdConnectionManager(Client client) {
this((ClientImpl) client);
}
EtcdConnectionManager(ClientImpl client) {
this.client = client;
try {
this.connMgr = EtcdClientUtils.getField(
client, "connectionManager"
);
} catch (NoSuchFieldException e) {
log.error("No `connectionManager` field found in etcd client", e);
throw new RuntimeException(
"No `connectionManager` field found in etcd client", e);
} catch (IllegalAccessException e) {
log.error("Illegal access to `connectionManager` field in etcd client", e);
throw new RuntimeException(
"Illegal access to `connectionManager` field in etcd client", e);
}
}
/**
* Create a watch api grpc stub.
*
* @return a watch api grpc stub.
*/
public WatchGrpc.WatchStub newWatchStub() {
return connMgr.newStub(WatchGrpc::newStub);
}
}
| 606 |
0 | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd | Create_ds/bookkeeper/metadata-drivers/etcd/src/main/java/io/etcd/jetcd/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Etcd client changes for bookkeeper metadata driver.
*/
package io.etcd.jetcd; | 607 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto/ProtocolBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.proto;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.protobuf.ByteString;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCountUtil;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.proto.BookieProtoEncoding.EnDecoder;
import org.apache.bookkeeper.proto.BookieProtoEncoding.RequestEnDeCoderPreV3;
import org.apache.bookkeeper.proto.BookieProtoEncoding.RequestEnDecoderV3;
import org.apache.bookkeeper.proto.BookkeeperProtocol.AddRequest;
import org.apache.bookkeeper.proto.BookkeeperProtocol.BKPacketHeader;
import org.apache.bookkeeper.proto.BookkeeperProtocol.OperationType;
import org.apache.bookkeeper.proto.BookkeeperProtocol.ProtocolVersion;
import org.apache.bookkeeper.proto.BookkeeperProtocol.Request;
import org.apache.bookkeeper.util.ByteBufList;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.slf4j.MDC;
/**
* Benchmarking serialization and deserialization.
*/
@BenchmarkMode({Mode.Throughput})
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Thread)
public class ProtocolBenchmark {
@Param({"10", "100", "1000", "10000"})
int size;
byte[] masterKey;
ByteBuf entry;
long ledgerId;
long entryId;
short flags;
EnDecoder reqEnDeV2;
EnDecoder reqEnDeV3;
@Setup
public void prepare() {
this.masterKey = "test-benchmark-key".getBytes(UTF_8);
byte[] data = new byte[this.size];
ThreadLocalRandom.current().nextBytes(data);
this.entry = Unpooled.wrappedBuffer(data);
this.ledgerId = ThreadLocalRandom.current().nextLong();
this.entryId = ThreadLocalRandom.current().nextLong();
this.flags = 1;
// prepare the encoder
this.reqEnDeV2 = new RequestEnDeCoderPreV3(null);
this.reqEnDeV3 = new RequestEnDecoderV3(null);
}
@Benchmark
public void testAddEntryV3() throws Exception {
// Build the request and calculate the total size to be included in the packet.
BKPacketHeader.Builder headerBuilder = BKPacketHeader.newBuilder()
.setVersion(ProtocolVersion.VERSION_THREE)
.setOperation(OperationType.ADD_ENTRY)
.setTxnId(0L);
ByteBuf toSend = entry.slice();
byte[] toSendArray = new byte[toSend.readableBytes()];
toSend.getBytes(toSend.readerIndex(), toSendArray);
AddRequest.Builder addBuilder = AddRequest.newBuilder()
.setLedgerId(ledgerId)
.setEntryId(entryId)
.setMasterKey(ByteString.copyFrom(masterKey))
.setBody(ByteString.copyFrom(toSendArray))
.setFlag(AddRequest.Flag.RECOVERY_ADD);
Request request = Request.newBuilder()
.setHeader(headerBuilder)
.setAddRequest(addBuilder)
.build();
Object res = this.reqEnDeV3.encode(request, ByteBufAllocator.DEFAULT);
ReferenceCountUtil.release(res);
}
@Benchmark
public void testAddEntryV3WithMdc() throws Exception {
MDC.put("parent_id", "LetsPutSomeLongParentRequestIdHere");
MDC.put("request_id", "LetsPutSomeLongRequestIdHere");
// Build the request and calculate the total size to be included in the packet.
BKPacketHeader.Builder headerBuilder = BKPacketHeader.newBuilder()
.setVersion(ProtocolVersion.VERSION_THREE)
.setOperation(OperationType.ADD_ENTRY)
.setTxnId(0L);
ByteBuf toSend = entry.slice();
byte[] toSendArray = new byte[toSend.readableBytes()];
toSend.getBytes(toSend.readerIndex(), toSendArray);
AddRequest.Builder addBuilder = AddRequest.newBuilder()
.setLedgerId(ledgerId)
.setEntryId(entryId)
.setMasterKey(ByteString.copyFrom(masterKey))
.setBody(ByteString.copyFrom(toSendArray))
.setFlag(AddRequest.Flag.RECOVERY_ADD);
Request request = PerChannelBookieClient.appendRequestContext(Request.newBuilder())
.setHeader(headerBuilder)
.setAddRequest(addBuilder)
.build();
Object res = this.reqEnDeV3.encode(request, ByteBufAllocator.DEFAULT);
ReferenceCountUtil.release(res);
MDC.clear();
}
/**
 * Adds two fixed context pairs ("parent_id" and "request_id") to the request
 * builder without consulting the MDC, mirroring the shape of the MDC-backed
 * context-appending path for comparison.
 */
static Request.Builder appendRequestContextNoMdc(Request.Builder builder) {
    builder.addRequestContext(BookkeeperProtocol.ContextPair.newBuilder()
            .setKey("parent_id")
            .setValue("LetsPutSomeLongParentRequestIdHere")
            .build());
    builder.addRequestContext(BookkeeperProtocol.ContextPair.newBuilder()
            .setKey("request_id")
            .setValue("LetsPutSomeLongRequestIdHere")
            .build());
    return builder;
}
@Benchmark
public void testAddEntryV3WithExtraContextDataNoMdc() throws Exception {
    // V3 ADD_ENTRY header; txnId does not affect encoding cost.
    BKPacketHeader.Builder header = BKPacketHeader.newBuilder()
            .setVersion(ProtocolVersion.VERSION_THREE)
            .setOperation(OperationType.ADD_ENTRY)
            .setTxnId(0L);
    // Snapshot the entry payload into a byte[] for the protobuf body.
    ByteBuf payload = entry.slice();
    byte[] payloadBytes = new byte[payload.readableBytes()];
    payload.getBytes(payload.readerIndex(), payloadBytes);
    AddRequest.Builder add = AddRequest.newBuilder()
            .setLedgerId(ledgerId)
            .setEntryId(entryId)
            .setMasterKey(ByteString.copyFrom(masterKey))
            .setBody(ByteString.copyFrom(payloadBytes))
            .setFlag(AddRequest.Flag.RECOVERY_ADD);
    // Attach the fixed context pairs directly, bypassing the MDC.
    Request request = appendRequestContextNoMdc(Request.newBuilder())
            .setHeader(header)
            .setAddRequest(add)
            .build();
    Object encoded = this.reqEnDeV3.encode(request, ByteBufAllocator.DEFAULT);
    ReferenceCountUtil.release(encoded);
}
}
| 608 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Protocol implementations benchmarks.
*/
package org.apache.bookkeeper.proto; | 609 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto/checksum/DigestTypeBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.proto.checksum;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.proto.DataFormats.LedgerMetadataFormat.DigestType;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
 * Microbenchmarks for the supported digest types across different buffer layouts.
 *
 * <p>Getting started:
 * 1. http://tutorials.jenkov.com/java-performance/jmh.html
 * 2. http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/
 * 3. google
 * To run:
 * build project from command line.
 * execute ./run.sh
 */
public class DigestTypeBenchmark {

    /**
     * Layout of the buffer handed to the digest code.
     */
    public enum BufferType {
        ARRAY_BACKED,
        NOT_ARRAY_BACKED,
        BYTE_BUF_DEFAULT_ALLOC
    }

    /**
     * Digest algorithm under test.
     */
    public enum Digest {
        MAC,
        CRC32,
        CRC32_C,
    }

    /** Returns {@code sz} pseudo-random bytes. */
    static byte[] randomBytes(int sz) {
        byte[] b = new byte[sz];
        ThreadLocalRandom.current().nextBytes(b);
        return b;
    }

    /**
     * Per-thread benchmark state: one DigestManager per algorithm plus one
     * pre-filled buffer per {@link BufferType}.
     */
    @State(Scope.Thread)
    public static class MyState {
        @Param
        public BufferType bufferType;
        @Param
        public Digest digest;
        @Param({"64", "1024", "4086", "8192", "16384", "65536"})
        public int entrySize;

        private DigestManager crc32;
        private DigestManager crc32c;
        private DigestManager mac;

        private ByteBuf arrayBackedBuffer;
        private CompositeByteBuf notArrayBackedBuffer;
        private ByteBuf byteBufDefaultAlloc;
        // Scratch buffer the benchmark writes the computed digest code into.
        public ByteBuf digestBuf;

        @Setup(Level.Trial)
        public void doSetup() throws Exception {
            final byte[] password = "password".getBytes(StandardCharsets.UTF_8);
            crc32 = DigestManager.instantiate(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE),
                    password, DigestType.CRC32, PooledByteBufAllocator.DEFAULT, true);
            crc32c = DigestManager.instantiate(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE),
                    password, DigestType.CRC32C, PooledByteBufAllocator.DEFAULT, true);
            mac = DigestManager.instantiate(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE),
                    password, DigestType.HMAC, PooledByteBufAllocator.DEFAULT, true);

            digestBuf = Unpooled.buffer(getDigestManager(digest).getMacCodeLength());

            arrayBackedBuffer = Unpooled.wrappedBuffer(randomBytes(entrySize));

            final int headerSize = 32 + getDigestManager(digest).getMacCodeLength();
            notArrayBackedBuffer = new CompositeByteBuf(ByteBufAllocator.DEFAULT, true, 2);
            // Use addComponent(true, ...) so the composite's writerIndex advances with
            // each component. The single-argument overload leaves writerIndex at 0,
            // which made readableBytes() == 0 and caused the NOT_ARRAY_BACKED case to
            // digest an empty buffer instead of entrySize bytes.
            notArrayBackedBuffer.addComponent(true, Unpooled.wrappedBuffer(randomBytes(headerSize)));
            notArrayBackedBuffer.addComponent(true, Unpooled.wrappedBuffer(randomBytes(entrySize - headerSize)));

            byteBufDefaultAlloc = ByteBufAllocator.DEFAULT.buffer(entrySize, entrySize);
            byteBufDefaultAlloc.writeBytes(randomBytes(entrySize));

            // Sanity-check that the two layouts really differ in array backing.
            if (!arrayBackedBuffer.hasArray() || notArrayBackedBuffer.hasArray()) {
                throw new IllegalStateException("buffers in invalid state");
            }
        }

        @TearDown(Level.Trial)
        public void doTearDown() {
            // Nothing to release per-trial; buffers live for the benchmark's lifetime.
        }

        /** Returns the pre-built buffer for the requested layout. */
        public ByteBuf getByteBuff(BufferType bType) {
            switch (bType) {
            case ARRAY_BACKED:
                return arrayBackedBuffer;
            case NOT_ARRAY_BACKED:
                return notArrayBackedBuffer;
            case BYTE_BUF_DEFAULT_ALLOC:
                return byteBufDefaultAlloc;
            default:
                throw new IllegalArgumentException("unknown buffer type " + bType);
            }
        }

        /** Returns the DigestManager instance for the requested algorithm. */
        public DigestManager getDigestManager(Digest digest) {
            switch (digest) {
            case CRC32:
                return crc32;
            case CRC32_C:
                return crc32c;
            case MAC:
                return mac;
            default:
                throw new IllegalArgumentException("unknown digest " + digest);
            }
        }
    }

    /**
     * Digests the entire buffer for the selected (bufferType, digest, entrySize)
     * combination and writes the resulting digest code into {@code digestBuf}.
     */
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Warmup(iterations = 2, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 2, time = 10, timeUnit = TimeUnit.SECONDS)
    @Threads(2)
    @Fork(value = 1, warmups = 1)
    public void digestManager(MyState state) {
        final ByteBuf buff = state.getByteBuff(state.bufferType);
        final DigestManager dm = state.getDigestManager(state.digest);
        int digest = dm.update(0, buff, 0, buff.readableBytes());
        state.digestBuf.clear();
        dm.populateValueAndReset(digest, state.digestBuf);
    }
}
| 610 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto/checksum/DigestManagerBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.proto.checksum;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.proto.DataFormats.LedgerMetadataFormat.DigestType;
import org.apache.bookkeeper.util.ByteBufList;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
* Microbenchmarks for different digest type
* getting started:
* 1. http://tutorials.jenkov.com/java-performance/jmh.html
* 2. http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/
* 3. google
* To run:
* build project from command line.
* execute ./run.sh
*/
public class DigestManagerBenchmark {
    /** Returns {@code sz} pseudo-random bytes. */
    static byte[] randomBytes(int sz) {
        byte[] b = new byte[sz];
        ThreadLocalRandom.current().nextBytes(b);
        return b;
    }
    /**
     * MyState: per-thread state holding a CRC32C DigestManager and a fully
     * packaged entry (as produced for sending) that verifyDigest re-checks.
     */
    @State(Scope.Thread)
    public static class MyState {
        @Param({"64", "1024", "4086", "8192"})
        public int entrySize;
        // Digest manager under test; fixed to CRC32C with a random ledger id.
        private DigestManager dm;
        // Wire-format entry (header + payload + digest) reused by each invocation.
        public ByteBuf digestBuf;
        @Setup(Level.Trial)
        public void doSetup() throws Exception {
            final byte[] password = "password".getBytes(StandardCharsets.UTF_8);
            dm = DigestManager.instantiate(ThreadLocalRandom.current().nextLong(0, Long.MAX_VALUE),
                    password, DigestType.CRC32C, PooledByteBufAllocator.DEFAULT, true);
            ByteBuf data = ByteBufAllocator.DEFAULT.directBuffer(entrySize, entrySize);
            data.writeBytes(randomBytes(entrySize));
            digestBuf = ByteBufAllocator.DEFAULT.directBuffer();
            // Package entry 1234 plus its digest into wire format once, up front
            // (argument semantics per DigestManager's API — ids/length/flags).
            digestBuf.writeBytes((ByteBuf)
                    dm.computeDigestAndPackageForSending(1234, 1234, entrySize, data,
                            new byte[0], 0));
        }
    }
    /**
     * Measures digest verification of the pre-packaged entry. The reader index
     * is rewound on every call so the same buffer can be verified repeatedly.
     */
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.MICROSECONDS)
    @Warmup(iterations = 2, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 3, time = 10, timeUnit = TimeUnit.SECONDS)
    @Threads(2)
    @Fork(1)
    public void verifyDigest(MyState state) throws Exception {
        state.digestBuf.readerIndex(0);
        state.dm.verifyDigestAndReturnData(1234, state.digestBuf);
    }
}
| 611 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/proto/checksum/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Utilities for checksum functions.
*/
package org.apache.bookkeeper.proto.checksum; | 612 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/common/OrderedExecutorBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.bookkeeper.common.util.OrderedExecutor;
import org.apache.bookkeeper.common.util.OrderedScheduler;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
 * Microbenchmarks comparing the task-submission overhead of different
 * executor providers (plain JDK pool vs. BookKeeper's ordered executors).
 */
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Threads(16)
@Fork(1)
@Warmup(iterations = 1, time = 10, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 3, time = 10, timeUnit = TimeUnit.SECONDS)
public class OrderedExecutorBenchmark {
    // Immutable registry mapping the benchmark parameter name to a factory for
    // the executor under test; declared final since it is never reassigned.
    private static final Map<String, Supplier<ExecutorService>> providers = ImmutableMap.of(
            "JDK-ThreadPool", () -> Executors.newFixedThreadPool(1),
            "OrderedExecutor", () -> OrderedExecutor.newBuilder().numThreads(1).build(),
            "OrderedScheduler", () -> OrderedScheduler.newSchedulerBuilder().numThreads(1).build());

    /**
     * State holder of the test: builds the selected executor before each trial
     * and shuts it down afterwards so worker threads do not leak across trials.
     */
    @State(Scope.Benchmark)
    public static class TestState {
        @Param({"JDK-ThreadPool", "OrderedExecutor", "OrderedScheduler"})
        private String executorName;
        private ExecutorService executor;

        @Setup(Level.Trial)
        public void setup() {
            executor = providers.get(executorName).get();
        }

        @TearDown(Level.Trial)
        public void teardown() {
            executor.shutdown();
        }
    }

    /**
     * Measures the round trip of submitting a no-op task and blocking on its future.
     */
    @Benchmark
    public void submitAndWait(TestState s) throws Exception {
        s.executor.submit(() -> {
        }).get();
    }
}
| 613 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/common/MpScQueueBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common;
import java.util.ArrayList;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import lombok.SneakyThrows;
import org.apache.bookkeeper.common.collections.BatchedArrayBlockingQueue;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OperationsPerInvocation;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
 * Microbenchmarks comparing multi-producer/single-consumer blocking queue
 * implementations ({@code ArrayBlockingQueue} vs. {@code BatchedArrayBlockingQueue}).
 */
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@BenchmarkMode(Mode.Throughput)
@Threads(16)
@Fork(1)
@Warmup(iterations = 1, time = 10, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 3, time = 10, timeUnit = TimeUnit.SECONDS)
public class MpScQueueBenchmark {
    private static final int QUEUE_SIZE = 100_000;

    /**
     * State holder of the test. One dedicated consumer thread drains each queue
     * so the benchmarked producers never block indefinitely on a full queue.
     */
    @State(Scope.Benchmark)
    public static class TestState {
        // Parameterized element types (previously raw types, which defeated
        // compile-time checking and produced unchecked-conversion warnings).
        private final ArrayBlockingQueue<Integer> arrayBlockingQueue = new ArrayBlockingQueue<>(QUEUE_SIZE);
        private final BatchedArrayBlockingQueue<Integer> batchedArrayBlockingQueue =
                new BatchedArrayBlockingQueue<>(QUEUE_SIZE);
        // Pre-filled batch used by the bulk-enqueue benchmark.
        private final Integer[] batchArray = new Integer[1000];
        private final ExecutorService executor = Executors.newCachedThreadPool();

        @Setup(Level.Trial)
        public void setup() {
            for (int i = 0; i < 1000; i++) {
                batchArray[i] = i;
            }
            executor.execute(this::consumeABQ);
            executor.execute(this::consumeBAABQ);
        }

        @SneakyThrows
        private void consumeABQ() {
            ArrayList<Integer> localList = new ArrayList<>();
            try {
                while (true) {
                    arrayBlockingQueue.drainTo(localList);
                    if (localList.isEmpty()) {
                        // Nothing drained: block until at least one element arrives.
                        arrayBlockingQueue.take();
                    }
                    localList.clear();
                }
            } catch (InterruptedException ie) {
                // Interrupted by teardown's shutdownNow(): exit the consumer loop.
            }
        }

        @SneakyThrows
        private void consumeBAABQ() {
            Integer[] localArray = new Integer[20_000];
            try {
                while (true) {
                    batchedArrayBlockingQueue.takeAll(localArray);
                }
            } catch (InterruptedException ie) {
                // Interrupted by teardown's shutdownNow(): exit the consumer loop.
            }
        }

        @TearDown(Level.Trial)
        public void teardown() {
            executor.shutdownNow();
        }

        @TearDown(Level.Iteration)
        public void cleanupQueue() throws InterruptedException {
            // Give the consumer threads a moment to drain what the iteration left behind.
            Thread.sleep(1_000);
        }
    }

    /** Single-element enqueue on the JDK ArrayBlockingQueue. */
    @Benchmark
    public void arrayBlockingQueue(TestState s) throws Exception {
        s.arrayBlockingQueue.put(1);
    }

    /** Single-element enqueue on the batch-aware queue. */
    @Benchmark
    public void batchAwareArrayBlockingQueueSingleEnqueue(TestState s) throws Exception {
        s.batchedArrayBlockingQueue.put(1);
    }

    /** Bulk enqueue of 1000 elements, normalized per element via OperationsPerInvocation. */
    @Benchmark
    @OperationsPerInvocation(1000)
    public void batchAwareArrayBlockingQueueBatch(TestState s) throws Exception {
        s.batchedArrayBlockingQueue.putAll(s.batchArray, 0, 1000);
    }
}
| 614 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/common/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Generic benchmarks.
*/
package org.apache.bookkeeper.common; | 615 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats/StatsLoggerBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.stats;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider;
import org.apache.bookkeeper.stats.codahale.FastCodahaleMetricsProvider;
import org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
 * Microbenchmarks for different stats backend providers.
 */
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@Threads(16)
@Fork(1)
@Warmup(iterations = 1, time = 10, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 3, time = 10, timeUnit = TimeUnit.SECONDS)
public class StatsLoggerBenchmark {
    /** Registry of providers under test, keyed by the {@code statsProvider} param. */
    private static final Map<String, Supplier<StatsProvider>> providers = new HashMap<>();

    static {
        providers.put("Prometheus", PrometheusMetricsProvider::new);
        providers.put("Codahale", CodahaleMetricsProvider::new);
        providers.put("FastCodahale", FastCodahaleMetricsProvider::new);
    }

    /**
     * State holder of the logger.
     */
    @State(Scope.Benchmark)
    public static class LoggerState {
        // Only list providers registered above. The previous list also named
        // "Twitter" and "Ostrich", for which providers.get(...) returned null
        // and setup() crashed with a NullPointerException.
        @Param({"Prometheus", "Codahale", "FastCodahale"})
        private String statsProvider;
        private Counter counter;
        private OpStatsLogger opStats;
        private long startTime = System.nanoTime();

        @Setup(Level.Trial)
        public void setup() {
            Supplier<StatsProvider> factory = providers.get(statsProvider);
            if (factory == null) {
                // Fail fast with a clear message if the param list and map drift apart again.
                throw new IllegalArgumentException("unknown stats provider: " + statsProvider);
            }
            StatsLogger logger = factory.get().getStatsLogger("test");
            counter = logger.getCounter("counter");
            opStats = logger.getOpStatsLogger("opstats");
        }
    }

    /** Measures the cost of a single counter increment. */
    @Benchmark
    public void counterIncrement(LoggerState s) {
        s.counter.inc();
    }

    /** Measures the cost of recording one latency sample. */
    @Benchmark
    public void recordLatency(LoggerState s) {
        s.opStats.registerSuccessfulValue(System.nanoTime() - s.startTime);
    }
}
| 616 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Stats implementations benchmarks.
*/
package org.apache.bookkeeper.stats; | 617 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats/codahale/TimerBenchmark.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
/**
* Microbenchmarks for default and improved (fast) Codahale timers.
* To run:
* build project from command line (mvn clean install).
* execute ./run.sh
* Specify argument "TimerBenchmark" to only run this benchmark.
*/
public class TimerBenchmark {
    /**
     * Type of Timer.
     */
    public enum TimerType {
        CodahaleTimer,
        FastTimer
    }
    /**
     * Thread-local state.
     * Note: the timer array and the sample-time array are intentionally static,
     * so all benchmark threads hammer the same timer instances — the concurrency
     * of timer updates is part of what is being measured.
     */
    @State(Scope.Thread)
    public static class MyState {
        @Param
        public TimerType timerType;
        @Param({"1", "10", "100"})
        public int timersPerThread;
        // Shared across threads (see class comment); round-robined via timerIdx.
        private static OpStatsLogger[] timers;
        private int timerIdx = 0;
        // Pre-generated pseudo-random latencies so updates hit varying buckets.
        private static long[] times;
        private int timeIdx = 0;
        @Setup(Level.Trial)
        @SuppressFBWarnings("SSD_DO_NOT_USE_INSTANCE_LOCK_ON_SHARED_STATIC_DATA")
        public void doSetup() throws Exception {
            StatsLogger logger = null;
            switch (timerType) {
            case CodahaleTimer:
                logger = new CodahaleMetricsProvider().getStatsLogger("test");
                break;
            case FastTimer:
                logger = new FastCodahaleMetricsProvider().getStatsLogger("test");
                break;
            }
            // Class-level lock so only the first thread to reach setup initializes
            // the shared static arrays; the others reuse them.
            synchronized (MyState.class) {
                // timers (and response times) are shared across threads to test
                // concurrency of timer updates.
                if (timers == null) {
                    timers = new OpStatsLogger[timersPerThread];
                    for (int i = 0; i < timersPerThread; i++) {
                        timers[i] = logger.getOpStatsLogger("test-timer-" + i);
                    }
                    // just a bunch of random response times to not always hit the same bucket
                    times = new long[1000];
                    for (int i = 0; i < times.length; i++) {
                        times[i] = Math.abs(ThreadLocalRandom.current().nextLong() % 1000);
                    }
                }
            }
        }
        /** Round-robins over the shared timers so updates spread across all of them. */
        public OpStatsLogger getTimer() {
            return timers[(timerIdx++) % timers.length];
        }
        /** Returns the next pre-generated latency sample (round-robin). */
        public long getTime() {
            return times[(timeIdx++) % times.length];
        }
        public boolean isGetSnapshot() {
            // create a snapshot every 109 operations (typically snapshot creations will be much more infrequent)
            // 109 is prime, guaranteeing that we will create snapshots across all timers
            if (timeIdx % 109 == 0) {
                timeIdx++;
                return true;
            } else {
                return false;
            }
        }
    }
    /**
     * Tests the performance of (concurrent) timer updates.
     * Note that test duration must exceed TIME_WINDOW (default: 60) to trigger
     * FastTimer's housekeeping. Manual tests show little performance difference
     * for longer running tests (since housekeeping is infrequent and cheap), so
     * we keep the test duration low to not have tests run for too long.
     * @param state
     */
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Warmup(iterations = 2, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 2, time = 10, timeUnit = TimeUnit.SECONDS)
    @Threads(4)
    @Fork(value = 1, warmups = 1)
    public void timerTest(MyState state) {
        state.getTimer().registerSuccessfulEvent(state.getTime(), TimeUnit.MILLISECONDS);
    }
    /**
     * Tests the performance of (concurrent) timer updates with
     * the creation of snapshots. We expect snapshot creation to
     * be infrequent (e.g. once every N seconds), while timer updates
     * are frequent (for many timers hundreds or thousands of times
     * per second). Here we're testing the creation of snapshots at
     * a rate much higher than we would expect in real life.
     * @param state
     */
    @Benchmark
    @BenchmarkMode(Mode.Throughput)
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Warmup(iterations = 2, time = 3, timeUnit = TimeUnit.SECONDS)
    @Measurement(iterations = 2, time = 10, timeUnit = TimeUnit.SECONDS)
    @Threads(4)
    @Fork(value = 1, warmups = 1)
    public void timerTestWithSnapshots(MyState state) {
        OpStatsLogger timer = state.getTimer();
        if (state.isGetSnapshot()) {
            timer.toOpStatsData();
        } else {
            timer.registerSuccessfulEvent(state.getTime(), TimeUnit.MILLISECONDS);
        }
    }
    /**
     * Test routing for manual testing of memory footprint of default Codahale Timer vs. improved FastTimer.
     * JMH can't do that, so we have a small stand-alone test routine here.
     * Run with:
     * <code>
     * java -Xmx1g -cp target/benchmarks.jar \
     *     org.apache.bookkeeper.stats.codahale.TimerBenchmark &lt;codahale|fast&gt;
     * </code>
     * @param args
     */
    public static void main(String[] args) {
        if (args.length != 1
                || (!args[0].equalsIgnoreCase("codahale") && !args[0].equalsIgnoreCase("fast"))) {
            System.out.println("usage: " + TimerBenchmark.class.getCanonicalName() + " <codahale|fast>");
            System.exit(1);
        }
        StatsLogger logger = null;
        if (args[0].equalsIgnoreCase("codahale")) {
            logger = new CodahaleMetricsProvider().getStatsLogger("test");
        } else {
            logger = new FastCodahaleMetricsProvider().getStatsLogger("test");
        }
        System.out.println("Using " + logger.getClass().getCanonicalName());
        System.out.println("Creating 1000 OpStatsLoggers (2000 Timers) and updating each of them 1000 times ...");
        OpStatsLogger[] timers = new OpStatsLogger[1000];
        for (int i = 0; i < timers.length; i++) {
            timers[i] = logger.getOpStatsLogger("test-timer-" + i);
        }
        long[] times = new long[199]; // 199 is prime, so each timer will get each time
        for (int i = 0; i < times.length; i++) {
            times[i] = Math.abs(ThreadLocalRandom.current().nextLong() % 1000);
        }
        for (int i = 0; i < 1000 * timers.length; i++) {
            timers[i % timers.length].registerSuccessfulEvent(times[i % times.length], TimeUnit.MILLISECONDS);
            timers[i % timers.length].registerFailedEvent(times[i % times.length], TimeUnit.MILLISECONDS);
        }
        times = null; // let it become garbage
        System.out.println("Done.");
        System.out.println("Now run 'jmap -histo:live <pid>' on this JVM to get a heap histogram, then kill this JVM.");
        // Park forever so the operator can inspect the live heap with jmap.
        while (true) {
            try {
                TimeUnit.MILLISECONDS.sleep(1000);
            } catch (Exception e) {
                // ignore
            }
        }
    }
}
| 618 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/stats/codahale/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Stats benchmark runner.
*/
package org.apache.bookkeeper.stats.codahale; | 619 |
0 | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/microbenchmarks/src/main/java/org/apache/bookkeeper/bookie/GroupSortBenchmark.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.bookie;
import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.bookie.storage.ldb.ArrayGroupSort;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
@OutputTimeUnit(TimeUnit.SECONDS)
@Fork(1)
@Warmup(iterations = 1, time = 10)
@Measurement(iterations = 3, time = 10)
public class GroupSortBenchmark {

    private static final int N = 10_000;

    /**
     * Benchmark state. Pristine input arrays are built once; the working
     * arrays handed to the benchmarks are refreshed before every invocation,
     * because sorting mutates them in place. (Previously the pre-sorted and
     * reverse-sorted arrays were sorted in place by the first invocation and
     * never restored, so later invocations of the reverse-sorted benchmarks
     * were actually measuring sorts of already-sorted data.)
     */
    @State(Scope.Benchmark)
    public static class TestState {
        // Pristine inputs — never handed to a benchmark directly.
        private final long[] randomItems = new long[N * 4];
        private final long[] pristineSortedItems;
        private final long[] pristineReverseSortedItems = new long[N * 4];
        private final long[] pristineGroupSortedItems;
        private final long[] pristineReverseGroupSortedItems = new long[N * 4];

        // Working copies that the benchmarks sort (and therefore mutate).
        private long[] items;
        private long[] sortedItems;
        private long[] reverseSortedItems;
        private long[] groupSortedItems;
        private long[] reverseGroupSortedItems;

        public TestState() {
            Random r = ThreadLocalRandom.current();
            for (int i = 0; i < (N * 4); i++) {
                randomItems[i] = r.nextLong();
            }

            // Group-sorted baseline: sorted in groups of 4 longs.
            pristineGroupSortedItems = Arrays.copyOf(randomItems, randomItems.length);
            ArrayGroupSort.sort(pristineGroupSortedItems);

            // Reverse the group order while keeping each 4-long group intact.
            for (int i = 0; i < (N * 4); i += 4) {
                pristineReverseGroupSortedItems[i] = pristineGroupSortedItems[(N - 1) * 4 - i];
                pristineReverseGroupSortedItems[i + 1] = pristineGroupSortedItems[(N - 1) * 4 - i + 1];
                pristineReverseGroupSortedItems[i + 2] = pristineGroupSortedItems[(N - 1) * 4 - i + 2];
                pristineReverseGroupSortedItems[i + 3] = pristineGroupSortedItems[(N - 1) * 4 - i + 3];
            }

            pristineSortedItems = Arrays.copyOf(randomItems, randomItems.length);
            Arrays.sort(pristineSortedItems);
            for (int i = 0; i < (N * 4); i++) {
                pristineReverseSortedItems[i] = pristineSortedItems[N * 4 - 1 - i];
            }
        }

        /**
         * Refreshes every working array from its pristine copy so each
         * invocation sorts the intended input. Runs outside the measured
         * region ({@code Level.Invocation}).
         */
        @Setup(Level.Invocation)
        public void setupInvocation() {
            items = Arrays.copyOf(randomItems, randomItems.length);
            sortedItems = Arrays.copyOf(pristineSortedItems, pristineSortedItems.length);
            reverseSortedItems = Arrays.copyOf(pristineReverseSortedItems, pristineReverseSortedItems.length);
            groupSortedItems = Arrays.copyOf(pristineGroupSortedItems, pristineGroupSortedItems.length);
            reverseGroupSortedItems =
                    Arrays.copyOf(pristineReverseGroupSortedItems, pristineReverseGroupSortedItems.length);
        }
    }

    /** Group-sort of uniformly random input. */
    @Benchmark
    public void randomGroupSort(GroupSortBenchmark.TestState s) {
        ArrayGroupSort.sort(s.items);
    }

    /** Plain {@link Arrays#sort} of uniformly random input, for comparison. */
    @Benchmark
    public void randomArraySort(GroupSortBenchmark.TestState s) {
        Arrays.sort(s.items);
    }

    /** Group-sort of input that is already group-sorted (best case). */
    @Benchmark
    public void preSortedGroupSort(GroupSortBenchmark.TestState s) {
        ArrayGroupSort.sort(s.groupSortedItems);
    }

    /** Plain sort of input that is already sorted (best case). */
    @Benchmark
    public void preSortedArraySort(GroupSortBenchmark.TestState s) {
        Arrays.sort(s.sortedItems);
    }

    /** Group-sort of reverse-group-sorted input (adversarial case). */
    @Benchmark
    public void reverseSortedGroupSort(GroupSortBenchmark.TestState s) {
        ArrayGroupSort.sort(s.reverseGroupSortedItems);
    }

    /** Plain sort of reverse-sorted input (adversarial case). */
    @Benchmark
    public void reverseSortedArraySort(GroupSortBenchmark.TestState s) {
        Arrays.sort(s.reverseSortedItems);
    }
}
| 620 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/test/java/org/apache/bookkeeper/common/allocator | Create_ds/bookkeeper/bookkeeper-common-allocator/src/test/java/org/apache/bookkeeper/common/allocator/impl/ByteBufAllocatorBuilderTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator.impl;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.util.ReferenceCountUtil;
import java.lang.reflect.Constructor;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorBuilder;
import org.apache.bookkeeper.common.allocator.OutOfMemoryPolicy;
import org.apache.bookkeeper.common.allocator.PoolingPolicy;
import org.junit.Test;
/**
 * Tests for {@link ByteBufAllocatorBuilderImpl}.
 */
public class ByteBufAllocatorBuilderTest {

    // Netty's OutOfDirectMemoryError has no accessible constructor, so an
    // instance is created reflectively to simulate direct-memory exhaustion.
    private static final OutOfMemoryError outOfDirectMemException;

    static {
        try {
            Class<?> clazz = ByteBufAllocatorBuilderTest.class.getClassLoader()
                    .loadClass("io.netty.util.internal.OutOfDirectMemoryError");
            @SuppressWarnings("unchecked")
            Constructor<OutOfMemoryError> constructor = (Constructor<OutOfMemoryError>) clazz
                    .getDeclaredConstructor(String.class);
            constructor.setAccessible(true);
            outOfDirectMemException = constructor.newInstance("no mem");
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /** ThrowException policy: OOM propagates to the caller and the listener fires. */
    @Test
    public void testOomWithException() {
        ByteBufAllocator baseAlloc = mock(ByteBufAllocator.class);
        when(baseAlloc.directBuffer(anyInt(), anyInt())).thenThrow(outOfDirectMemException);
        AtomicReference<OutOfMemoryError> receivedException = new AtomicReference<>();
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .pooledAllocator(baseAlloc)
                .outOfMemoryPolicy(OutOfMemoryPolicy.ThrowException)
                .outOfMemoryListener((e) -> {
                    receivedException.set(e);
                })
                .build();
        try {
            alloc.buffer();
            fail("Should have thrown exception");
        } catch (OutOfMemoryError e) {
            // Expected
            assertEquals(outOfDirectMemException, e);
        }
        // Ensure the notification was triggered even when exception is thrown
        assertEquals(outOfDirectMemException, receivedException.get());
    }

    /** FallbackToHeap policy: a direct-memory OOM silently falls back to the heap allocator. */
    @Test
    public void testOomWithFallback() {
        ByteBufAllocator baseAlloc = mock(ByteBufAllocator.class);
        when(baseAlloc.directBuffer(anyInt(), anyInt())).thenThrow(outOfDirectMemException);
        AtomicReference<OutOfMemoryError> receivedException = new AtomicReference<>();
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .pooledAllocator(baseAlloc)
                .unpooledAllocator(UnpooledByteBufAllocator.DEFAULT)
                .outOfMemoryPolicy(OutOfMemoryPolicy.FallbackToHeap)
                .outOfMemoryListener((e) -> {
                    receivedException.set(e);
                })
                .build();
        // Should not throw exception
        ByteBuf buf = alloc.buffer();
        assertEquals(UnpooledByteBufAllocator.DEFAULT, buf.alloc());
        // No notification should have been triggered
        assertNull(receivedException.get());
    }

    /** FallbackToHeap policy: when the heap fallback also OOMs, the heap error propagates. */
    @Test
    public void testOomWithFallbackAndNoMoreHeap() {
        ByteBufAllocator baseAlloc = mock(ByteBufAllocator.class);
        when(baseAlloc.directBuffer(anyInt(), anyInt())).thenThrow(outOfDirectMemException);
        ByteBufAllocator heapAlloc = mock(ByteBufAllocator.class);
        OutOfMemoryError noHeapError = new OutOfMemoryError("no more heap");
        when(heapAlloc.heapBuffer(anyInt(), anyInt())).thenThrow(noHeapError);
        AtomicReference<OutOfMemoryError> receivedException = new AtomicReference<>();
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .pooledAllocator(baseAlloc)
                .unpooledAllocator(heapAlloc)
                .outOfMemoryPolicy(OutOfMemoryPolicy.FallbackToHeap)
                .outOfMemoryListener((e) -> {
                    receivedException.set(e);
                })
                .build();
        try {
            alloc.buffer();
            fail("Should have thrown exception");
        } catch (OutOfMemoryError e) {
            // Expected
            assertEquals(noHeapError, e);
        }
        // Ensure the notification was triggered even when exception is thrown
        assertEquals(noHeapError, receivedException.get());
    }

    /** Unpooled policy: an explicit direct-buffer request cannot fall back and the OOM propagates. */
    @Test
    public void testOomUnpooledDirect() {
        ByteBufAllocator heapAlloc = mock(ByteBufAllocator.class);
        OutOfMemoryError noMemError = new OutOfMemoryError("no more direct mem");
        when(heapAlloc.directBuffer(anyInt(), anyInt())).thenThrow(noMemError);
        AtomicReference<OutOfMemoryError> receivedException = new AtomicReference<>();
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .poolingPolicy(PoolingPolicy.UnpooledHeap)
                .unpooledAllocator(heapAlloc)
                .outOfMemoryPolicy(OutOfMemoryPolicy.FallbackToHeap)
                .outOfMemoryListener((e) -> {
                    receivedException.set(e);
                })
                .build();
        try {
            alloc.directBuffer();
            fail("Should have thrown exception");
        } catch (OutOfMemoryError e) {
            // Expected
            assertEquals(noMemError, e);
        }
        // Ensure the notification was triggered even when exception is thrown
        assertEquals(noMemError, receivedException.get());
    }

    /** Unpooled policy: a heap-buffer OOM propagates and the listener fires. */
    @Test
    public void testOomUnpooledWithHeap() {
        ByteBufAllocator heapAlloc = mock(ByteBufAllocator.class);
        OutOfMemoryError noHeapError = new OutOfMemoryError("no more heap");
        when(heapAlloc.heapBuffer(anyInt(), anyInt())).thenThrow(noHeapError);
        AtomicReference<OutOfMemoryError> receivedException = new AtomicReference<>();
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .poolingPolicy(PoolingPolicy.UnpooledHeap)
                .unpooledAllocator(heapAlloc)
                .outOfMemoryPolicy(OutOfMemoryPolicy.FallbackToHeap)
                .outOfMemoryListener((e) -> {
                    receivedException.set(e);
                })
                .build();
        try {
            alloc.heapBuffer();
            fail("Should have thrown exception");
        } catch (OutOfMemoryError e) {
            // Expected
            assertEquals(noHeapError, e);
        }
        // Ensure the notification was triggered even when exception is thrown
        assertEquals(noHeapError, receivedException.get());
    }

    /** UnpooledHeap policy delegates to the default unpooled allocator. */
    @Test
    public void testUnpooled() {
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .poolingPolicy(PoolingPolicy.UnpooledHeap)
                .build();
        ByteBuf buf = alloc.buffer();
        assertEquals(UnpooledByteBufAllocator.DEFAULT, buf.alloc());
        assertTrue(buf.hasArray());
        ByteBuf buf2 = alloc.directBuffer();
        assertEquals(UnpooledByteBufAllocator.DEFAULT, buf2.alloc());
        assertFalse(buf2.hasArray());
    }

    /** PooledDirect policy delegates every request to the configured pooled allocator. */
    @Test
    public void testPooled() {
        PooledByteBufAllocator pooledAlloc = new PooledByteBufAllocator(true);
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .poolingPolicy(PoolingPolicy.PooledDirect)
                .pooledAllocator(pooledAlloc)
                .build();
        assertTrue(alloc.isDirectBufferPooled());
        ByteBuf buf1 = alloc.buffer();
        assertEquals(pooledAlloc, buf1.alloc());
        assertFalse(buf1.hasArray());
        ReferenceCountUtil.release(buf1);
        ByteBuf buf2 = alloc.directBuffer();
        assertEquals(pooledAlloc, buf2.alloc());
        assertFalse(buf2.hasArray());
        ReferenceCountUtil.release(buf2);
        ByteBuf buf3 = alloc.heapBuffer();
        assertEquals(pooledAlloc, buf3.alloc());
        assertTrue(buf3.hasArray());
        ReferenceCountUtil.release(buf3);
    }

    /** PooledDirect with custom concurrency builds a PooledByteBufAllocator with that many arenas. */
    @Test
    public void testPooledWithDefaultAllocator() {
        ByteBufAllocator alloc = ByteBufAllocatorBuilder.create()
                .poolingPolicy(PoolingPolicy.PooledDirect)
                .poolingConcurrency(3)
                .build();
        assertTrue(alloc.isDirectBufferPooled());
        ByteBuf buf1 = alloc.buffer();
        assertEquals(PooledByteBufAllocator.class, buf1.alloc().getClass());
        assertEquals(3, ((PooledByteBufAllocator) buf1.alloc()).metric().numDirectArenas());
        assertFalse(buf1.hasArray());
        ReferenceCountUtil.release(buf1);
        ByteBuf buf2 = alloc.directBuffer();
        assertFalse(buf2.hasArray());
        ReferenceCountUtil.release(buf2);
        ByteBuf buf3 = alloc.heapBuffer();
        assertTrue(buf3.hasArray());
        ReferenceCountUtil.release(buf3);
    }
}
| 621 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/LeakDetectionPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator;
import lombok.extern.slf4j.Slf4j;
/**
 * Define the policy for the Netty leak detector.
 */
@Slf4j
public enum LeakDetectionPolicy {

    /** Leak detection disabled: zero tracking overhead. */
    Disabled,

    /** Samples roughly 1% of allocated buffers for leak tracking. */
    Simple,

    /**
     * Samples roughly 1% of allocated buffers for leak tracking, recording
     * stack traces of the places where each tracked buffer was used.
     */
    Advanced,

    /**
     * Tracks every allocated buffer, recording stack traces of the places
     * where each buffer was used. Introduces very significant overhead.
     */
    Paranoid;

    /**
     * Resolves a policy from its case-insensitive name, falling back to
     * {@link #Disabled} (with a warning) when the name is unknown.
     *
     * @param levelStr policy name; surrounding whitespace is tolerated
     * @return the matching policy, or {@link #Disabled} if none matches
     */
    public static LeakDetectionPolicy parseLevel(String levelStr) {
        final String candidate = levelStr.trim();
        for (LeakDetectionPolicy policy : values()) {
            if (policy.name().equalsIgnoreCase(candidate)) {
                return policy;
            }
        }
        log.warn("Parse leak detection policy level {} failed. Use the default level: {}", levelStr,
                LeakDetectionPolicy.Disabled.name());
        return LeakDetectionPolicy.Disabled;
    }
}
| 622 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/OutOfMemoryPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator;
/**
 * Represents the action to take when it's not possible to allocate memory.
 */
public enum OutOfMemoryPolicy {

    /** Let the regular {@link OutOfMemoryError} propagate; take no additional action. */
    ThrowException,

    /**
     * When a direct-memory allocation fails, retry the allocation as an
     * unpooled buffer on the JVM heap instead.
     *
     * <p>This helps absorb allocation spikes: heap allocations naturally slow
     * the process down and trigger a full GC cleanup if the heap itself fills up.
     */
    FallbackToHeap,
}
| 623 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/ByteBufAllocatorWithOomHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator;
import io.netty.buffer.ByteBufAllocator;
import java.util.function.Consumer;
/**
 * A {@link ByteBufAllocator} that also lets callers install a handler to be
 * notified when an allocation fails with an {@link OutOfMemoryError}.
 */
public interface ByteBufAllocatorWithOomHandler extends ByteBufAllocator {
    /**
     * Replaces the handler invoked when an allocation fails with an
     * {@link OutOfMemoryError}.
     *
     * @param handler consumer receiving the error that caused the failed allocation
     */
    void setOomHandler(Consumer<OutOfMemoryError> handler);
}
| 624 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/ByteBufAllocatorBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import java.util.function.Consumer;
import org.apache.bookkeeper.common.allocator.impl.ByteBufAllocatorBuilderImpl;
/**
 * Builder object to customize a ByteBuf allocator.
 */
public interface ByteBufAllocatorBuilder {
    /**
     * Creates a new {@link ByteBufAllocatorBuilder}.
     *
     * @return a fresh builder preloaded with the default settings
     */
    static ByteBufAllocatorBuilder create() {
        return new ByteBufAllocatorBuilderImpl();
    }
    /**
     * Finalize the configured {@link ByteBufAllocator}.
     *
     * @return the built allocator, which also exposes an OOM handler hook
     */
    ByteBufAllocatorWithOomHandler build();
    /**
     * Specify a custom allocator where the allocation requests should be
     * forwarded to. Only used when the pooling policy is
     * {@link PoolingPolicy#PooledDirect}.
     *
     * <p>Default is to use a new instance of {@link PooledByteBufAllocator}.
     *
     * @param pooledAllocator the pooled allocator to delegate to
     */
    ByteBufAllocatorBuilder pooledAllocator(ByteBufAllocator pooledAllocator);
    /**
     * Specify a custom allocator where the allocation requests should be
     * forwarded to. Used for unpooled allocations and as the heap fallback.
     *
     * <p>Default is to use {@link UnpooledByteBufAllocator#DEFAULT}.
     *
     * @param unpooledAllocator the unpooled allocator to delegate to
     */
    ByteBufAllocatorBuilder unpooledAllocator(ByteBufAllocator unpooledAllocator);
    /**
     * Define the memory pooling policy.
     *
     * <p>Default is {@link PoolingPolicy#PooledDirect}
     *
     * @param policy whether to pool direct memory or allocate unpooled heap buffers
     */
    ByteBufAllocatorBuilder poolingPolicy(PoolingPolicy policy);
    /**
     * Controls the amount of concurrency for the memory pool.
     *
     * <p>Default is to have a number of allocator arenas equals to 2 * CPUS.
     *
     * <p>Decreasing this number will reduce the amount of memory overhead, at the
     * expense of increased allocation contention.
     *
     * @param poolingConcurrency number of allocator arenas
     */
    ByteBufAllocatorBuilder poolingConcurrency(int poolingConcurrency);
    /**
     * Define the OutOfMemory handling policy.
     *
     * <p>Default is {@link OutOfMemoryPolicy#FallbackToHeap}
     *
     * @param policy what to do when a direct-memory allocation fails
     */
    ByteBufAllocatorBuilder outOfMemoryPolicy(OutOfMemoryPolicy policy);
    /**
     * Add a listener that is triggered whenever there is an allocation failure.
     *
     * <p>Application can use this to trigger alerting or process restarting.
     *
     * @param listener consumer invoked with the {@link OutOfMemoryError} on each failure
     */
    ByteBufAllocatorBuilder outOfMemoryListener(Consumer<OutOfMemoryError> listener);
    /**
     * Enable the leak detection for the allocator.
     *
     * <p>Default is {@link LeakDetectionPolicy#Disabled}
     *
     * @param leakDetectionPolicy how aggressively Netty should track buffer leaks
     */
    ByteBufAllocatorBuilder leakDetectionPolicy(LeakDetectionPolicy leakDetectionPolicy);
}
| 625 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* defines the utilities for allocator used across the project.
*/
package org.apache.bookkeeper.common.allocator; | 626 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/PoolingPolicy.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator;
/**
 * Define a policy for allocating buffers.
 */
public enum PoolingPolicy {

    /**
     * Allocate memory from the JVM heap without any pooling.
     *
     * <p>Lowest memory-usage overhead, since the JVM GC automatically reclaims
     * the buffers, but may impose a performance penalty at high throughput.
     */
    UnpooledHeap,

    /**
     * Use direct memory for all buffers and pool that memory.
     *
     * <p>Direct memory avoids JVM GC overhead and most memory copies when
     * reading from and writing to socket channels.
     *
     * <p>Pooling adds memory-space overhead: the allocator fragments, and
     * threads keep a thread-local portion of memory to avoid contention
     * where possible.
     */
    PooledDirect
}
| 627 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/impl/ByteBufAllocatorImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator.impl;
import io.netty.buffer.AbstractByteBufAllocator;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import io.netty.util.ResourceLeakDetector;
import io.netty.util.ResourceLeakDetector.Level;
import java.util.function.Consumer;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorWithOomHandler;
import org.apache.bookkeeper.common.allocator.LeakDetectionPolicy;
import org.apache.bookkeeper.common.allocator.OutOfMemoryPolicy;
import org.apache.bookkeeper.common.allocator.PoolingPolicy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implementation of {@link ByteBufAllocator}.
 *
 * <p>Delegates to either a pooled direct-memory allocator or an unpooled
 * allocator depending on the configured {@link PoolingPolicy}, and applies the
 * configured {@link OutOfMemoryPolicy} (propagate vs. fall back to heap) when
 * an allocation fails with an {@link OutOfMemoryError}. Every OOM that is
 * ultimately propagated is first reported to the configured listener.
 */
public class ByteBufAllocatorImpl extends AbstractByteBufAllocator implements ByteBufAllocatorWithOomHandler {
    private static final Logger log = LoggerFactory.getLogger(ByteBufAllocatorImpl.class);
    // Same as AbstractByteBufAllocator, but copied here since it's not visible
    private static final int DEFAULT_INITIAL_CAPACITY = 256;
    private static final int DEFAULT_MAX_CAPACITY = Integer.MAX_VALUE;
    // Non-null only when poolingPolicy == PooledDirect (see constructor).
    private final ByteBufAllocator pooledAllocator;
    // Never null: defaults to UnpooledByteBufAllocator.DEFAULT.
    private final ByteBufAllocator unpooledAllocator;
    private final PoolingPolicy poolingPolicy;
    private final OutOfMemoryPolicy outOfMemoryPolicy;
    // NOTE(review): mutable via setOomHandler() but not volatile; a handler
    // installed after startup may not be immediately visible to allocating
    // threads — confirm whether prompt visibility matters to callers.
    private Consumer<OutOfMemoryError> outOfMemoryListener;
    /**
     * Builds the allocator.
     *
     * @param pooledAllocator pooled allocator to delegate to, or null to construct/reuse a default
     *         (only consulted when {@code poolingPolicy} is {@code PooledDirect})
     * @param unpooledAllocator unpooled allocator, or null for {@link UnpooledByteBufAllocator#DEFAULT}
     * @param poolingPolicy whether to pool direct memory or allocate unpooled
     * @param poolingConcurrency number of pool arenas (only used when building a default pooled allocator)
     * @param outOfMemoryPolicy what to do when a direct allocation OOMs
     * @param outOfMemoryListener callback for allocation failures; null installs a logging default
     * @param leakDetectionPolicy leak-detector level — applied globally via the static
     *         {@link ResourceLeakDetector} setting, so it affects all Netty allocators in the JVM
     */
    ByteBufAllocatorImpl(ByteBufAllocator pooledAllocator, ByteBufAllocator unpooledAllocator,
            PoolingPolicy poolingPolicy, int poolingConcurrency, OutOfMemoryPolicy outOfMemoryPolicy,
            Consumer<OutOfMemoryError> outOfMemoryListener,
            LeakDetectionPolicy leakDetectionPolicy) {
        super(poolingPolicy == PoolingPolicy.PooledDirect /* preferDirect */);
        this.poolingPolicy = poolingPolicy;
        this.outOfMemoryPolicy = outOfMemoryPolicy;
        if (outOfMemoryListener == null) {
            // Default listener: just log; the error is still rethrown by callers.
            this.outOfMemoryListener = (v) -> {
                log.error("Unable to allocate memory", v);
            };
        } else {
            this.outOfMemoryListener = outOfMemoryListener;
        }
        if (poolingPolicy == PoolingPolicy.PooledDirect) {
            if (pooledAllocator == null) {
                if (poolingConcurrency == PooledByteBufAllocator.defaultNumDirectArena()) {
                    // If all the parameters are the same as in the default Netty pool,
                    // just reuse the static instance as the underlying allocator.
                    this.pooledAllocator = PooledByteBufAllocator.DEFAULT;
                } else {
                    this.pooledAllocator = new PooledByteBufAllocator(
                            true /* preferDirect */,
                            poolingConcurrency /* nHeapArena */,
                            poolingConcurrency /* nDirectArena */,
                            PooledByteBufAllocator.defaultPageSize(),
                            PooledByteBufAllocator.defaultMaxOrder(),
                            PooledByteBufAllocator.defaultSmallCacheSize(),
                            PooledByteBufAllocator.defaultNormalCacheSize(),
                            PooledByteBufAllocator.defaultUseCacheForAllThreads());
                }
            } else {
                this.pooledAllocator = pooledAllocator;
            }
        } else {
            // Unpooled policy: no pooled allocator at all.
            this.pooledAllocator = null;
        }
        this.unpooledAllocator = (unpooledAllocator != null) ? unpooledAllocator : UnpooledByteBufAllocator.DEFAULT;
        // The setting is static in Netty, so it will actually affect all
        // allocators
        switch (leakDetectionPolicy) {
        case Disabled:
            if (log.isDebugEnabled()) {
                log.debug("Disable Netty allocator leak detector");
            }
            ResourceLeakDetector.setLevel(Level.DISABLED);
            break;
        case Simple:
            log.info("Setting Netty allocator leak detector to Simple");
            ResourceLeakDetector.setLevel(Level.SIMPLE);
            break;
        case Advanced:
            log.info("Setting Netty allocator leak detector to Advanced");
            ResourceLeakDetector.setLevel(Level.ADVANCED);
            break;
        case Paranoid:
            log.info("Setting Netty allocator leak detector to Paranoid");
            ResourceLeakDetector.setLevel(Level.PARANOID);
            break;
        }
    }
    /** Allocates a buffer with default initial/max capacity, honoring the pooling policy. */
    @Override
    public ByteBuf buffer() {
        return buffer(DEFAULT_INITIAL_CAPACITY);
    }
    /** Allocates a buffer with the given initial capacity and the default max capacity. */
    @Override
    public ByteBuf buffer(int initialCapacity) {
        return buffer(initialCapacity, DEFAULT_MAX_CAPACITY);
    }
    /**
     * Allocates a buffer. With the pooled-direct policy this is a direct buffer
     * that may fall back to heap on OOM (per the OOM policy); otherwise it is
     * an unpooled heap buffer.
     */
    @Override
    public ByteBuf buffer(int initialCapacity, int maxCapacity) {
        if (poolingPolicy == PoolingPolicy.PooledDirect) {
            return newDirectBuffer(initialCapacity, maxCapacity, true /* can fallback to heap if needed */);
        } else {
            return newHeapBuffer(initialCapacity, maxCapacity);
        }
    }
    /**
     * Allocates a heap buffer, notifying the OOM listener before rethrowing any
     * {@link OutOfMemoryError}.
     */
    @Override
    protected ByteBuf newHeapBuffer(int initialCapacity, int maxCapacity) {
        try {
            // There are few cases in which we ask explicitly for a pooled
            // heap buffer.
            ByteBufAllocator alloc = (poolingPolicy == PoolingPolicy.PooledDirect) ? pooledAllocator
                    : unpooledAllocator;
            return alloc.heapBuffer(initialCapacity, maxCapacity);
        } catch (OutOfMemoryError e) {
            outOfMemoryListener.accept(e);
            throw e;
        }
    }
    @Override
    protected ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity) {
        // If caller asked specifically for a direct buffer, we cannot fallback to heap
        return newDirectBuffer(initialCapacity, maxCapacity, false);
    }
    /**
     * Allocates a direct buffer.
     *
     * @param canFallbackToHeap when true and the OOM policy is FallbackToHeap,
     *         a direct-memory OOM is absorbed by retrying on the unpooled heap
     *         (without notifying the listener); in every other failure path the
     *         listener is notified and the error is rethrown
     */
    private ByteBuf newDirectBuffer(int initialCapacity, int maxCapacity, boolean canFallbackToHeap) {
        if (poolingPolicy == PoolingPolicy.PooledDirect) {
            try {
                return pooledAllocator.directBuffer(initialCapacity, maxCapacity);
            } catch (OutOfMemoryError e) {
                if (canFallbackToHeap && outOfMemoryPolicy == OutOfMemoryPolicy.FallbackToHeap) {
                    try {
                        return unpooledAllocator.heapBuffer(initialCapacity, maxCapacity);
                    } catch (OutOfMemoryError e2) {
                        outOfMemoryListener.accept(e2);
                        throw e2;
                    }
                } else {
                    // ThrowException
                    outOfMemoryListener.accept(e);
                    throw e;
                }
            }
        } else {
            // Unpooled policy: the caller explicitly asked for a direct buffer,
            // so allocate an unpooled direct buffer here. (The earlier comment
            // claimed heap buffers were forced on this path, which did not
            // match the code — unpooled direct buffers do carry a high
            // allocation/reclaim overhead, so callers should prefer buffer().)
            try {
                return unpooledAllocator.directBuffer(initialCapacity, maxCapacity);
            } catch (OutOfMemoryError e) {
                outOfMemoryListener.accept(e);
                throw e;
            }
        }
    }
    /** True only when operating in pooled-direct mode with a pooling delegate. */
    @Override
    public boolean isDirectBufferPooled() {
        return pooledAllocator != null && pooledAllocator.isDirectBufferPooled();
    }
    /** Replaces the OOM listener used for all subsequent allocation failures. */
    @Override
    public void setOomHandler(Consumer<OutOfMemoryError> handler) {
        this.outOfMemoryListener = handler;
    }
}
| 628 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/impl/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implements the utilities for allocator used across the project.
*/
package org.apache.bookkeeper.common.allocator.impl; | 629 |
0 | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator | Create_ds/bookkeeper/bookkeeper-common-allocator/src/main/java/org/apache/bookkeeper/common/allocator/impl/ByteBufAllocatorBuilderImpl.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.common.allocator.impl;
import io.netty.buffer.ByteBufAllocator;
import java.util.function.Consumer;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorBuilder;
import org.apache.bookkeeper.common.allocator.ByteBufAllocatorWithOomHandler;
import org.apache.bookkeeper.common.allocator.LeakDetectionPolicy;
import org.apache.bookkeeper.common.allocator.OutOfMemoryPolicy;
import org.apache.bookkeeper.common.allocator.PoolingPolicy;
/**
* Implementation of {@link ByteBufAllocatorBuilder}.
*/
public class ByteBufAllocatorBuilderImpl implements ByteBufAllocatorBuilder {

    // Optional caller-supplied allocators; when left null, ByteBufAllocatorImpl
    // is expected to fall back to its own defaults.
    ByteBufAllocator pooledAllocator = null;
    ByteBufAllocator unpooledAllocator = null;
    // Default policy: pooled direct buffers.
    PoolingPolicy poolingPolicy = PoolingPolicy.PooledDirect;
    // Default pooling concurrency: two arenas per available processor.
    int poolingConcurrency = 2 * Runtime.getRuntime().availableProcessors();
    // On direct-buffer OOM, fall back to unpooled heap buffers by default.
    OutOfMemoryPolicy outOfMemoryPolicy = OutOfMemoryPolicy.FallbackToHeap;
    // Optional callback invoked when an OutOfMemoryError is encountered.
    Consumer<OutOfMemoryError> outOfMemoryListener = null;
    // Netty leak detection is off by default.
    LeakDetectionPolicy leakDetectionPolicy = LeakDetectionPolicy.Disabled;

    /**
     * Builds the allocator from the currently configured settings.
     *
     * @return a new {@link ByteBufAllocatorWithOomHandler}
     */
    @Override
    public ByteBufAllocatorWithOomHandler build() {
        return new ByteBufAllocatorImpl(pooledAllocator, unpooledAllocator, poolingPolicy, poolingConcurrency,
                outOfMemoryPolicy, outOfMemoryListener, leakDetectionPolicy);
    }

    /** Sets the allocator used when pooling is enabled. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder pooledAllocator(ByteBufAllocator pooledAllocator) {
        this.pooledAllocator = pooledAllocator;
        return this;
    }

    /** Sets the allocator used when pooling is disabled. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder unpooledAllocator(ByteBufAllocator unpooledAllocator) {
        this.unpooledAllocator = unpooledAllocator;
        return this;
    }

    /** Sets the pooling policy. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder poolingPolicy(PoolingPolicy policy) {
        this.poolingPolicy = policy;
        return this;
    }

    /** Sets the number of pooling arenas. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder poolingConcurrency(int poolingConcurrency) {
        this.poolingConcurrency = poolingConcurrency;
        return this;
    }

    /** Sets the behavior on OutOfMemoryError. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder outOfMemoryPolicy(OutOfMemoryPolicy policy) {
        this.outOfMemoryPolicy = policy;
        return this;
    }

    /** Sets the callback notified on OutOfMemoryError. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder outOfMemoryListener(Consumer<OutOfMemoryError> listener) {
        this.outOfMemoryListener = listener;
        return this;
    }

    /** Sets the Netty leak-detection policy. Fluent setter. */
    @Override
    public ByteBufAllocatorBuilder leakDetectionPolicy(LeakDetectionPolicy leakDetectionPolicy) {
        this.leakDetectionPolicy = leakDetectionPolicy;
        return this;
    }
}
| 630 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/FastTimerTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import static org.junit.Assert.assertEquals;
import com.codahale.metrics.Snapshot;
import java.util.ArrayList;
import java.util.Random;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import org.junit.Test;
/**
* Unit tests for FastTimer.
*
*/
public class FastTimerTest {

    /*
     * To simplify testing, we're over-riding the time source used by FastTimer with some
     * fake time we're incrementing manually. This speeds-up testing (we don't have to wait
     * for real seconds to elapse) and also guarantees deterministic behavior for the unit
     * test.
     */
    private static AtomicInteger mockedTime = new AtomicInteger(0);

    /** Advances the mocked clock by one second and returns the new time. */
    private int incSec() {
        return mockedTime.incrementAndGet();
    }

    /**
     * Creates a FastTimer whose notion of "now" is driven by {@link #mockedTime}
     * instead of the real clock. Note that getTime() is called from the FastTimer
     * constructor, which works here because mockedTime is static.
     */
    private FastTimer getMockedFastTimer(int timeWindowSeconds, FastTimer.Buckets buckets) {
        return new FastTimer(timeWindowSeconds, buckets) {
            @Override
            protected int getTime() {
                return mockedTime.get();
            }
        };
    }

    /**
     * Verifies that getBucket() maps a value to the bucket whose inclusive
     * [lowerBound, upperBound] range contains it, and that values just outside
     * a bucket's bounds land in the adjacent buckets.
     */
    @Test
    public void testBuckets() {
        FastTimer t = new FastTimer(1, FastTimer.Buckets.fine);
        for (int b = 0; b < t.getNumberOfBuckets(); b++) {
            long lowerBound = b > 0 ? t.getBucketBound(b - 1) + 1 : 0;
            long bucketMean = t.getBucketValue(b);
            long upperBound = t.getBucketBound(b);
            System.out.println(String.format("Bucket %3d [%12d - %12d], avg=%12d",
                    b, lowerBound, upperBound, bucketMean));
            assertEquals(String.format("bucket for lowerBound value %d", lowerBound),
                    b, t.getBucket(lowerBound));
            assertEquals(String.format("bucket for bucketMean value %d", bucketMean),
                    b, t.getBucket(bucketMean));
            assertEquals(String.format("bucket for upperBound value %d", upperBound),
                    b, t.getBucket(upperBound));
            if (b > 0) {
                // one below this bucket's lower bound falls into the previous bucket
                assertEquals(String.format("bucket before bucket %d", b), b - 1, t.getBucket(lowerBound - 1));
            }
            if (b + 1 < t.getNumberOfBuckets()) {
                // one above this bucket's upper bound falls into the next bucket
                assertEquals(String.format("bucket after bucket %d", b), b + 1, t.getBucket(upperBound + 1));
            }
        }
    }

    /**
     * Feeds 10,001 evenly distributed values (0..10,000 microseconds) into a
     * timer and checks count, min/max/mean, and selected percentiles of the
     * resulting snapshot.
     */
    @Test
    public void testFunctional() {
        FastTimer t = getMockedFastTimer(1, FastTimer.Buckets.fine);
        for (int i = 0; i <= 10000; i++) {
            t.update(i, TimeUnit.MICROSECONDS);
        }
        incSec(); // advance mocked time to next second
        Snapshot s = t.getSnapshot();
        assertEquals("FastTimer.getCount()", 10001, t.getCount());
        // min is 1 ns rather than 0 because a 0-duration event is recorded as 1 ns
        assertEquals("FastSnapshot.getMin()", 1, s.getMin());
        assertEquals("FastSnapshot.getMax()", TimeUnit.MICROSECONDS.toNanos(10000), s.getMax());
        assertEquals("FastSnapshot.getMean()", TimeUnit.MICROSECONDS.toNanos(5000), (long) s.getMean());
        assertEquals("FastSnapshot.getMedian()", TimeUnit.MICROSECONDS.toNanos(5000), (long) s.getMedian());
        assertEquals("FastSnapshot.getValue(0.1)", TimeUnit.MICROSECONDS.toNanos(1000), (long) s.getValue(0.1));
        assertEquals("FastSnapshot.getValue(0.9)", TimeUnit.MICROSECONDS.toNanos(9000), (long) s.getValue(0.9));
        assertEquals("FastSnapshot.getValue(0.99)", TimeUnit.MICROSECONDS.toNanos(9900), (long) s.getValue(0.99));
    }

    /**
     * Drives the timer through several load phases (different value ranges and
     * rates) and checks rates, min/max, mean, and the 99th percentile at the
     * end of each phase. Uses a fixed random seed for deterministic results.
     */
    @Test
    public void testTimer() {
        // load definitions for testing the timer
        // following 3 array lengths must match: each element defines values for one phase
        final int[] timeRange = new int[] { 90, 190, 50, 90, 100, 100 };
        final int[] timeBase = new int[] { 10, 10, 50, 10, 0, 0 };
        final int[] rate = new int[] { 1000, 1000, 1000, 1000, 0, 1 };
        final int window = 5; // use a 5 second window for testing
        FastTimer t = getMockedFastTimer(window, FastTimer.Buckets.fine);
        Random r = new Random(12345); // fixed random seed for deterministic value distribution
        int phase = 0;
        int sec = 0;
        long count = 0;
        // start generating test load for each of the configured phases
        while (phase < timeRange.length) {
            for (int i = 0; i < rate[phase]; i++) {
                t.update(r.nextInt(timeRange[phase]) + timeBase[phase], TimeUnit.MILLISECONDS);
                count++;
            }
            incSec(); // advance mocked time to next second
            if (++sec % window == 0) {
                // every WINDOW seconds, check the timer values
                Snapshot s = t.getSnapshot();
                System.out.println(String.format(
                        "phase %3d: count=%10d, rate=%6.0f, min=%6.1f, avg=%6.1f, q99=%6.1f, max=%6.1f",
                        phase, t.getCount(), t.getMeanRate(), ((double) s.getMin()) / 1000000.0,
                        s.getMean() / 1000000.0, s.getValue(0.99) / 1000000.0, ((double) s.getMax()) / 1000000.0));
                // check count (events the timer has ever seen)
                assertEquals("FastTimer.getCount()", count, t.getCount());
                // check rate (should be precisely the configured rate)
                assertEquals("FastTimer.getMeanRate()", rate[phase],
                        (int) Math.round(t.getMeanRate()));
                // all rate flavors report over the same fixed window, so they must agree
                assertEquals("FastTimer.getOneMinuteRate()", rate[phase],
                        (int) Math.round(t.getOneMinuteRate()));
                assertEquals("FastTimer.getFiveMinuteRate()", rate[phase],
                        (int) Math.round(t.getFiveMinuteRate()));
                assertEquals("FastTimer.getFifteenMinuteRate()", rate[phase],
                        (int) Math.round(t.getFifteenMinuteRate()));
                // at rates > 1000 (with fixed seed), we know that the following checks will be successful
                if (t.getMeanRate() >= 1000) {
                    // check minimum value == lower bound
                    assertEquals("FastSnapshot.getMin()", timeBase[phase], s.getMin() / 1000000);
                    // check maximum value == upper bound
                    assertEquals("FastSnapshot.getMax()", timeBase[phase] + timeRange[phase] - 1,
                            (s.getMax() / 1000000));
                    // check 99th percentile == upper bound
                    assertEquals("FastSnapshot.getValue(0.99)",
                            t.getBucketBound(t.getBucket(
                                    TimeUnit.MILLISECONDS.toNanos(timeBase[phase] + timeRange[phase] - 1))),
                            (long) s.getValue(0.99));
                    // check mean is within 10% of configured mean
                    assertEquals("FastSnapshot.getMean()", (timeBase[phase] + (timeRange[phase] / 2)) / 10,
                            (int) (Math.round(s.getMean() / 1000000) / 10));
                }
                // start next phase
                phase++;
            }
        }
    }

    /**
     * Updates one timer from 10 concurrent threads (1000 updates each) and
     * verifies that no update is lost and the statistics are exact.
     */
    @Test
    public void testTimerMultiThreaded() {
        final int window = 5; // use a 5 second window for testing
        FastTimer t = getMockedFastTimer(window, FastTimer.Buckets.fine);
        // start 10 threads, which each update the timer 1000 times
        ArrayList<Thread> threads = new ArrayList<Thread>();
        for (int i = 0; i < 10; i++) {
            Thread thread = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    t.update(10, TimeUnit.MILLISECONDS);
                }
            });
            threads.add(thread);
            thread.start();
        }
        // wait for 10 threads to finish
        for (Thread thread : threads) {
            try {
                thread.join();
            } catch (InterruptedException e) {
                // ignore
            }
        }
        incSec(); // advance mocked time to next second
        assertEquals("FastTimer.getCount()", 10000, t.getCount());
        // 10,000 events over the 5-second window = 2,000/s
        assertEquals("FastTimer.getMeanRate()", 2000, (int) Math.round(t.getMeanRate()));
        Snapshot s = t.getSnapshot();
        assertEquals("FastSnapshot.getMin()", 10, s.getMin() / 1000000);
        assertEquals("FastSnapshot.getMax()", 10, (s.getMax() / 1000000));
        assertEquals("FastSnapshot.getValue(0.99)", 10, Math.round(s.getValue(0.99) / 1000000));
        assertEquals("FastSnapshot.getMean()", 10, (int) Math.round(s.getMean() / 1000000));
    }

    /**
     * With Buckets.none, rates, min/max, and mean still work, but percentile
     * queries return 0 (no distribution is recorded).
     */
    @Test
    public void testTimerNoBuckets() {
        final int window = 5; // use a 5 second window for testing
        FastTimer t = getMockedFastTimer(window, FastTimer.Buckets.none);
        for (int i = 0; i < 1000; i++) {
            t.update(10, TimeUnit.MILLISECONDS);
        }
        incSec(); // advance mocked time to next second
        assertEquals("FastTimer.getCount()", 1000, t.getCount());
        assertEquals("FastTimer.getMeanRate()", 200, (int) Math.round(t.getMeanRate()));
        Snapshot s = t.getSnapshot();
        assertEquals("FastSnapshot.getMin()", 10, s.getMin() / 1000000);
        assertEquals("FastSnapshot.getMax()", 10, (s.getMax() / 1000000));
        // percentiles are unavailable without buckets and report 0
        assertEquals("FastSnapshot.getValue(0.99)", 0, Math.round(s.getValue(0.99) / 1000000));
        assertEquals("FastSnapshot.getMean()", 10, (int) Math.round(s.getMean() / 1000000));
    }

    /**
     * Verifies that a FastSnapshot whose event count disagrees with the sum of
     * its bucket counters (possible under concurrent updates) still reports
     * sane min/max/percentile values.
     */
    @Test
    public void testSnapshotOutOfSync() {
        FastTimer t = getMockedFastTimer(1, FastTimer.Buckets.fine);
        t.update(t.getBucketBound(0) - 1, TimeUnit.NANOSECONDS); // add value to 1st bucket
        t.update(t.getBucketBound(1) - 1, TimeUnit.NANOSECONDS); // add value to 2nd bucket
        t.update(t.getBucketBound(2) - 1, TimeUnit.NANOSECONDS); // add value to 3rd bucket
        incSec(); // advance mocked time to next second
        Snapshot s1 = t.getSnapshot();
        long[] buckets = new long[t.getNumberOfBuckets()];
        buckets[0] = 1;
        buckets[1] = 1;
        buckets[2] = 1;
        Snapshot s2 = new FastSnapshot(t,
                t.getBucketBound(0) - 1,
                t.getBucketBound(2) - 1,
                t.getBucketBound(0) + t.getBucketBound(1) + t.getBucketBound(2) + 3,
                4, // count (4) is out of sync with number of recorded events in buckets (3)
                buckets);
        assertEquals("FastSnapshot.getMin()", s1.getMin(), s2.getMin());
        assertEquals("FastSnapshot.getMax()", s1.getMax(), s2.getMax());
        assertEquals("FastSnapshot.getValue(0.95)", (long) s1.getValue(0.95), (long) s2.getValue(0.95));
    }
}
| 631 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/test/java/org/apache/bookkeeper/stats/codahale/CodahaleOpStatsTest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import static org.junit.Assert.assertEquals;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.junit.Test;
/**
* Unit test of {@link CodahaleOpStatsLogger}.
*/
public class CodahaleOpStatsTest {

    /**
     * Converting a {@link CodahaleMetricsProvider} op-stats logger to
     * {@link OpStatsData} must not throw and must reflect recorded events.
     */
    @Test
    public void testToOpStatsData() {
        OpStatsLogger opStats = new CodahaleMetricsProvider()
                .getStatsLogger("test")
                .getOpStatsLogger("testLogger");
        opStats.registerSuccessfulValue(1);
        // the following should not throw any exception
        OpStatsData data = opStats.toOpStatsData();
        assertEquals(1, data.getNumSuccessfulEvents());
    }

    /**
     * Same contract for the {@link FastCodahaleMetricsProvider} flavor.
     */
    @Test
    public void testToFastOpStatsData() {
        OpStatsLogger opStats = new FastCodahaleMetricsProvider()
                .getStatsLogger("test")
                .getOpStatsLogger("testLogger");
        opStats.registerSuccessfulValue(1);
        // the following should not throw any exception
        OpStatsData data = opStats.toOpStatsData();
        assertEquals(1, data.getNumSuccessfulEvents());
    }
}
| 632 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastSnapshot.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import com.codahale.metrics.Snapshot;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.io.OutputStream;
/**
* A snapshot of a FastTimer.
*/
public class FastSnapshot extends Snapshot {
private static final long[] EMPTY_VALUES = new long[] {};
private final FastTimer timer;
private final long min;
private final long max;
private final long sum;
private final long cnt;
private final long pcnt;
private final long[] values;
@SuppressFBWarnings(
value = "EI_EXPOSE_REP2",
justification = "long[] values is newly created array; FastTimer does not hold on to reference")
public FastSnapshot(FastTimer timer, long min, long max, long sum, long cnt, long[] values) {
this.timer = timer;
this.min = min;
this.max = max;
this.sum = sum;
this.cnt = cnt;
this.pcnt = values != null ? sumOf(values) : 0;
this.values = values;
}
@Override
public double getValue(double quantile) {
if (pcnt == 0 || values == null) {
return 0;
}
long qcnt = 0;
for (int i = 0; i < values.length; i++) {
qcnt += values[i];
if (((double) qcnt) / ((double) pcnt) > quantile) {
return timer.getBucketBound(i);
}
}
return timer.getBucketBound(values.length);
}
@Override
public long[] getValues() {
return EMPTY_VALUES; // values in this snapshot represent percentile buckets, but not discrete values
}
@Override
public int size() {
return 0; // values in this snapshot represent percentile buckets, but not discrete values
}
@Override
public long getMax() {
return max;
}
@Override
public double getMean() {
return cnt > 0 ? ((double) sum) / ((double) cnt) : 0;
}
@Override
public long getMin() {
return min;
}
@Override
public double getStdDev() {
if (cnt < 2 || values == null) {
return 0;
}
double avg = getMean();
double var = 0;
for (int i = 0; i < values.length; i++) {
double val = timer.getBucketValue(i);
var += ((double) values[i]) * Math.pow(val - avg, 2);
}
return Math.sqrt(var / ((double) cnt));
}
@Override
public void dump(OutputStream output) {
// values in this snapshot represent percentile buckets, but not discrete values
}
/**
* Calculates the sum of values of an array.
* @param a an array of values
* @return the sum of all array values
*/
private long sumOf(long[] a) {
long sum = 0;
for (long x : a) {
sum += x;
}
return sum;
}
} | 633 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastTimer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import com.codahale.metrics.Reservoir;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
/**
* A fast and (nearly) garbage-free Rate and Response Times Timer.
* FastTimer uses circular arrays which are allocated upfront.
* Timer updates or queries never allocate new objects and thus never
* create garbage.
* A small number of new objects are allocated for snapshots when
* calling getSnapshot().
*/
public class FastTimer extends Timer {
/*
* Design Considerations
* ---------------------
*
* The design goals of this timer implementation are for it to be
* - fast (i.e. few instructions to update a timer)
* - scalable (i.e. little synchronization cost for concurrent timer updates)
* - garbage-free for timer updates (i.e. no object allocation for timer updates)
* - space-efficient (i.e. as little memory footprint as possible while achieving first three goals)
* - provide similar functionality as Codahale's default timers with ExponentiallyDecayingReservoirs
*
* This implementation provides rate and response times over a configurable sliding time window. Data
* is stored in upfront allocated circular arrays, in which each array element holds data
* for one second. Data is overwritten in a circular fashion without the allocation of new data
* structures and is therefore garbage-free for all timer updates.
*
* This implementation does not store individual response times, but instead allocates bucketized counters
* upfront, which are incremented for any event falling into a particular response time bucket. A
* fine-grained bucket definition (intended for capturing successsful events) and a coarse-grained
* bucket definition (intended to capture failure or timed-out events) are provided.
*
* To improve scalability of concurrent timer updates, most data structures are replicated HASH_SIZE
* times, and calling threads updating a timer are hashed to individual instances. Performance tests
* (see below) have shown that this implementation is light-weight enough to achieve slightly better
* scalability than Codahale's default timers even without hashing, and can further improve scalability
* if hashing is used.
*
* Trading off performance and scalability vs. memory footprint, we need to be conservative in the hash
* size we chose. Different flavors of this timer implementation have been evaluated using JMH
* micro-benchmarks (see microbenchmarks/src/main/java/org/apache/bookkeeper/stats/TimerBenchmark.java),
* comparing implementations of FastTimer with a time window of 60 seconds and
* - (DEV1) a HASH_SIZE of 3 for all data structures (meters, counters, min/max, and response time buckets)
* - (DEV2) a HASH_SIZE of 1 for all data structures (meters, counters, min/max, and response time buckets)
* - (FINAL) a HASH_SIZE of 3 for meters, counters, min/max, and no hashing for response time buckets
* to the default timer implementation
* - (BASE-E) Codahale Timer with ExponentiallyDecayingReservoir (default as used by bookkeeper code)
* - (BASE-T) Codahale Timer with SlidingTimeWindowReservoir configured to 60 seconds
* - (BASE-S) Codahale Timer with SlidingWindowReservoir configured to hold 100,000 events.
*
* Based on results below, implementation (FINAL) was chosen as the final FastTimer implementation, as it
* achieves nearly the same throughput as (DEV1) at nearly the same memory footprint as (DEV2), and
* ultimately achieves roughly 3x higher throughput and scalability that Codahale's default implementation
* at around half the memory footprint.
*
* The following results have been collected on an eight core x86 server running at 3.2 GHz (updated
* timers are shared across 4 threads):
*
* Config Timer Impl Timers Threads ops/ms Alloc B/op Kb/TimerPair
* ----------------------------------------------------------------------------------------
* DEV1 FastTimer (Hash 3) 1 4 11487.904 0 253
* DEV1 FastTimer (Hash 3) 10 4 22621.702 0 253
* DEV1 FastTimer (Hash 3) 100 4 21781.319 0 253
* DEV2 FastTimer (Hash 1) 1 4 5138.143 0 88
* DEV2 FastTimer (Hash 1) 10 4 22902.195 0 88
* DEV2 FastTimer (Hash 1) 100 4 19173.085 0 88
* FINAL FastTimer (Hash 3/1) 1 4 9291.002 0 99
* FINAL FastTimer (Hash 3/1) 10 4 16379.940 0 99
* FINAL FastTimer (Hash 3/1) 100 4 16751.020 0 99
* BASE-E CodahaleTimer 1 4 3845.187 82.609 189
* BASE-E CodahaleTimer 10 4 7262.445 35.035 189
* BASE-E CodahaleTimer 100 4 7051.77 32.843 189
* BASE-T CodahaleTimer/TimeWindow 1 4 102.479 90.851 174
* BASE-T CodahaleTimer/TimeWindow 10 4 68.852 84.812 174
* BASE-T CodahaleTimer/TimeWindow 100 4 153.444 136.436 174
* BASE-S CodahaleTimer/SlidingWdw 1 4 4670.543 0 2103 (size=100000)
* BASE-S CodahaleTimer/SlidingWdw 10 4 13696.168 0 2103
* BASE-S CodahaleTimer/SlidingWdw 100 4 12541.936 0 2103
*
* - ops/ms is the number of timer updates per millisecond.
* - Alloc B/op is the number of bytes allocated per timer update
* - Kb/TimerPair is the heap footprint per pair of timers (one with fine-grained, one with coarse-grained buckets)
*
* The following test results include snapshot creation every 109 timer updates (typically, we would assume
* snapshot creation to be much less frequent), and show that also with snapshots in the mix, FastTimer outperforms
* Codahale default Timers both with respect to throughput and scalability as well as object allocation:
*
* Config Timer Impl Timers Threads ops/ms Alloc B/op
* -------------------------------------------------------------------------
* FINAL FastTimer (Hash 3/1) 1 4 1569.953 23.707
* FINAL FastTimer (Hash 3/1) 10 4 7316.794 24.073
* FINAL FastTimer (Hash 3/1) 100 4 6498.215 24.073
* BASE-E CodahaleTimer 1 4 246.953 481.771
* BASE-E CodahaleTimer 10 4 1989.134 476.807
* BASE-E CodahaleTimer 100 4 1514.729 468.624
* BASE-T CodahaleTimer/TimeWindow 1 4 6.063 43795.810
* BASE-T CodahaleTimer/TimeWindow 10 4 44.651 33916.315
* BASE-T CodahaleTimer/TimeWindow 100 4 180.431 12330.939
* BASE-S CodahaleTimer/SlidingWdw 1 4 17.439 14683.756
* BASE-S CodahaleTimer/SlidingWdw 10 4 107.257 14683.745
* BASE-S CodahaleTimer/SlidingWdw 100 4 236.538 9767.106
*
* Unfortunately Codahale does not have a Timer interface we can implement, and some Codahale
* base classes are assuming instances of Timer (for example, our JettyServices instantiate a
* Codahale MetricsServlet, which instantiates a Codahale MetricsModule, which only serializes
* timers that are instances of Timer class into the json output stream). Unless we wanted to
* reimplement or override all these base classes, we can't just implement Codahale's Metered and Sampling
* interfaces. Instead we have to extend its Timer class, even though we're not using any of its
* inherited functionality or data structures. The inherited (unused) member variables of Codahale Timer
* consume slightly less than 512 byte per FastTimer (measured around 425 byte in Codahale 3.1).
* Above memory footprint results include ~ 1 kb of inherited (unused) data structures, which comprise
* around 1% of FastTimer's overall memory footprint.
*
* In terms of functionality, FastTimer provides the same functionality as Codahale's timers
* (in default configuration with ExponentiallyDecayingReservoirs), with the following exceptions:
* - Statistics are kept for a fixed amount of time (rather than exponentially decayed), by
* default 60 seconds. As a consequence, getMeanRate(), getOneMinuteRate(), getFiveMinuteRate()
* and getFifteenMinuteRate() all return the same value if FastTimer is configured to use a
* 60 second time window.
* - FastTimer and FastSnapshot only record bucketized instead of discrete response times. As a
* consequence, the accuracy of percentiles depends on bucket granularity. FastSnapshot also
* can't return discrete values: getValues() returns an empty array, and size returns 0.
*/
/**
* For improved scalability, threads are hased to meters, counters, and min/max values based on
* HASH_SIZE. Note that response time buckets are *not* hashed to reduce memory footprint, and we
* assume that concurrent updates of the same response time bucket are infrequent.
* The hash size could be made configurable in the future (if ever seems necessary). For now, we just
* hard-code it to 3 based on above performance results.
*/
private static final int HASH_SIZE = 3;
/**
* This timer stores rate and response times on a per-second basis for a configurable amount of time
* (default: 60 seconds).
* Note that larger time windows increase the memory footprint of this timer (nearly linear).
*/
private static final int TIME_WINDOW = 60;
/*
* Buckets for percentiles store response times according to the definition in BUCKET_SPEC in the
* form of { numerOfBuckets , nanosecondResolutionPerBucket }.
*
* BUCKET_SPEC_FINE:
* This bucket definition provides fine-grained timing for small values, and more coarse-grained timing
* for larger values. We expect this timer to be used primarily for I/O operations that typically
* range in milliseconds (or sub-milliseconds), with sporadic outliers in the single-digit second
* range. For values larger than 10 seconds, we only keep the maximum value, but no distribution.
*
* BUCKET_SPEC_COARSE:
* This bucket specification provides coarse-grained timing for events in the range of 1 - 20 seconds
* with 1 second granularity.
*
* If this timer is used for timing of events with significantly different value distribution,
* other bucket definitions may be specified.
*
* Note that a larger number of buckets increases the memory footprint of this timer nearly linear
* (as the number of buckets largely dominate the timer's overall memory footprint).
*/
private static final long[][] BUCKET_SPEC_FINE = new long[][] {
{ 100 , 100000}, // 100 buckets of 0.1 ms ( 0.1 - 10.0 ms)
{ 90 , 1000000}, // 90 buckets of 1 ms ( 10 - 100 ms)
{ 90 , 10000000}, // 90 buckets of 10 ms ( 100 - 1,000 ms)
{ 9 , 1000000000}, // 9 buckets of 1000 ms (1,000 - 10,000 ms)
}; // + 1 (default) bucket for all values > 10,000 ms
private static final long[][] BUCKET_SPEC_COARSE = new long[][] {
{ 20 , 1000000000}, // 20 buckets of 1000 ms (1,000 - 20,000 ms)
}; // + 1 (default) bucket for all values > 20,000 ms
/**
* Defines the response time buckets to use.
* - fine: BUCKET_SPEC_FINE
* - coarse: BUCKET_SPEC_COARSE
* - none: no response time buckets
*/
public enum Buckets {
fine,
coarse,
none
}
// index into the second dimension of BUCKET_SPEC arrays
private static final int BS_NUMBUCKETS = 0;
private static final int BS_RESOLUTION = 1;
/*
* approximate space requirements for an instance of FastTimer:
* 4096 + (TIME_WINDOW + 2) * ((HASH_SIZE * 28) + (NUMBUCKETS * 4))
*
* For timeWindow=60 and Buckets.fine: ~ 81 kb
* For timeWindow=60 and Buckets.coarse: ~ 14 kb
*/
private final long[][] bucketSpec;
private final int numBuckets;
private final long[] bucketBounds;
private final int timeWindow;
private final int startTime;
private final AtomicLong[] counter; // indexed by [hash]
private final Object[] locks; // indexed by [hash]
private final int[] lastTime;
private int lastTimeBucket = 0;
private final int[][] meter; // indexed by [hash][time]
private final int[][] buckets; // indexed by [bucket][time]
private final long[][] min; // indexed by [hash][time]
private final long[][] max; // indexed by [hash][time]
private final long[][] sum; // indexed by [hash][time]
/**
* A Dummy reservoir implementation.
* Since we have to extend Codahale's Timer class (see above), we inherit all its member
* objects as well. By default, Timer instantiates a ExponentiallyDecayingReservoir. Since
* we're not making use of it, we instead instantiate our own DummyReservoir to reduce
* memory footprint.
*/
private static class DummyReservoir implements Reservoir {
    // Holds no samples.
    @Override
    public int size() {
        return 0;
    }
    // Discards every update.
    @Override
    public void update(long value) {
    }
    // Never queried; FastTimer builds its own FastSnapshot instead.
    @Override
    public Snapshot getSnapshot() {
        return null;
    }
}
/**
* Constructs a new timer with default time window (60 seconds) and
* default time buckets (fine).
*/
public FastTimer() {
    // TIME_WINDOW (60 s) sliding window with fine-grained response-time buckets.
    this(TIME_WINDOW, Buckets.fine);
}
/**
* Constructs a new timer.
* @param timeWindowSeconds the time window (in seconds) for this timer
* @param buckets the type of buckets to use for response times
*/
/**
 * Constructs a new timer.
 *
 * @param timeWindowSeconds the time window (in seconds) for this timer
 * @param buckets the type of buckets to use for response times
 */
public FastTimer(int timeWindowSeconds, Buckets buckets) {
    // Inherited Codahale Timer state is unused; give it a no-op reservoir.
    super(new DummyReservoir());
    // Two extra seconds of circular-array slots for housekeeping.
    this.timeWindow = timeWindowSeconds + 2;
    switch (buckets) {
    case fine:
        bucketSpec = BUCKET_SPEC_FINE;
        break;
    case coarse:
        bucketSpec = BUCKET_SPEC_COARSE;
        break;
    default:
        bucketSpec = null;
    }
    // Count the configured buckets; +1 default bucket catches all larger values.
    int totalBuckets = 0;
    if (bucketSpec != null) {
        for (long[] spec : bucketSpec) {
            totalBuckets += spec[BS_NUMBUCKETS];
        }
    }
    numBuckets = (totalBuckets > 0 ? totalBuckets + 1 : 0);
    // Precompute the cumulative upper bound (in ns) of each bucket-spec range.
    if (numBuckets > 0) {
        bucketBounds = new long[bucketSpec.length];
        long upper = 0;
        for (int s = 0; s < bucketSpec.length; s++) {
            upper += bucketSpec[s][BS_NUMBUCKETS] * bucketSpec[s][BS_RESOLUTION];
            bucketBounds[s] = upper;
        }
    } else {
        bucketBounds = null;
    }
    // NOTE: getTime() is overridable and deliberately called from the
    // constructor so tests can substitute a mocked clock.
    this.startTime = getTime();
    counter = new AtomicLong[HASH_SIZE];
    for (int h = 0; h < counter.length; h++) {
        counter[h] = new AtomicLong(0);
    }
    meter = new int[HASH_SIZE][timeWindow];
    // Response-time buckets are intentionally NOT hashed (memory footprint).
    this.buckets = numBuckets > 0 ? new int[numBuckets][timeWindow] : null;
    sum = new long[HASH_SIZE][timeWindow];
    min = new long[HASH_SIZE][timeWindow];
    max = new long[HASH_SIZE][timeWindow];
    lastTime = new int[HASH_SIZE];
    locks = new Object[HASH_SIZE];
    for (int h = 0; h < locks.length; h++) {
        locks[h] = new Object();
    }
}
/**
* Returns the number of response time buckets used by this timer.
* @return the number of response time buckets
*/
    public int getNumberOfBuckets() {
        // 0 when bucketing is disabled; otherwise includes the overflow bucket
        return numBuckets;
    }
/**
* Figure out which percentile bucket an event of a given duration belongs into.
* @param duration the duration (in nanoseconds)
* @return the bucket
*/
public int getBucket(long duration) {
if (numBuckets == 0) {
return -1;
}
int bucket = 0;
long lowbound = 0;
for (int i = 0; i < bucketSpec.length; i++) {
if (duration <= bucketBounds[i]) {
return bucket + (int) ((duration - lowbound - 1) / bucketSpec[i][BS_RESOLUTION]);
} else {
bucket += bucketSpec[i][BS_NUMBUCKETS];
lowbound = bucketBounds[i];
}
}
return numBuckets - 1;
}
/**
* Returns the upper bucket bound (inclusive) of a given bucket.
* @param b the bucket
* @return the bound (in nanoseconds)
*/
public long getBucketBound(int b) {
if (numBuckets == 0) {
return -1;
}
int bucket = 0;
long lowbound = 0;
for (int i = 0; i < bucketSpec.length; i++) {
if (b < bucket + bucketSpec[i][BS_NUMBUCKETS]) {
return lowbound + ((long) ((b + 1) - bucket)) * bucketSpec[i][BS_RESOLUTION];
} else {
bucket += bucketSpec[i][BS_NUMBUCKETS];
lowbound = bucketBounds[i];
}
}
return Long.MAX_VALUE;
}
/**
* Returns the average value of a given bucket (the mean between its lower and upper bound).
* @param b the bucket
* @return the average value (in nanoseconds)
*/
public long getBucketValue(int b) {
if (numBuckets == 0) {
return -1;
}
if (b == 0) {
return getBucketBound(0) / 2;
}
if (b == numBuckets - 1) {
return 2 * getBucketBound(numBuckets - 2);
}
return (getBucketBound(b - 1) + getBucketBound(b)) / 2;
}
/**
* Hashes a thread to a hash index.
* @return the hash index
*/
    private int getHash() {
        // hashing threads to timers is cheaper than ThreadLocal timers
        // (thread ids are non-negative, so the modulo stays in [0, HASH_SIZE))
        return (int) (Thread.currentThread().getId() % HASH_SIZE);
    }
/**
* Returns the current absolute time (in seconds).
* @return the current absolute time (in seconds)
*/
    protected int getTime() {
        // based on nanoTime (monotonic), so unaffected by wall-clock adjustments;
        // overridable for tests
        return (int) TimeUnit.NANOSECONDS.toSeconds(System.nanoTime());
    }
/**
* Returns the current second (relative to start time) and, if necessary, performs house-keeping.
* @param hash the hash of the calling thread
* @return the current time since start (in seconds)
*/
    private int getNow(int hash) {
        int now = getTime() - startTime;
        // check whether we need to do housekeeping
        if (now > lastTime[hash]) {
            synchronized (locks[hash]) {
                // now that we have the lock, check again (double-checked under the per-hash lock)
                if (now > lastTime[hash]) {
                    // zero out the ring-buffer slots between the last housekeeping second
                    // and two seconds ahead of "now", so future writes start from clean slots
                    int tstop = (now + 2) % timeWindow;
                    // clear meter for next time period
                    for (int t = (lastTime[hash] + 2) % timeWindow; t != tstop; t = (t + 1) % timeWindow) {
                        meter[hash][t] = 0;
                    }
                    // clear histo for next time period
                    for (int t = (lastTime[hash] + 2) % timeWindow; t != tstop; t = (t + 1) % timeWindow) {
                        sum[hash][t] = 0;
                        min[hash][t] = 0;
                        max[hash][t] = 0;
                    }
                    lastTime[hash] = now;
                }
            }
        }
        // check whether we need to do bucket housekeeping
        // (we have to do this separately since buckets aren't hashed)
        if (numBuckets > 0 && now > lastTimeBucket) {
            synchronized (buckets) {
                // now that we have the lock, check again
                if (now > lastTimeBucket) {
                    int tstop = (now + 2) % timeWindow;
                    for (int b = 0; b < numBuckets; b++) {
                        synchronized (buckets[b]) {
                            for (int t = (lastTimeBucket + 2) % timeWindow; t != tstop; t = (t + 1) % timeWindow) {
                                buckets[b][t] = 0;
                            }
                        }
                    }
                    lastTimeBucket = now;
                }
            }
        }
        return now % timeWindow; // index into the ring buffer for the current second
    }
/**
* Returns the average per-second rate of events this timer has seen.
* The computed rate is calculated for past seconds (not including the current second, which is still being
     * updated). If the specified time exceeds the time window of this timer, only the rate over the configured
     * time window is reported.
* @param seconds the number of seconds over which to calculate the average rate
* @return the average rate (per second).
*/
    public double getRate(int seconds) {
        seconds = Math.min(seconds, timeWindow - 2); // never look further back than the configured window
        int t = getNow(getHash()) - 1; // start from last completed second
        int secFrom = t - seconds;
        long sum = 0;
        for (int h = 0; h < HASH_SIZE; h++) {
            for (int i = t; i > secFrom; i--) {
                // no need to synchronize for reading (meter (int) is written atomically)
                sum += meter[h][(timeWindow + i) % timeWindow];
            }
        }
        return ((double) sum) / (double) seconds;
    }
/**
* Returns the all-time count of events this timer has seen.
* @return the all-time count of events
*/
@Override
public long getCount() {
long sum = 0;
for (AtomicLong c : counter) {
sum += c.get();
}
return sum;
}
    @Override
    public double getFifteenMinuteRate() {
        // bounded by this timer's configured time window (see getRate)
        return getRate(15 * 60);
    }
    @Override
    public double getFiveMinuteRate() {
        return getRate(5 * 60);
    }
    @Override
    public double getMeanRate() {
        // Integer.MAX_VALUE is clamped by getRate to the full time window
        return getRate(Integer.MAX_VALUE);
    }
    @Override
    public double getOneMinuteRate() {
        return getRate(60);
    }
/**
* Returns a snapshot of this timer.
* The computed snapshot is calculated over the complete time interval supported by
* this timer.
* @return a snapshot of this timer
*/
    @Override
    public Snapshot getSnapshot() {
        long sum = 0;
        long cnt = 0;
        long min = 0;
        long max = 0;
        // get time and trigger housekeeping
        int now = getNow(0) - 1; // start from last completed second
        int secFrom = now - (timeWindow - 2);
        for (int i = 1; i < HASH_SIZE; i++) {
            getNow(i); // trigger housekeeping for the remaining hash slots as well
        }
        long[] buckets = (numBuckets > 0 ? new long[numBuckets] : null);
        for (int i = now; i > secFrom; i--) {
            int t = (timeWindow + i) % timeWindow;
            for (int h = 0; h < HASH_SIZE; h++) {
                synchronized (locks[h]) {
                    sum += this.sum[h][t];
                    cnt += this.meter[h][t];
                    // a stored min of 0 means "no value recorded"; prefer any positive minimum
                    if ((this.min[h][t] < min && this.min[h][t] > 0) || min == 0) {
                        min = this.min[h][t];
                    }
                    if (this.max[h][t] > max) {
                        max = this.max[h][t];
                    }
                }
            }
            // no need to synchronize for reading (buckets (int) is written atomically)
            for (int b = 0; b < numBuckets; b++) {
                buckets[b] += this.buckets[b][t];
            }
        }
        return new FastSnapshot(this, min, max, sum, cnt, buckets);
    }
/**
* Add an event to this timer.
* @param duration the time duration of the event
* @param unit the unit of time duration
*/
    @Override
    public void update(long duration, TimeUnit unit) {
        update(unit.toNanos(duration)); // internal bookkeeping is in nanoseconds
    }
/**
* Add an event to this timer.
* @param duration the time duration of the event (in nanoseconds)
*/
    private void update(long duration) {
        if (duration < 1) {
            // we can't time anything that took less than 1 ns (caller gave us wrong value)
            duration = 1;
        }
        int h = getHash();
        int t = getNow(h); // also performs per-second housekeeping if needed
        counter[h].incrementAndGet();
        int b = getBucket(duration); // computed outside the lock; -1 only when numBuckets == 0
        synchronized (locks[h]) {
            meter[h][t]++;
            sum[h][t] += duration;
            if (duration < min[h][t] || min[h][t] == 0) {
                min[h][t] = duration;
            }
            if (duration > max[h][t]) {
                max[h][t] = duration;
            }
        }
        if (numBuckets > 0) {
            // buckets are shared across hash slots, hence the per-bucket lock
            synchronized (buckets[b]) {
                buckets[b][t]++;
            }
        }
    }
}
| 634 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastCodahaleStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import static com.codahale.metrics.MetricRegistry.name;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* A {@link StatsLogger} implemented based on <i>Codahale</i> metrics library.
*/
public class FastCodahaleStatsLogger extends CodahaleStatsLogger {
    // Shared across all logger instances so each fully-qualified stat name maps to
    // exactly one CodahaleOpStatsLogger backed by FastTimer instances.
    private static final ConcurrentHashMap<String, CodahaleOpStatsLogger> statsLoggerCache =
        new ConcurrentHashMap<String, CodahaleOpStatsLogger>();
    FastCodahaleStatsLogger(MetricRegistry metrics, String basename) {
        super(metrics, basename);
    }
    @Override
    @SuppressFBWarnings(
        value = {
            "JLM_JSR166_UTILCONCURRENT_MONITORENTER",
            "AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION"
        },
        justification = "We use synchronized (statsLoggerCache) to make get/put atomic")
    public OpStatsLogger getOpStatsLogger(String statName) {
        CodahaleOpStatsLogger logger;
        String nameSuccess = name(basename, statName);
        // fast path: lock-free lookup for already-registered stats
        logger = statsLoggerCache.get(nameSuccess);
        if (logger == null) {
            // slow path: double-checked under the cache monitor so the two FastTimers
            // are created and registered at most once per stat name
            synchronized (statsLoggerCache) {
                // check again now that we have the lock
                logger = statsLoggerCache.get(nameSuccess);
                if (logger == null) {
                    String nameFailure = name(basename, statName + "-fail");
                    FastTimer success;
                    FastTimer failure;
                    // reuse timers already present in the registry (e.g. from a previous logger)
                    Map<String, Timer> timers = metrics.getTimers();
                    success = timers != null ? (FastTimer) timers.get(nameSuccess) : null;
                    if (success == null) {
                        success = new FastTimer(60, FastTimer.Buckets.fine);
                        metrics.register(nameSuccess, success);
                    }
                    failure = timers != null ? (FastTimer) timers.get(nameFailure) : null;
                    if (failure == null) {
                        // failures use coarse buckets: less precision needed, less memory used
                        failure = new FastTimer(60, FastTimer.Buckets.coarse);
                        metrics.register(nameFailure, failure);
                    }
                    logger = new CodahaleOpStatsLogger(success, failure);
                    statsLoggerCache.put(nameSuccess, logger);
                }
            }
        }
        return logger;
    }
    @Override
    public StatsLogger scope(String scope) {
        String scopeName;
        if (basename == null || 0 == basename.length()) {
            scopeName = scope;
        } else {
            scopeName = name(basename, scope);
        }
        return new FastCodahaleStatsLogger(metrics, scopeName);
    }
}
| 635 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/CodahaleOpStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import com.codahale.metrics.Snapshot;
import com.codahale.metrics.Timer;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.stats.OpStatsLogger;
/**
 * An {@link OpStatsLogger} backed by a pair of Codahale {@link Timer}s:
 * one for successful operations and one for failed operations.
 */
class CodahaleOpStatsLogger implements OpStatsLogger {
    final Timer success;
    final Timer fail;
    CodahaleOpStatsLogger(Timer success, Timer fail) {
        this.success = success;
        this.fail = fail;
    }
    // OpStatsLogger functions
    @Override
    public void registerFailedEvent(long eventLatency, TimeUnit unit) {
        fail.update(eventLatency, unit);
    }
    @Override
    public void registerSuccessfulEvent(long eventLatency, TimeUnit unit) {
        success.update(eventLatency, unit);
    }
    @Override
    public void registerSuccessfulValue(long value) {
        // Values are inserted as millis, which is the unit they will be presented, to maintain 1:1 scale
        success.update(value, TimeUnit.MILLISECONDS);
    }
    @Override
    public void registerFailedValue(long value) {
        // Values are inserted as millis, which is the unit they will be presented, to maintain 1:1 scale
        fail.update(value, TimeUnit.MILLISECONDS);
    }
    @Override
    public synchronized void clear() {
        // can't clear a timer
    }
    /**
     * Summarizes both timers into an {@link OpStatsData} value object.
     * This function should go away soon (hopefully).
     * @return counts, mean latency, and latencies at the default percentiles
     */
    @Override
    public synchronized OpStatsData toOpStatsData() {
        long numFailed = fail.getCount();
        long numSuccess = success.getCount();
        Snapshot s = success.getSnapshot();
        double avgLatencyMillis = s.getMean();
        double[] defaultPercentiles = {10, 50, 90, 99, 99.9, 99.99};
        // every element is assigned in the loop below, so no pre-fill is needed
        long[] latenciesMillis = new long[defaultPercentiles.length];
        for (int i = 0; i < defaultPercentiles.length; i++) {
            latenciesMillis[i] = (long) s.getValue(defaultPercentiles[i] / 100);
        }
        return new OpStatsData(numSuccess, numFailed, avgLatencyMillis, latenciesMillis);
    }
}
| 636 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/FastCodahaleMetricsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.StatsProvider;
/**
* A {@link StatsProvider} implemented based on <i>Codahale</i> metrics library.
*/
@SuppressWarnings("deprecation")
public class FastCodahaleMetricsProvider extends CodahaleMetricsProvider {
    // Identical to the parent provider except that the returned logger uses
    // FastTimer-backed op stats loggers.
    @Override
    public StatsLogger getStatsLogger(String name) {
        initIfNecessary();
        return new FastCodahaleStatsLogger(getMetrics(), name);
    }
}
| 637 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/CodahaleMetricsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import com.codahale.metrics.CsvReporter;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.ScheduledReporter;
import com.codahale.metrics.Slf4jReporter;
import com.codahale.metrics.graphite.Graphite;
import com.codahale.metrics.graphite.GraphiteReporter;
import com.codahale.metrics.jmx.JmxReporter;
import com.codahale.metrics.jvm.GarbageCollectorMetricSet;
import com.codahale.metrics.jvm.MemoryUsageGaugeSet;
import com.google.common.base.Strings;
import com.google.common.net.HostAndPort;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.StatsProvider;
import org.apache.bookkeeper.stats.ThreadRegistry;
import org.apache.commons.configuration.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link StatsProvider} implemented based on <i>Codahale</i> metrics library.
*/
@SuppressWarnings("deprecation")
public class CodahaleMetricsProvider implements StatsProvider {
    static final Logger LOG = LoggerFactory.getLogger(CodahaleMetricsProvider.class);
    MetricRegistry metrics = null;
    List<ScheduledReporter> reporters = new ArrayList<ScheduledReporter>();
    JmxReporter jmx = null;
    // Lazily creates the registry and registers the standard JVM memory/GC metric sets.
    // Synchronized so concurrent callers see a single registry instance.
    synchronized void initIfNecessary() {
        if (metrics == null) {
            metrics = new MetricRegistry();
            metrics.registerAll(new MemoryUsageGaugeSet());
            metrics.registerAll(new GarbageCollectorMetricSet());
        }
    }
    public synchronized MetricRegistry getMetrics() {
        return metrics;
    }
    /**
     * Starts reporting. Reporters (graphite/csv/slf4j/jmx) are configured from the
     * "codahaleStats*" keys; each is optional and independent of the others.
     */
    @Override
    public void start(Configuration conf) {
        initIfNecessary();
        int metricsOutputFrequency = conf.getInt("codahaleStatsOutputFrequencySeconds", 60);
        String prefix = conf.getString("codahaleStatsPrefix", "");
        String graphiteHost = conf.getString("codahaleStatsGraphiteEndpoint");
        String csvDir = conf.getString("codahaleStatsCSVEndpoint");
        String slf4jCat = conf.getString("codahaleStatsSlf4jEndpoint");
        String jmxDomain = conf.getString("codahaleStatsJmxEndpoint");
        if (!Strings.isNullOrEmpty(graphiteHost)) {
            LOG.info("Configuring stats with graphite");
            HostAndPort addr = HostAndPort.fromString(graphiteHost);
            final Graphite graphite = new Graphite(
                    new InetSocketAddress(addr.getHost(), addr.getPort()));
            reporters.add(GraphiteReporter.forRegistry(getMetrics())
                          .prefixedWith(prefix)
                          .convertRatesTo(TimeUnit.SECONDS)
                          .convertDurationsTo(TimeUnit.MILLISECONDS)
                          .filter(MetricFilter.ALL)
                          .build(graphite));
        }
        if (!Strings.isNullOrEmpty(csvDir)) {
            // NOTE: 1/ metrics output files are exclusive to a given process
            // 2/ the output directory must exist
            // 3/ if output files already exist they are not overwritten and there is no metrics output
            File outdir;
            if (!Strings.isNullOrEmpty(prefix)) {
                outdir = new File(csvDir, prefix);
            } else {
                outdir = new File(csvDir);
            }
            LOG.info("Configuring stats with csv output to directory [{}]", outdir.getAbsolutePath());
            reporters.add(CsvReporter.forRegistry(getMetrics())
                          .convertRatesTo(TimeUnit.SECONDS)
                          .convertDurationsTo(TimeUnit.MILLISECONDS)
                          .build(outdir));
        }
        if (!Strings.isNullOrEmpty(slf4jCat)) {
            LOG.info("Configuring stats with slf4j");
            reporters.add(Slf4jReporter.forRegistry(getMetrics())
                          .outputTo(LoggerFactory.getLogger(slf4jCat))
                          .convertRatesTo(TimeUnit.SECONDS)
                          .convertDurationsTo(TimeUnit.MILLISECONDS)
                          .build());
        }
        if (!Strings.isNullOrEmpty(jmxDomain)) {
            LOG.info("Configuring stats with jmx");
            // JMX is started immediately; it is not a ScheduledReporter
            jmx = JmxReporter.forRegistry(getMetrics())
                .inDomain(jmxDomain)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
            jmx.start();
        }
        for (ScheduledReporter r : reporters) {
            r.start(metricsOutputFrequency, TimeUnit.SECONDS);
        }
    }
    @Override
    public void stop() {
        for (ScheduledReporter r : reporters) {
            r.report(); // flush one final report before stopping
            r.stop();
        }
        if (jmx != null) {
            jmx.stop();
        }
        ThreadRegistry.clear();
    }
    @Override
    public StatsLogger getStatsLogger(String name) {
        initIfNecessary();
        return new CodahaleStatsLogger(getMetrics(), name);
    }
    /**
     * Joins stat name components with the Codahale dotted-name convention.
     */
    @Override
    public String getStatsName(String... statsComponents) {
        if (statsComponents.length == 0) {
            return "";
        }
        String baseName = statsComponents[0];
        String[] names = new String[statsComponents.length - 1];
        System.arraycopy(statsComponents, 1, names, 0, names.length);
        return MetricRegistry.name(baseName, names);
    }
}
| 638 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* A lightweight stats library implemention based on <i>Codahale</i> metrics library.
*/
package org.apache.bookkeeper.stats.codahale;
| 639 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/codahale-metrics-provider/src/main/java/org/apache/bookkeeper/stats/codahale/CodahaleStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.codahale;
import static com.codahale.metrics.MetricRegistry.name;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
/**
* A {@link StatsLogger} implemented based on <i>Codahale</i> metrics library.
*/
public class CodahaleStatsLogger implements StatsLogger {
    protected final String basename; // dotted-name prefix for all stats created by this logger
    final MetricRegistry metrics;
    CodahaleStatsLogger(MetricRegistry metrics, String basename) {
        this.metrics = metrics;
        this.basename = basename;
    }
    @Override
    public OpStatsLogger getOpStatsLogger(String statName) {
        // two timers per op stat: "<name>" for successes, "<name>-fail" for failures
        Timer success = metrics.timer(name(basename, statName));
        Timer failure = metrics.timer(name(basename, statName + "-fail"));
        return new CodahaleOpStatsLogger(success, failure);
    }
    @Override
    public Counter getCounter(String statName) {
        // adapt a Codahale counter to the BookKeeper Counter interface
        final com.codahale.metrics.Counter c = metrics.counter(name(basename, statName));
        return new Counter() {
            @Override
            public synchronized void clear() {
                // Codahale counters have no reset; decrement by the current count instead
                long cur = c.getCount();
                c.dec(cur);
            }
            @Override
            public Long get() {
                return c.getCount();
            }
            @Override
            public void inc() {
                c.inc();
            }
            @Override
            public void dec() {
                c.dec();
            }
            @Override
            public void addCount(long delta) {
                c.inc(delta);
            }
            @Override
            public void addLatency(long eventLatency, TimeUnit unit) {
                long valueMillis = unit.toMillis(eventLatency);
                c.inc(valueMillis);
            }
        };
    }
    @Override
    public <T extends Number> void registerGauge(final String statName, final Gauge<T> gauge) {
        String metricName = name(basename, statName);
        // remove any previous registration so re-registering the same name doesn't throw
        metrics.remove(metricName);
        metrics.register(metricName, new com.codahale.metrics.Gauge<T>() {
            @Override
            public T getValue() {
                return gauge.getSample();
            }
        });
    }
    @Override
    public <T extends Number> void unregisterGauge(String statName, Gauge<T> gauge) {
        // do nothing right now as the Codahale doesn't support conditional removal
    }
    @Override
    public StatsLogger scope(String scope) {
        String scopeName;
        if (basename == null || 0 == basename.length()) {
            scopeName = scope;
        } else {
            scopeName = name(basename, scope);
        }
        return new CodahaleStatsLogger(metrics, scopeName);
    }
    @Override
    public void removeScope(String name, StatsLogger statsLogger) {
        // no-op. the codahale stats logger doesn't have the references for stats logger.
    }
    /**
     Thread-scoped stats not currently supported; falls back to the regular op stats logger.
     */
    @Override
    public OpStatsLogger getThreadScopedOpStatsLogger(String name) {
        return getOpStatsLogger(name);
    }
    /**
     Thread-scoped stats not currently supported; falls back to the regular counter.
     */
    @Override
    public Counter getThreadScopedCounter(String name) {
        return getCounter(name);
    }
}
| 640 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/OtelMetricsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.otel;
// CHECKSTYLE.OFF: IllegalImport
import io.netty.util.internal.PlatformDependent;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.Meter;
import io.opentelemetry.instrumentation.runtimemetrics.BufferPools;
import io.opentelemetry.instrumentation.runtimemetrics.Classes;
import io.opentelemetry.instrumentation.runtimemetrics.Cpu;
import io.opentelemetry.instrumentation.runtimemetrics.GarbageCollector;
import io.opentelemetry.instrumentation.runtimemetrics.MemoryPools;
import io.opentelemetry.instrumentation.runtimemetrics.Threads;
import io.opentelemetry.sdk.autoconfigure.AutoConfiguredOpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.Aggregation;
import io.opentelemetry.sdk.metrics.InstrumentSelector;
import io.opentelemetry.sdk.metrics.InstrumentType;
import io.opentelemetry.sdk.metrics.View;
import java.lang.management.BufferPoolMXBean;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.StatsProvider;
import org.apache.commons.configuration.Configuration;
// CHECKSTYLE.ON: IllegalImport
@Slf4j
public class OtelMetricsProvider implements StatsProvider {
    private static final String METER_NAME = "org.apache.bookkeeper";
    /*
     * These acts a registry of the metrics defined in this provider
     */
    final ConcurrentMap<ScopeContext, OtelCounter> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<ScopeContext, OtelOpStatsLogger> opStats = new ConcurrentHashMap<>();
    // Explicit histogram bucket boundaries applied (via a View) to all histograms
    // produced under METER_NAME.
    private static final List<Double> histogramBuckets = Arrays.asList(
            0.1, 0.2, 0.5,
            1.0, 2.0, 5.0,
            10.0, 20.0, 50.0,
            100.0, 200.0, 500.0,
            1_000.0, 2_000.0, 5_000.0,
            10_000.0, 20_000.0, 50_000.0
    );
    private final OpenTelemetry openTelemetry;
    final Meter meter;
    OtelMetricsProvider() {
        AutoConfiguredOpenTelemetrySdk sdk = AutoConfiguredOpenTelemetrySdk.builder()
                .addMeterProviderCustomizer(
                        (sdkMeterProviderBuilder, configProperties) ->
                                sdkMeterProviderBuilder.registerView(
                                        InstrumentSelector.builder()
                                                .setMeterName(METER_NAME)
                                                .setType(InstrumentType.HISTOGRAM)
                                                .build(),
                                        View.builder()
                                                .setAggregation(Aggregation.explicitBucketHistogram(histogramBuckets))
                                                .build())
                ).build();
        this.openTelemetry = sdk.getOpenTelemetrySdk();
        this.meter = openTelemetry.getMeter(METER_NAME);
    }
    @Override
    public void start(Configuration conf) {
        boolean exposeDefaultJVMMetrics = conf.getBoolean("exposeDefaultJVMMetrics", true);
        if (exposeDefaultJVMMetrics) {
            // Include standard JVM stats
            MemoryPools.registerObservers(openTelemetry);
            BufferPools.registerObservers(openTelemetry);
            Classes.registerObservers(openTelemetry);
            Cpu.registerObservers(openTelemetry);
            Threads.registerObservers(openTelemetry);
            GarbageCollector.registerObservers(openTelemetry);
            meter.gaugeBuilder("process.runtime.jvm.memory.direct_bytes_used")
                    .buildWithCallback(odm -> odm.record(getDirectMemoryUsage.get()));
            meter.gaugeBuilder("process.runtime.jvm.memory.direct_bytes_max")
                    .buildWithCallback(odm -> odm.record(PlatformDependent.estimateMaxDirectMemory()));
        }
    }
    @Override
    public void stop() {
    }
    @Override
    public StatsLogger getStatsLogger(String scope) {
        return new OtelStatsLogger(this, scope, Attributes.empty());
    }
    /*
     * Try to get Netty counter of used direct memory. This will be correct, unlike the JVM values.
     */
    private static final AtomicLong directMemoryUsage;
    private static final Optional<BufferPoolMXBean> poolMxBeanOp;
    private static final Supplier<Double> getDirectMemoryUsage;
    static {
        if (PlatformDependent.useDirectBufferNoCleaner()) {
            poolMxBeanOp = Optional.empty();
            AtomicLong tmpDirectMemoryUsage = null;
            try {
                // reflectively read Netty's internal direct-memory counter;
                // falls back to NaN if the field is inaccessible (e.g. module restrictions)
                Field field = PlatformDependent.class.getDeclaredField("DIRECT_MEMORY_COUNTER");
                field.setAccessible(true);
                tmpDirectMemoryUsage = (AtomicLong) field.get(null);
            } catch (Throwable t) {
                log.warn("Failed to access netty DIRECT_MEMORY_COUNTER field {}", t.getMessage());
            }
            directMemoryUsage = tmpDirectMemoryUsage;
            getDirectMemoryUsage = () -> directMemoryUsage != null ? directMemoryUsage.get() : Double.NaN;
        } else {
            directMemoryUsage = null;
            // use the JVM's "direct" BufferPoolMXBean when Netty uses cleaner-based buffers
            List<BufferPoolMXBean> platformMXBeans = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
            poolMxBeanOp = platformMXBeans.stream()
                    .filter(bufferPoolMXBean -> bufferPoolMXBean.getName().equals("direct")).findAny();
            getDirectMemoryUsage = () -> poolMxBeanOp.isPresent() ? poolMxBeanOp.get().getMemoryUsed() : Double.NaN;
        }
    }
}
| 641 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/OtelCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.otel;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.LongUpDownCounter;
import io.opentelemetry.api.metrics.Meter;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.Counter;
/**
 * {@link Counter} implementation backed by an OpenTelemetry {@link LongUpDownCounter}.
 *
 * <p>All mutations are recorded against the attributes captured from the
 * {@link ScopeContext} at construction time so that every update lands on the
 * same labeled time series.
 */
class OtelCounter implements Counter {

    private final LongUpDownCounter counter;
    private final Attributes attributes;

    OtelCounter(Meter meter, ScopeContext sc) {
        this.counter = meter.upDownCounterBuilder(sc.getName()).build();
        this.attributes = sc.getAttributes();
    }

    @Override
    public void clear() {
        // No-op: OpenTelemetry instruments cannot be reset from the API.
    }

    @Override
    public void inc() {
        counter.add(1, attributes);
    }

    @Override
    public void dec() {
        counter.add(-1, attributes);
    }

    @Override
    public void addCount(long delta) {
        counter.add(delta, attributes);
    }

    @Override
    public void addLatency(long eventLatency, TimeUnit unit) {
        long valueMillis = unit.toMillis(eventLatency);
        // Bug fix: pass the scope attributes so latency-derived increments are
        // recorded on the same labeled series as inc()/dec()/addCount(); previously
        // this overload recorded against empty attributes.
        counter.add(valueMillis, attributes);
    }

    @Override
    public Long get() {
        // The OpenTelemetry API does not expose the current instrument value.
        return -1L;
    }
}
| 642 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/OtelOpStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.otel;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleHistogram;
import io.opentelemetry.api.metrics.Meter;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.stats.OpStatsLogger;
/**
 * {@link OpStatsLogger} implementation that records operation latencies and values
 * into a single OpenTelemetry {@link DoubleHistogram}, discriminating success from
 * failure via a {@code success} attribute.
 */
class OtelOpStatsLogger implements OpStatsLogger {

    private final DoubleHistogram histogram;
    private final Attributes successAttributes;
    private final Attributes failureAttributes;

    OtelOpStatsLogger(Meter meter, ScopeContext sc) {
        this.histogram = meter.histogramBuilder(sc.getName()).build();
        Attributes base = sc.getAttributes();
        this.successAttributes = Attributes.builder().putAll(base).put("success", "true").build();
        this.failureAttributes = Attributes.builder().putAll(base).put("success", "false").build();
    }

    @Override
    public void registerFailedEvent(long eventLatency, TimeUnit unit) {
        histogram.record(toMillis(eventLatency, unit), failureAttributes);
    }

    @Override
    public void registerSuccessfulEvent(long eventLatency, TimeUnit unit) {
        histogram.record(toMillis(eventLatency, unit), successAttributes);
    }

    @Override
    public void registerSuccessfulValue(long value) {
        histogram.record(value, successAttributes);
    }

    @Override
    public void registerFailedValue(long value) {
        histogram.record(value, failureAttributes);
    }

    @Override
    public OpStatsData toOpStatsData() {
        // Aggregation/export is handled by the OpenTelemetry SDK, not this facade.
        throw new UnsupportedOperationException();
    }

    @Override
    public void clear() {
        // OpenTelemetry instruments cannot be reset from the API.
        throw new UnsupportedOperationException();
    }

    /** Converts a latency to fractional milliseconds, preserving microsecond precision. */
    private static double toMillis(long eventLatency, TimeUnit unit) {
        return unit.toMicros(eventLatency) / 1000.0;
    }
}
| 643 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/OtelStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.otel;
import io.opentelemetry.api.common.Attributes;
import java.util.StringJoiner;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * {@link StatsLogger} implementation that creates OpenTelemetry-backed counters,
 * op-stats loggers and gauges, caching instruments per (name, attributes) pair
 * in the owning {@link OtelMetricsProvider}.
 */
public class OtelStatsLogger implements StatsLogger {

    private final OtelMetricsProvider provider;
    private final String scope;
    private final Attributes attributes;

    OtelStatsLogger(OtelMetricsProvider provider, String scope, Attributes attributes) {
        this.provider = provider;
        this.scope = scope;
        this.attributes = attributes;
    }

    @Override
    public OpStatsLogger getOpStatsLogger(String name) {
        return provider.opStats.computeIfAbsent(scopeContext(name),
                key -> new OtelOpStatsLogger(provider.meter, key));
    }

    @Override
    public OpStatsLogger getThreadScopedOpStatsLogger(String name) {
        // Thread scoping is not supported by this provider; fall back to the shared logger.
        return getOpStatsLogger(name);
    }

    @Override
    public Counter getCounter(String name) {
        return provider.counters.computeIfAbsent(scopeContext(name),
                key -> new OtelCounter(provider.meter, key));
    }

    @Override
    public Counter getThreadScopedCounter(String name) {
        // Thread scoping is not supported by this provider; fall back to the shared counter.
        return getCounter(name);
    }

    @Override
    public <T extends Number> void registerGauge(String name, Gauge<T> gauge) {
        provider.meter.gaugeBuilder(completeName(name))
                .buildWithCallback(measurement ->
                        measurement.record(gauge.getSample().doubleValue(), attributes));
    }

    @Override
    public <T extends Number> void unregisterGauge(String name, Gauge<T> gauge) {
        // No-op: OpenTelemetry callbacks are not unregistered here.
    }

    @Override
    public StatsLogger scope(String name) {
        return new OtelStatsLogger(provider, completeName(name), attributes);
    }

    @Override
    public void removeScope(String name, StatsLogger statsLogger) {
        // No-op: scopes hold no resources that need releasing.
    }

    @Override
    public StatsLogger scopeLabel(String labelName, String labelValue) {
        Attributes extended = Attributes.builder()
                .putAll(attributes)
                .put(labelName, labelValue)
                .build();
        return new OtelStatsLogger(provider, scope, extended);
    }

    private ScopeContext scopeContext(String name) {
        return new ScopeContext(completeName(name), attributes);
    }

    /** Qualifies {@code name} with the current scope, joined by a dot. */
    private String completeName(String name) {
        return scope.isEmpty() ? name : scope + "." + name;
    }
}
| 644 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/ScopeContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.stats.otel;
import io.opentelemetry.api.common.Attributes;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * Holder for a fully-qualified metric name and the attributes (labels) it was
 * created under.
 *
 * <p>Lombok's {@code @Data} derives {@code equals}/{@code hashCode} from both
 * fields, which allows instances to serve as cache keys for per-scope metric
 * instruments.
 */
@Data
@AllArgsConstructor
public class ScopeContext {
    private final String name;
    private final Attributes attributes;
}
| 645 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/otel-metrics-provider/src/main/java/org/apache/bookkeeper/stats/otel/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* A stats provider implementation based on OpenTelemetry.
*/
package org.apache.bookkeeper.stats.otel;
| 646 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/test/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/test/java/org/apache/bookkeeper/stats/prometheus/TestPrometheusFormatter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import static com.google.common.base.Preconditions.checkArgument;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.google.common.base.MoreObjects;
import com.google.common.base.Splitter;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Gauge;
import io.prometheus.client.hotspot.GarbageCollectorExports;
import io.prometheus.client.hotspot.MemoryPoolsExports;
import io.prometheus.client.hotspot.StandardExports;
import io.prometheus.client.hotspot.ThreadExports;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.junit.Test;
/**
 * Test for {@link PrometheusMetricsProvider}.
 */
public class TestPrometheusFormatter {

    @Test(timeout = 30000)
    public void testStatsOutput() throws Exception {
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        StatsLogger statsLogger = provider.getStatsLogger("test");
        Counter counter = statsLogger.getCounter("my_counter");
        counter.inc();
        counter.inc();

        OpStatsLogger opStats = statsLogger.getOpStatsLogger("op");
        opStats.registerSuccessfulEvent(10, TimeUnit.MILLISECONDS);
        opStats.registerSuccessfulEvent(5, TimeUnit.MILLISECONDS);

        OpStatsLogger opStats1 = statsLogger.scopeLabel("test_label", "test_value")
                .getOpStatsLogger("op_label");
        opStats1.registerSuccessfulEvent(10, TimeUnit.MILLISECONDS);
        opStats1.registerSuccessfulEvent(5, TimeUnit.MILLISECONDS);
        opStats1.registerFailedEvent(1, TimeUnit.MILLISECONDS);

        // Latency quantiles are only published after a rotation.
        provider.rotateLatencyCollection();
        StringWriter writer = new StringWriter();
        provider.writeAllMetrics(writer);

        // Append samples in the formats emitted for JVM metrics so that parseMetrics()
        // is exercised against them too (scientific notation, quoted pool names).
        writer.write("jvm_memory_direct_bytes_max{} 4.77626368E8\n");
        writer.write("jvm_memory_pool_bytes_used{pool=\"Code Cache\"} 3347712.0\n");
        writer.write("jvm_memory_pool_bytes_used{pool=\"CodeHeap 'non-nmethods'\"} 1207168.0\n");
        System.out.println(writer);
        Multimap<String, Metric> metrics = parseMetrics(writer.toString());
        System.out.println(metrics);

        // Plain counter: one unlabeled sample with the accumulated value.
        List<Metric> cm = (List<Metric>) metrics.get("test_my_counter");
        assertEquals(1, cm.size());
        assertEquals(0, cm.get(0).tags.size());
        assertEquals(2.0, cm.get(0).value, 0.0);

        // test_op_sum: one sample per success label.
        cm = (List<Metric>) metrics.get("test_op_sum");
        assertEquals(2, cm.size());
        Metric m = cm.get(0);
        assertEquals(1, cm.get(0).tags.size());
        assertEquals(0.0, m.value, 0.0);
        assertEquals(1, m.tags.size());
        assertEquals("false", m.tags.get("success"));

        m = cm.get(1);
        assertEquals(1, cm.get(0).tags.size());
        assertEquals(15.0, m.value, 0.0);
        assertEquals(1, m.tags.size());
        assertEquals("true", m.tags.get("success"));

        // test_op_count
        cm = (List<Metric>) metrics.get("test_op_count");
        assertEquals(2, cm.size());
        m = cm.get(0);
        assertEquals(1, cm.get(0).tags.size());
        assertEquals(0.0, m.value, 0.0);
        assertEquals(1, m.tags.size());
        assertEquals("false", m.tags.get("success"));

        m = cm.get(1);
        assertEquals(1, cm.get(0).tags.size());
        assertEquals(2.0, m.value, 0.0);
        assertEquals(1, m.tags.size());
        assertEquals("true", m.tags.get("success"));

        // Latency quantiles: 7 quantiles x {success,failure}.
        cm = (List<Metric>) metrics.get("test_op");
        assertEquals(14, cm.size());
        boolean found = false;
        for (Metric mt : cm) {
            if ("true".equals(mt.tags.get("success")) && "1.0".equals(mt.tags.get("quantile"))) {
                assertEquals(10.0, mt.value, 0.0);
                found = true;
            }
        }
        assertTrue(found);

        // test_op_label_sum: labeled variant carries the extra test_label tag.
        cm = (List<Metric>) metrics.get("test_op_label_sum");
        assertEquals(2, cm.size());
        m = cm.get(0);
        assertEquals(2, m.tags.size());
        assertEquals(1.0, m.value, 0.0);
        assertEquals("false", m.tags.get("success"));
        assertEquals("test_value", m.tags.get("test_label"));

        m = cm.get(1);
        assertEquals(15.0, m.value, 0.0);
        assertEquals(2, m.tags.size());
        assertEquals("true", m.tags.get("success"));
        assertEquals("test_value", m.tags.get("test_label"));

        // test_op_label_count
        cm = (List<Metric>) metrics.get("test_op_label_count");
        assertEquals(2, cm.size());
        m = cm.get(0);
        assertEquals(1, m.value, 0.0);
        assertEquals(2, m.tags.size());
        assertEquals("false", m.tags.get("success"));
        assertEquals("test_value", m.tags.get("test_label"));

        m = cm.get(1);
        assertEquals(2.0, m.value, 0.0);
        assertEquals(2, m.tags.size());
        assertEquals("true", m.tags.get("success"));
        assertEquals("test_value", m.tags.get("test_label"));

        // Labeled latency quantiles.
        cm = (List<Metric>) metrics.get("test_op_label");
        assertEquals(14, cm.size());
        found = false;
        for (Metric mt : cm) {
            if ("true".equals(mt.tags.get("success"))
                    && "test_value".equals(mt.tags.get("test_label"))
                    && "1.0".equals(mt.tags.get("quantile"))) {
                assertEquals(10.0, mt.value, 0.0);
                found = true;
            }
        }
        assertTrue(found);
    }

    @Test
    public void testWriteMetricsCollectedByPrometheusClient() {
        CollectorRegistry registry = CollectorRegistry.defaultRegistry;
        registry.register(new StandardExports());
        registry.register(new MemoryPoolsExports());
        registry.register(new GarbageCollectorExports());
        registry.register(new ThreadExports());
        registry.register(Gauge.build("jvm_memory_direct_bytes_used", "-").create().setChild(new Gauge.Child() {
            @Override
            public double get() {
                return 1.0;
            }
        }));
        registry.register(Gauge.build("jvm_memory_direct_bytes_max", "-").create().setChild(new Gauge.Child() {
            @Override
            public double get() {
                return 100.0;
            }
        }));

        PrometheusMetricsProvider provider = new PrometheusMetricsProvider(registry);
        StringWriter writer = new StringWriter();
        try {
            provider.rotateLatencyCollection();
            provider.writeAllMetrics(writer);
            String output = writer.toString();
            parseMetrics(output);
            assertTrue(output.contains("# TYPE jvm_memory_direct_bytes_max gauge"));
            assertTrue(output.contains("# TYPE jvm_memory_direct_bytes_used gauge"));
            assertTrue(output.contains("# TYPE jvm_gc_collection_seconds summary"));
            assertTrue(output.contains("# TYPE jvm_memory_pool_bytes_committed gauge"));
            assertTrue(output.contains("# TYPE process_cpu_seconds counter"));
        } catch (Exception e) {
            // Bug fix: include the cause in the failure message instead of a bare
            // fail(), which swallowed the exception and hid the actual error.
            fail("Unexpected exception: " + e);
        }
    }

    @Test
    public void testPrometheusTypeDuplicate() throws IOException {
        PrometheusTextFormat prometheusTextFormat = new PrometheusTextFormat();
        StringWriter writer = new StringWriter();
        prometheusTextFormat.writeType(writer, "counter", "gauge");
        prometheusTextFormat.writeType(writer, "counter", "gauge");
        String string = writer.toString();
        // The second writeType for the same metric must be suppressed.
        assertEquals("# TYPE counter gauge\n", string);
    }

    /**
     * Hacky parsing of Prometheus text format. Should be good enough for unit tests.
     */
    private static Multimap<String, Metric> parseMetrics(String metrics) {
        Multimap<String, Metric> parsed = ArrayListMultimap.create();

        // Example of lines are
        // jvm_threads_current{cluster="standalone",} 203.0
        // or
        // pulsar_subscriptions_count{cluster="standalone", namespace="sample/standalone/ns1",
        // topic="persistent://sample/standalone/ns1/test-2"} 0.0 1517945780897
        Pattern pattern = Pattern.compile("^(\\w+)(\\{([^\\}]*)\\})?\\s(-?[\\d\\w\\.]+)(\\s(\\d+))?$");
        Pattern formatPattern =
                Pattern.compile("^(\\w+)(\\{((\\w+=[-\\s\\\'\\\"\\.\\w]+(,\\s?\\w+=[\\\"\\.\\w]+)*))?\\})?"
                        + "\\s(-?[\\d\\w\\.]+)(\\s(\\d+))?$");
        Pattern tagsPattern = Pattern.compile("(\\w+)=\"([^\"]+)\"(,\\s?)?");

        Splitter.on("\n").split(metrics).forEach(line -> {
            // Skip blank lines and the "# TYPE"/"# HELP" comment lines.
            if (line.isEmpty() || line.startsWith("#")) {
                return;
            }

            System.err.println("LINE: '" + line + "'");
            Matcher matcher = pattern.matcher(line);
            Matcher formatMatcher = formatPattern.matcher(line);
            System.err.println("Matches: " + matcher.matches());
            System.err.println(matcher);
            assertTrue(matcher.matches());
            assertTrue("failed to validate line: " + line, formatMatcher.matches());
            assertEquals(6, matcher.groupCount());

            System.err.println("groups: " + matcher.groupCount());
            for (int i = 0; i < matcher.groupCount(); i++) {
                System.err.println("  GROUP " + i + " -- " + matcher.group(i));
            }

            checkArgument(matcher.matches());
            checkArgument(formatMatcher.matches());
            String name = matcher.group(1);

            Metric m = new Metric();
            m.value = Double.parseDouble(matcher.group(4));
            String tags = matcher.group(3);
            if (tags != null) {
                Matcher tagsMatcher = tagsPattern.matcher(tags);
                while (tagsMatcher.find()) {
                    String tag = tagsMatcher.group(1);
                    String value = tagsMatcher.group(2);
                    m.tags.put(tag, value);
                }
            }

            parsed.put(name, m);
        });

        return parsed;
    }

    /** Parsed sample: tag map plus numeric value. */
    static class Metric {
        Map<String, String> tags = new TreeMap<>();
        double value;

        @Override
        public String toString() {
            return MoreObjects.toStringHelper(this).add("tags", tags).add("value", value).toString();
        }
    }
}
| 647 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/test/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/test/java/org/apache/bookkeeper/stats/prometheus/TestPrometheusMetricsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import java.io.StringWriter;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
import lombok.Cleanup;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.commons.configuration.PropertiesConfiguration;
import org.junit.Assert;
import org.junit.Test;
/**
 * Unit test of {@link PrometheusMetricsProvider}.
 */
public class TestPrometheusMetricsProvider {

    // With the HTTP flag disabled, no embedded stats server should be created.
    @Test
    public void testStartNoHttp() {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ENABLE, false);
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        try {
            provider.start(config);
            assertNull(provider.server);
        } finally {
            provider.stop();
        }
    }

    // When the BookKeeper HTTP server is enabled, the provider must not start its
    // own server even if the Prometheus HTTP flag is set.
    @Test
    public void testStartNoHttpWhenBkHttpEnabled() {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ENABLE, true);
        config.setProperty("httpServerEnabled", true);
        @Cleanup("stop") PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        provider.start(config);
        assertNull(provider.server);
    }

    @Test
    public void testStartWithHttp() {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ENABLE, true);
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_PORT, 0); // ephemeral
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        try {
            provider.start(config);
            assertNotNull(provider.server);
        } finally {
            provider.stop();
        }
    }

    // Same as above but binding to an explicit address.
    @Test
    public void testStartWithHttpSpecifyAddr() {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ENABLE, true);
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_PORT, 0); // ephemeral
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ADDRESS, "127.0.0.1");
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        try {
            provider.start(config);
            assertNotNull(provider.server);
        } finally {
            provider.stop();
        }
    }

    // Basic inc/dec/addCount arithmetic of the LongAdder-backed counter.
    @Test
    public void testCounter() {
        LongAdderCounter counter = new LongAdderCounter(Collections.emptyMap());
        long value = counter.get();
        assertEquals(0L, value);
        counter.inc();
        assertEquals(1L, counter.get().longValue());
        counter.dec();
        assertEquals(0L, counter.get().longValue());
        counter.addCount(3);
        assertEquals(3L, counter.get().longValue());
    }

    // addLatency converts to milliseconds before accumulating (3ms in nanos -> 3).
    @Test
    public void testCounter2() {
        LongAdderCounter counter = new LongAdderCounter(Collections.emptyMap());
        long value = counter.get();
        assertEquals(0L, value);
        counter.addLatency(3 * 1000 * 1000L, TimeUnit.NANOSECONDS);
        assertEquals(3L, counter.get().longValue());
    }

    // Requesting the same counter name twice must return the identical cached instance.
    @Test
    public void testTwoCounters() throws Exception {
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        StatsLogger statsLogger = provider.getStatsLogger("test");
        Counter counter1 = statsLogger.getCounter("counter");
        Counter counter2 = statsLogger.getCounter("counter");
        assertEquals(counter1, counter2);
        assertSame(counter1, counter2);
        assertEquals(1, provider.counters.size());
    }

    // The provider should export sane jvm_memory_direct_* gauges; a live direct
    // buffer guarantees a non-zero "used" reading.
    @Test
    public void testJvmDirectMemoryMetrics() throws Exception {
        PropertiesConfiguration config = new PropertiesConfiguration();
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ENABLE, true);
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_PORT, 0);
        config.setProperty(PrometheusMetricsProvider.PROMETHEUS_STATS_HTTP_ADDRESS, "127.0.0.1");
        // Allocate a direct buffer so direct-memory usage is provably > 25 bytes below.
        ByteBuf byteBuf = ByteBufAllocator.DEFAULT.directBuffer(25);
        PrometheusMetricsProvider provider = new PrometheusMetricsProvider();
        try {
            provider.start(config);
            assertNotNull(provider.server);
            StringWriter writer = new StringWriter();
            provider.writeAllMetrics(writer);
            String s = writer.toString();
            // NOTE(review): splits on the platform line separator while Prometheus
            // text output conventionally uses "\n" — confirm on Windows.
            String[] split = s.split(System.lineSeparator());
            HashMap<String, String> map = new HashMap<>();
            for (String str : split) {
                String[] aux = str.split(" ");
                map.put(aux[0], aux[1]);
            }
            String directBytesMax = map.get("jvm_memory_direct_bytes_max{}");
            Assert.assertNotNull(directBytesMax);
            Assert.assertNotEquals("Nan", directBytesMax);
            Assert.assertNotEquals("-1", directBytesMax);

            String directBytesUsed = map.get("jvm_memory_direct_bytes_used{}");
            Assert.assertNotNull(directBytesUsed);
            Assert.assertNotEquals("Nan", directBytesUsed);
            Assert.assertTrue(Double.parseDouble(directBytesUsed) > 25);
            // ensure byteBuffer doesn't gc
            byteBuf.release();
        } finally {
            provider.stop();
        }
    }
}
| 648 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/DataSketchesOpStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import com.yahoo.sketches.quantiles.DoublesSketch;
import com.yahoo.sketches.quantiles.DoublesSketchBuilder;
import com.yahoo.sketches.quantiles.DoublesUnion;
import com.yahoo.sketches.quantiles.DoublesUnionBuilder;
import io.netty.util.concurrent.FastThreadLocal;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;
import java.util.concurrent.locks.StampedLock;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.stats.OpStatsLogger;
/**
 * OpStatsLogger implementation that uses DataSketches library to calculate the approximated latency quantiles.
 *
 * <p>Concurrency scheme: each recording thread updates its own thread-local sketch
 * pair under a StampedLock <em>read</em> lock (many writers may update their own
 * sketches concurrently); {@link #rotateLatencyCollection()} takes the <em>write</em>
 * lock per sketch to get exclusive access while draining it into the aggregate.
 * Two {@link ThreadLocalAccessor}s are swapped on rotation so recorders always have
 * a live accessor to write into.
 */
public class DataSketchesOpStatsLogger implements OpStatsLogger {

    /*
     * Use 2 rotating thread local accessor so that we can safely swap them.
     */
    private volatile ThreadLocalAccessor current;
    private volatile ThreadLocalAccessor replacement;

    /*
     * These are the sketches where all the aggregated results are published.
     */
    private volatile DoublesSketch successResult;
    private volatile DoublesSketch failResult;

    // Monotonic totals, kept outside the sketches so counts/sums survive rotation.
    private final LongAdder successCountAdder = new LongAdder();
    private final LongAdder failCountAdder = new LongAdder();

    private final LongAdder successSumAdder = new LongAdder();
    private final LongAdder failSumAdder = new LongAdder();

    private Map<String, String> labels;

    // used for lazy registration for thread scoped metrics
    private boolean threadInitialized;

    public DataSketchesOpStatsLogger(Map<String, String> labels) {
        this.current = new ThreadLocalAccessor();
        this.replacement = new ThreadLocalAccessor();
        this.labels = labels;
    }

    @Override
    public void registerFailedEvent(long eventLatency, TimeUnit unit) {
        // Convert to fractional milliseconds, keeping microsecond precision.
        double valueMillis = unit.toMicros(eventLatency) / 1000.0;

        failCountAdder.increment();
        failSumAdder.add((long) valueMillis);
        LocalData localData = current.localData.get();

        // Read lock: excludes rotation (which takes the write lock), not other recorders.
        long stamp = localData.lock.readLock();
        try {
            localData.failSketch.update(valueMillis);
        } finally {
            localData.lock.unlockRead(stamp);
        }
    }

    @Override
    public void registerSuccessfulEvent(long eventLatency, TimeUnit unit) {
        double valueMillis = unit.toMicros(eventLatency) / 1000.0;

        successCountAdder.increment();
        successSumAdder.add((long) valueMillis);
        LocalData localData = current.localData.get();

        long stamp = localData.lock.readLock();
        try {
            localData.successSketch.update(valueMillis);
        } finally {
            localData.lock.unlockRead(stamp);
        }
    }

    @Override
    public void registerSuccessfulValue(long value) {
        successCountAdder.increment();
        successSumAdder.add(value);
        LocalData localData = current.localData.get();

        long stamp = localData.lock.readLock();
        try {
            localData.successSketch.update(value);
        } finally {
            localData.lock.unlockRead(stamp);
        }
    }

    @Override
    public void registerFailedValue(long value) {
        failCountAdder.increment();
        failSumAdder.add(value);
        LocalData localData = current.localData.get();

        long stamp = localData.lock.readLock();
        try {
            localData.failSketch.update(value);
        } finally {
            localData.lock.unlockRead(stamp);
        }
    }

    @Override
    public OpStatsData toOpStatsData() {
        // Not relevant as we don't use JMX here
        throw new UnsupportedOperationException();
    }

    @Override
    public void clear() {
        // Not relevant as we don't use JMX here
        throw new UnsupportedOperationException();
    }

    /**
     * Swaps the active thread-local accessor, then drains every thread's sketches
     * from the retired accessor into fresh aggregate result sketches. New samples
     * recorded during the drain land in the freshly-activated accessor.
     */
    public void rotateLatencyCollection() {
        // Swap current with replacement
        ThreadLocalAccessor local = current;
        current = replacement;
        replacement = local;

        final DoublesUnion aggregateSuccesss = new DoublesUnionBuilder().build();
        final DoublesUnion aggregateFail = new DoublesUnionBuilder().build();
        local.map.forEach((localData, b) -> {
            // Write lock: wait out in-flight recorders on this thread's sketches
            // before reading and resetting them.
            long stamp = localData.lock.writeLock();
            try {
                aggregateSuccesss.update(localData.successSketch);
                localData.successSketch.reset();
                aggregateFail.update(localData.failSketch);
                localData.failSketch.reset();
            } finally {
                localData.lock.unlockWrite(stamp);
            }
        });

        successResult = aggregateSuccesss.getResultAndReset();
        failResult = aggregateFail.getResultAndReset();
    }

    /** Returns the total number of recorded events for the given outcome. */
    public long getCount(boolean success) {
        return success ? successCountAdder.sum() : failCountAdder.sum();
    }

    /** Returns the accumulated sum (in whole milliseconds) for the given outcome. */
    public long getSum(boolean success) {
        return success ? successSumAdder.sum() : failSumAdder.sum();
    }

    /**
     * Returns the approximate quantile from the last rotation's aggregate, or NaN
     * if no rotation has produced a result for that outcome yet.
     */
    public double getQuantileValue(boolean success, double quantile) {
        DoublesSketch s = success ? successResult : failResult;
        return s != null ? s.getQuantile(quantile) : Double.NaN;
    }

    public Map<String, String> getLabels() {
        return labels;
    }

    public boolean isThreadInitialized() {
        return threadInitialized;
    }

    /** Lazily attaches labels for thread-scoped metrics and marks them registered. */
    public void initializeThread(Map<String, String> labels) {
        this.labels = labels;
        this.threadInitialized = true;
    }

    /** Per-thread sketch pair plus the lock that coordinates updates with rotation. */
    private static class LocalData {
        private final DoublesSketch successSketch = new DoublesSketchBuilder().build();
        private final DoublesSketch failSketch = new DoublesSketchBuilder().build();
        private final StampedLock lock = new StampedLock();
    }

    /**
     * Thread-local LocalData factory that also tracks every live instance in a map
     * so rotation can visit all threads' sketches.
     */
    private static class ThreadLocalAccessor {
        private final Map<LocalData, Boolean> map = new ConcurrentHashMap<>();
        private final FastThreadLocal<LocalData> localData = new FastThreadLocal<LocalData>() {

            @Override
            protected LocalData initialValue() throws Exception {
                LocalData localData = new LocalData();
                map.put(localData, Boolean.TRUE);
                return localData;
            }

            @Override
            protected void onRemoval(LocalData value) throws Exception {
                map.remove(value);
            }
        };
    }
}
| 649 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/SimpleGauge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import java.util.Map;
import org.apache.bookkeeper.stats.Gauge;
/**
 * Pairs a {@link Gauge} value supplier with the Prometheus labels it should be
 * reported under; the current sample is read through {@link #getSample()}.
 */
public class SimpleGauge<T extends Number> {

    private final Gauge<T> valueSupplier;
    private final Map<String, String> metricLabels;

    public SimpleGauge(final Gauge<T> gauge, Map<String, String> labels) {
        this.valueSupplier = gauge;
        this.metricLabels = labels;
    }

    /** Reads the current value from the underlying gauge. */
    Number getSample() {
        return valueSupplier.getSample();
    }

    /** Returns the labels this gauge is reported under. */
    public Map<String, String> getLabels() {
        return metricLabels;
    }
}
| 650 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/ThreadScopedLongAdderCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.ThreadRegistry;
/**
 * {@link Counter} implementation that lazily registers LongAdderCounters per thread
 * with added labels for the threadpool/thread name and thread no.
 */
public class ThreadScopedLongAdderCounter implements Counter {
    // Per-thread delegate counter; set to defaultCounter for threads that never
    // registered with ThreadRegistry.
    private ThreadLocal<LongAdderCounter> counters;
    // Shared fallback, reported with threadPool="?"/thread="?" labels.
    private LongAdderCounter defaultCounter;
    // Base labels as supplied at construction (defensively copied), without the
    // thread-scoped additions.
    private Map<String, String> originalLabels;
    private ScopeContext scopeContext;
    private PrometheusMetricsProvider provider;
    /**
     * Creates the thread-scoped counter.
     *
     * @param provider     provider whose counter registry receives the per-thread counters
     * @param scopeContext scope (metric name + labels) this counter belongs to
     * @param labels       base labels; copied defensively
     */
    public ThreadScopedLongAdderCounter(PrometheusMetricsProvider provider,
                                        ScopeContext scopeContext,
                                        Map<String, String> labels) {
        this.provider = provider;
        this.scopeContext = scopeContext;
        this.originalLabels = new HashMap<>(labels);
        this.defaultCounter = new LongAdderCounter(labels);
        // The fallback is pre-initialized with placeholder thread labels so it is
        // never re-initialized by getCounter().
        Map<String, String> defaultLabels = new HashMap<>(labels);
        defaultLabels.put("threadPool", "?");
        defaultLabels.put("thread", "?");
        this.defaultCounter.initializeThread(defaultLabels);
        this.counters = ThreadLocal.withInitial(() -> {
            return new LongAdderCounter(labels);
        });
    }
    @Override
    public void clear() {
        getCounter().clear();
    }
    @Override
    public void inc() {
        getCounter().inc();
    }
    @Override
    public void dec() {
        getCounter().dec();
    }
    @Override
    public void addCount(long delta) {
        getCounter().addCount(delta);
    }
    @Override
    public void addLatency(long eventLatency, TimeUnit unit) {
        getCounter().addLatency(eventLatency, unit);
    }
    @Override
    public Long get() {
        return getCounter().get();
    }
    /**
     * Returns the calling thread's counter, registering it with the provider on
     * first use.
     */
    private LongAdderCounter getCounter() {
        LongAdderCounter counter = counters.get();
        // Lazy registration
        // Update the counter with the thread labels then add to the provider
        // If for some reason this thread did not get registered,
        // then we fallback to a standard counter (defaultCounter)
        if (!counter.isThreadInitialized()) {
            ThreadRegistry.ThreadPoolThread tpt = ThreadRegistry.get();
            if (tpt == null) {
                // Unregistered thread: remember the fallback in the thread-local so
                // this branch is taken only once per thread.
                counters.set(defaultCounter);
                provider.counters.put(new ScopeContext(scopeContext.getScope(), originalLabels), defaultCounter);
                return defaultCounter;
            } else {
                // Attach threadPool/thread labels and expose the per-thread counter
                // under its own scope context.
                Map<String, String> threadScopedlabels = new HashMap<>(originalLabels);
                threadScopedlabels.put("threadPool", tpt.getThreadPool());
                threadScopedlabels.put("thread", String.valueOf(tpt.getOrdinal()));
                counter.initializeThread(threadScopedlabels);
                provider.counters.put(new ScopeContext(scopeContext.getScope(), threadScopedlabels), counter);
            }
        }
        return counter;
    }
}
| 651 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/PrometheusStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import com.google.common.base.Joiner;
import io.prometheus.client.Collector;
import java.util.Map;
import java.util.TreeMap;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.StatsLogger;
/**
 * A {@code Prometheus} based {@link StatsLogger} implementation.
 *
 * <p>All metrics are registered in the shared {@link PrometheusMetricsProvider}
 * maps, keyed by (sanitized name, labels); loggers themselves are cheap,
 * immutable handles.
 */
public class PrometheusStatsLogger implements StatsLogger {

    private final PrometheusMetricsProvider provider;
    // Fully-qualified scope prefix ('_'-joined); empty for the root logger.
    private final String scope;
    // Labels attached to every metric created through this logger.
    private final Map<String, String> labels;

    PrometheusStatsLogger(PrometheusMetricsProvider provider, String scope, Map<String, String> labels) {
        this.provider = provider;
        this.scope = scope;
        this.labels = labels;
    }

    @Override
    public OpStatsLogger getOpStatsLogger(String name) {
        return provider.opStats.computeIfAbsent(scopeContext(name), x -> new DataSketchesOpStatsLogger(labels));
    }

    @Override
    public OpStatsLogger getThreadScopedOpStatsLogger(String name) {
        return provider.threadScopedOpStats.computeIfAbsent(scopeContext(name),
                x -> new ThreadScopedDataSketchesStatsLogger(provider, x, labels));
    }

    @Override
    public Counter getCounter(String name) {
        return provider.counters.computeIfAbsent(scopeContext(name), x -> new LongAdderCounter(labels));
    }

    // NOTE(review): @Override added for consistency with the sibling thread-scoped
    // accessor above — confirm StatsLogger declares getThreadScopedCounter.
    @Override
    public Counter getThreadScopedCounter(String name) {
        return provider.threadScopedCounters.computeIfAbsent(scopeContext(name),
                x -> new ThreadScopedLongAdderCounter(provider, x, labels));
    }

    @Override
    public <T extends Number> void registerGauge(String name, Gauge<T> gauge) {
        provider.gauges.computeIfAbsent(scopeContext(name), x -> new SimpleGauge<T>(gauge, labels));
    }

    @Override
    public <T extends Number> void unregisterGauge(String name, Gauge<T> gauge) {
        // no-op
    }

    @Override
    public void removeScope(String name, StatsLogger statsLogger) {
        // no-op
    }

    /** Returns a child logger whose scope is {@code <this-scope>_<name>}. */
    @Override
    public StatsLogger scope(String name) {
        return new PrometheusStatsLogger(provider, completeName(name), labels);
    }

    /** Returns a logger identical to this one with one extra label attached. */
    @Override
    public StatsLogger scopeLabel(String labelName, String labelValue) {
        Map<String, String> newLabels = new TreeMap<>(labels);
        newLabels.put(labelName, labelValue);
        return new PrometheusStatsLogger(provider, scope, newLabels);
    }

    private ScopeContext scopeContext(String name) {
        return new ScopeContext(completeName(name), labels);
    }

    // Prometheus metric names cannot contain arbitrary characters; sanitize the
    // '_'-joined full name.
    private String completeName(String name) {
        return Collector.sanitizeMetricName(scope.isEmpty() ? name : Joiner.on('_').join(scope, name));
    }
}
| 652 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/PrometheusServlet.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.IOException;
import java.io.Writer;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
/**
 * Servlet used to export metrics in prometheus text format.
 */
public class PrometheusServlet extends HttpServlet {

    private static final long serialVersionUID = 1L;

    // transient: servlet containers may serialize servlets; the provider is
    // runtime state and must not be serialized with it.
    private final transient PrometheusMetricsProvider provider;

    public PrometheusServlet(PrometheusMetricsProvider provider) {
        this.provider = provider;
    }

    /**
     * Writes all metrics known to the provider in the Prometheus 0.0.4 text
     * exposition format.
     */
    @Override
    protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        resp.setStatus(HttpServletResponse.SC_OK);
        resp.setContentType(TextFormat.CONTENT_TYPE_004);
        // try-with-resources guarantees the response writer is closed even when
        // writing the metrics fails part-way through.
        try (Writer writer = resp.getWriter()) {
            provider.writeAllMetrics(writer);
            writer.flush();
        }
    }

    /** POST is handled identically to GET. */
    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
        doGet(req, resp);
    }
}
| 653 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/PrometheusTextFormat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import io.prometheus.client.Collector;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.Collector.MetricFamilySamples.Sample;
import io.prometheus.client.CollectorRegistry;
import java.io.IOException;
import java.io.Writer;
import java.util.Enumeration;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
 * Logic to write metrics in Prometheus text format.
 *
 * <p>NOTE(review): {@code metricNameSet} is an unsynchronized HashSet; a fresh
 * instance is created per scrape by PrometheusMetricsProvider#writeAllMetrics,
 * so single-threaded use is assumed — confirm before sharing an instance.
 */
public class PrometheusTextFormat {
    // Metric names for which a "# TYPE" line was already emitted; the text
    // format allows at most one TYPE declaration per metric name.
    Set<String> metricNameSet = new HashSet<>();
    /**
     * Writes one gauge sample, preceded by its TYPE line on first occurrence.
     * IOException is wrapped unchecked because callers invoke this from
     * forEach lambdas.
     */
    void writeGauge(Writer w, String name, SimpleGauge<? extends Number> gauge) {
        // Example:
        // # TYPE bookie_storage_entries_count gauge
        // bookie_storage_entries_count 519
        try {
            writeType(w, name, "gauge");
            w.append(name);
            writeLabels(w, gauge.getLabels());
            w.append(' ').append(gauge.getSample().toString()).append('\n');
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Writes one counter sample, preceded by its TYPE line on first occurrence.
     */
    void writeCounter(Writer w, String name, LongAdderCounter counter) {
        // Example:
        // # TYPE jvm_threads_started_total counter
        // jvm_threads_started_total 59
        try {
            writeType(w, name, "counter");
            w.append(name);
            writeLabels(w, counter.getLabels());
            w.append(' ').append(counter.get().toString()).append('\n');
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    /**
     * Writes an op-stats summary: the fixed quantile set plus _count/_sum, once
     * for failed events (success="false") and once for successful ones.
     */
    void writeOpStat(Writer w, String name, DataSketchesOpStatsLogger opStat) {
        // Example:
        // # TYPE bookie_journal_JOURNAL_ADD_ENTRY summary
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.5",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.75",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.95",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.99",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.999",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="0.9999",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY{success="false",quantile="1.0",} NaN
        // bookie_journal_JOURNAL_ADD_ENTRY_count{success="false",} 0.0
        // bookie_journal_JOURNAL_ADD_ENTRY_sum{success="false",} 0.0
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.5",} 1.706
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.75",} 1.89
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.95",} 2.121
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.99",} 10.708
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.999",} 10.902
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="0.9999",} 10.902
        // bookie_journal_JOURNAL_ADD_ENTRY{success="true",quantile="1.0",} 10.902
        // bookie_journal_JOURNAL_ADD_ENTRY_count{success="true",} 658.0
        // bookie_journal_JOURNAL_ADD_ENTRY_sum{success="true",} 1265.0800000000002
        try {
            writeType(w, name, "summary");
            writeQuantile(w, opStat, name, false, 0.5);
            writeQuantile(w, opStat, name, false, 0.75);
            writeQuantile(w, opStat, name, false, 0.95);
            writeQuantile(w, opStat, name, false, 0.99);
            writeQuantile(w, opStat, name, false, 0.999);
            writeQuantile(w, opStat, name, false, 0.9999);
            writeQuantile(w, opStat, name, false, 1.0);
            writeCount(w, opStat, name, false);
            writeSum(w, opStat, name, false);
            writeQuantile(w, opStat, name, true, 0.5);
            writeQuantile(w, opStat, name, true, 0.75);
            writeQuantile(w, opStat, name, true, 0.95);
            writeQuantile(w, opStat, name, true, 0.99);
            writeQuantile(w, opStat, name, true, 0.999);
            writeQuantile(w, opStat, name, true, 0.9999);
            writeQuantile(w, opStat, name, true, 1.0);
            writeCount(w, opStat, name, true);
            writeSum(w, opStat, name, true);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    // Writes {k="v",...} or nothing when there are no labels.
    private void writeLabels(Writer w, Map<String, String> labels) throws IOException {
        if (labels.isEmpty()) {
            return;
        }
        w.append('{');
        writeLabelsNoBraces(w, labels);
        w.append('}');
    }
    // Writes the comma-separated k="v" pairs without the surrounding braces, so
    // callers can combine them with their own fixed labels.
    private void writeLabelsNoBraces(Writer w, Map<String, String> labels) throws IOException {
        if (labels.isEmpty()) {
            return;
        }
        boolean isFirst = true;
        for (Map.Entry<String, String> e : labels.entrySet()) {
            if (!isFirst) {
                w.append(',');
            }
            isFirst = false;
            w.append(e.getKey())
                    .append("=\"")
                    .append(e.getValue())
                    .append('"');
        }
    }
    // Writes one quantile sample line: name{success="...",quantile="...",<labels>} value
    private void writeQuantile(Writer w, DataSketchesOpStatsLogger opStat, String name, Boolean success,
            double quantile) throws IOException {
        w.append(name)
                .append("{success=\"").append(success.toString())
                .append("\",quantile=\"").append(Double.toString(quantile))
                .append("\"");
        if (!opStat.getLabels().isEmpty()) {
            w.append(", ");
            writeLabelsNoBraces(w, opStat.getLabels());
        }
        w.append("} ")
                .append(Double.toString(opStat.getQuantileValue(success, quantile))).append('\n');
    }
    // Writes the summary's _count sample line.
    private void writeCount(Writer w, DataSketchesOpStatsLogger opStat, String name, Boolean success)
            throws IOException {
        w.append(name).append("_count{success=\"").append(success.toString()).append("\"");
        if (!opStat.getLabels().isEmpty()) {
            w.append(", ");
            writeLabelsNoBraces(w, opStat.getLabels());
        }
        w.append("} ")
                .append(Long.toString(opStat.getCount(success))).append('\n');
    }
    // Writes the summary's _sum sample line.
    private void writeSum(Writer w, DataSketchesOpStatsLogger opStat, String name, Boolean success)
            throws IOException {
        w.append(name).append("_sum{success=\"").append(success.toString()).append("\"");
        if (!opStat.getLabels().isEmpty()) {
            w.append(", ");
            writeLabelsNoBraces(w, opStat.getLabels());
        }
        w.append("} ")
                .append(Double.toString(opStat.getSum(success))).append('\n');
    }
    /**
     * Dumps every metric registered with the Prometheus client
     * {@link CollectorRegistry} (e.g. the JVM exports) in text format.
     */
    static void writeMetricsCollectedByPrometheusClient(Writer w, CollectorRegistry registry) throws IOException {
        Enumeration<MetricFamilySamples> metricFamilySamples = registry.metricFamilySamples();
        while (metricFamilySamples.hasMoreElements()) {
            MetricFamilySamples metricFamily = metricFamilySamples.nextElement();
            // Write type of metric
            w.append("# TYPE ").append(metricFamily.name).append(getTypeNameSuffix(metricFamily.type)).append(' ')
                    .append(getTypeStr(metricFamily.type)).write('\n');
            for (int i = 0; i < metricFamily.samples.size(); i++) {
                Sample sample = metricFamily.samples.get(i);
                w.write(sample.name);
                w.write('{');
                for (int j = 0; j < sample.labelNames.size(); j++) {
                    if (j != 0) {
                        w.write(", ");
                    }
                    w.write(sample.labelNames.get(j));
                    w.write("=\"");
                    w.write(sample.labelValues.get(j));
                    w.write('"');
                }
                w.write("} ");
                w.write(Collector.doubleToGoString(sample.value));
                w.write('\n');
            }
        }
    }
    // INFO metrics carry an "_info" name suffix in the exposition format.
    static String getTypeNameSuffix(Collector.Type type) {
        if (type.equals(Collector.Type.INFO)) {
            return "_info";
        }
        return "";
    }
    // Maps the client-library metric type to its text-format type keyword;
    // INFO metrics are exposed as gauges.
    static String getTypeStr(Collector.Type type) {
        switch (type) {
        case COUNTER:
            return "counter";
        case GAUGE:
        case INFO:
            return "gauge";
        case SUMMARY:
            return "summary";
        case HISTOGRAM:
            return "histogram";
        case UNKNOWN:
        default:
            return "unknown";
        }
    }
    // Emits the "# TYPE <name> <type>" header once per metric name.
    void writeType(Writer w, String name, String type) throws IOException {
        if (metricNameSet.contains(name)) {
            return;
        }
        metricNameSet.add(name);
        w.append("# TYPE ").append(name).append(" ").append(type).append("\n");
    }
}
| 654 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/ThreadScopedDataSketchesStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.OpStatsData;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.ThreadRegistry;
/**
* OpStatsLogger implementation that lazily registers OpStatsLoggers per thread
* with added labels for the threadpool/thresd name and thread no.
*/
public class ThreadScopedDataSketchesStatsLogger implements OpStatsLogger {
private ThreadLocal<DataSketchesOpStatsLogger> statsLoggers;
private DataSketchesOpStatsLogger defaultStatsLogger;
private Map<String, String> originalLabels;
private ScopeContext scopeContext;
private PrometheusMetricsProvider provider;
public ThreadScopedDataSketchesStatsLogger(PrometheusMetricsProvider provider,
ScopeContext scopeContext,
Map<String, String> labels) {
this.provider = provider;
this.scopeContext = scopeContext;
this.originalLabels = labels;
this.defaultStatsLogger = new DataSketchesOpStatsLogger(labels);
Map<String, String> defaultLabels = new HashMap<>(labels);
defaultLabels.put("threadPool", "?");
defaultLabels.put("thread", "?");
this.defaultStatsLogger.initializeThread(defaultLabels);
this.statsLoggers = ThreadLocal.withInitial(() -> {
return new DataSketchesOpStatsLogger(labels);
});
}
@Override
public void registerFailedEvent(long eventLatency, TimeUnit unit) {
getStatsLogger().registerFailedEvent(eventLatency, unit);
}
@Override
public void registerSuccessfulEvent(long eventLatency, TimeUnit unit) {
getStatsLogger().registerSuccessfulEvent(eventLatency, unit);
}
@Override
public void registerSuccessfulValue(long value) {
getStatsLogger().registerSuccessfulValue(value);
}
@Override
public void registerFailedValue(long value) {
getStatsLogger().registerFailedValue(value);
}
@Override
public OpStatsData toOpStatsData() {
// Not relevant as we don't use JMX here
throw new UnsupportedOperationException();
}
@Override
public void clear() {
// Not relevant as we don't use JMX here
throw new UnsupportedOperationException();
}
private DataSketchesOpStatsLogger getStatsLogger() {
DataSketchesOpStatsLogger statsLogger = statsLoggers.get();
// Lazy registration
// Update the stats logger with the thread labels then add to the provider
// If for some reason this thread did not get registered,
// then we fallback to a standard OpsStatsLogger (defaultStatsLogger)
if (!statsLogger.isThreadInitialized()) {
ThreadRegistry.ThreadPoolThread tpt = ThreadRegistry.get();
if (tpt == null) {
statsLoggers.set(defaultStatsLogger);
provider.opStats.put(new ScopeContext(scopeContext.getScope(), originalLabels), defaultStatsLogger);
return defaultStatsLogger;
} else {
Map<String, String> threadScopedlabels = new HashMap<>(originalLabels);
threadScopedlabels.put("threadPool", tpt.getThreadPool());
threadScopedlabels.put("thread", String.valueOf(tpt.getOrdinal()));
statsLogger.initializeThread(threadScopedlabels);
provider.opStats.put(new ScopeContext(scopeContext.getScope(), threadScopedlabels), statsLogger);
}
}
return statsLogger;
}
} | 655 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/PrometheusMetricsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
// CHECKSTYLE.OFF: IllegalImport
import com.google.common.annotations.VisibleForTesting;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.internal.PlatformDependent;
import io.prometheus.client.Collector;
import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Gauge;
import io.prometheus.client.Gauge.Child;
import io.prometheus.client.hotspot.GarbageCollectorExports;
import io.prometheus.client.hotspot.MemoryPoolsExports;
import io.prometheus.client.hotspot.StandardExports;
import io.prometheus.client.hotspot.ThreadExports;
import java.io.IOException;
import java.io.Writer;
import java.lang.management.BufferPoolMXBean;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Field;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.bookkeeper.stats.StatsProvider;
import org.apache.bookkeeper.stats.ThreadRegistry;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang.StringUtils;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
// CHECKSTYLE.ON: IllegalImport
/**
 * A <i>Prometheus</i> based {@link StatsProvider} implementation.
 */
public class PrometheusMetricsProvider implements StatsProvider {

    // Single-threaded scheduler driving the periodic latency-sketch rotation;
    // created in start() and torn down in stop().
    private ScheduledExecutorService executor;

    public static final String PROMETHEUS_STATS_HTTP_ENABLE = "prometheusStatsHttpEnable";
    public static final boolean DEFAULT_PROMETHEUS_STATS_HTTP_ENABLE = true;
    public static final String PROMETHEUS_STATS_HTTP_ADDRESS = "prometheusStatsHttpAddress";
    public static final String DEFAULT_PROMETHEUS_STATS_HTTP_ADDR = "0.0.0.0";
    public static final String PROMETHEUS_STATS_HTTP_PORT = "prometheusStatsHttpPort";
    public static final int DEFAULT_PROMETHEUS_STATS_HTTP_PORT = 8000;
    public static final String PROMETHEUS_STATS_LATENCY_ROLLOVER_SECONDS = "prometheusStatsLatencyRolloverSeconds";
    public static final int DEFAULT_PROMETHEUS_STATS_LATENCY_ROLLOVER_SECONDS = 60;

    final CollectorRegistry registry;
    Server server;

    /*
     * These act as a registry of the metrics defined in this provider.
     */
    final ConcurrentMap<ScopeContext, LongAdderCounter> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<ScopeContext, SimpleGauge<? extends Number>> gauges = new ConcurrentHashMap<>();
    final ConcurrentMap<ScopeContext, DataSketchesOpStatsLogger> opStats = new ConcurrentHashMap<>();
    final ConcurrentMap<ScopeContext, ThreadScopedDataSketchesStatsLogger> threadScopedOpStats =
            new ConcurrentHashMap<>();
    final ConcurrentMap<ScopeContext, ThreadScopedLongAdderCounter> threadScopedCounters =
            new ConcurrentHashMap<>();

    public PrometheusMetricsProvider() {
        this(CollectorRegistry.defaultRegistry);
    }

    public PrometheusMetricsProvider(CollectorRegistry registry) {
        this.registry = registry;
    }

    /**
     * Starts the provider: optionally brings up an embedded Jetty /metrics
     * endpoint, registers the default JVM exports and schedules the periodic
     * latency-sketch rotation.
     */
    @Override
    public void start(Configuration conf) {
        boolean httpEnabled = conf.getBoolean(PROMETHEUS_STATS_HTTP_ENABLE, DEFAULT_PROMETHEUS_STATS_HTTP_ENABLE);
        boolean bkHttpServerEnabled = conf.getBoolean("httpServerEnabled", false);
        boolean exposeDefaultJVMMetrics = conf.getBoolean("exposeDefaultJVMMetrics", true);
        // only start its own http server when prometheus http is enabled and bk http server is not enabled.
        if (httpEnabled && !bkHttpServerEnabled) {
            String httpAddr = conf.getString(PROMETHEUS_STATS_HTTP_ADDRESS, DEFAULT_PROMETHEUS_STATS_HTTP_ADDR);
            int httpPort = conf.getInt(PROMETHEUS_STATS_HTTP_PORT, DEFAULT_PROMETHEUS_STATS_HTTP_PORT);
            InetSocketAddress httpEndpoint = InetSocketAddress.createUnresolved(httpAddr, httpPort);
            this.server = new Server(httpEndpoint);
            ServletContextHandler context = new ServletContextHandler();
            context.setContextPath("/");
            server.setHandler(context);
            context.addServlet(new ServletHolder(new PrometheusServlet(this)), "/metrics");
            try {
                server.start();
                log.info("Started Prometheus stats endpoint at {}", httpEndpoint);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
        if (exposeDefaultJVMMetrics) {
            // Include standard JVM stats
            registerMetrics(new StandardExports());
            registerMetrics(new MemoryPoolsExports());
            registerMetrics(new GarbageCollectorExports());
            registerMetrics(new ThreadExports());
            // Add direct memory allocated through unsafe
            registerMetrics(Gauge.build("jvm_memory_direct_bytes_used", "-").create().setChild(new Child() {
                @Override
                public double get() {
                    return getDirectMemoryUsage.get();
                }
            }));
            registerMetrics(Gauge.build("jvm_memory_direct_bytes_max", "-").create().setChild(new Child() {
                @Override
                public double get() {
                    return PlatformDependent.estimateMaxDirectMemory();
                }
            }));
        }
        executor = Executors.newSingleThreadScheduledExecutor(new DefaultThreadFactory("metrics"));
        int latencyRolloverSeconds = conf.getInt(PROMETHEUS_STATS_LATENCY_ROLLOVER_SECONDS,
                DEFAULT_PROMETHEUS_STATS_LATENCY_ROLLOVER_SECONDS);
        executor.scheduleAtFixedRate(() -> {
            rotateLatencyCollection();
        }, 1, latencyRolloverSeconds, TimeUnit.SECONDS);
    }

    /**
     * Stops the provider: cancels the rotation task, stops the embedded Jetty
     * server (if one was started) and clears the thread registry.
     */
    @Override
    public void stop() {
        // Fix: the rotation executor was previously never shut down, leaving its
        // "metrics" thread alive after stop().
        if (executor != null) {
            executor.shutdownNow();
        }
        if (server != null) {
            try {
                server.stop();
            } catch (Exception e) {
                log.warn("Failed to shutdown Jetty server", e);
            }
        }
        // Fix: cleared unconditionally; thread registrations exist even when the
        // embedded server was never started (e.g. the BK http server is used).
        ThreadRegistry.clear();
    }

    @Override
    public StatsLogger getStatsLogger(String scope) {
        return new PrometheusStatsLogger(PrometheusMetricsProvider.this, scope, Collections.emptyMap());
    }

    /**
     * Writes every known metric (client-registry collectors plus this
     * provider's gauges, counters and op stats) in Prometheus text format.
     */
    @Override
    public void writeAllMetrics(Writer writer) throws IOException {
        // Fresh formatter per scrape: it tracks which TYPE lines were emitted.
        PrometheusTextFormat prometheusTextFormat = new PrometheusTextFormat();
        PrometheusTextFormat.writeMetricsCollectedByPrometheusClient(writer, registry);
        gauges.forEach((sc, gauge) -> prometheusTextFormat.writeGauge(writer, sc.getScope(), gauge));
        counters.forEach((sc, counter) -> prometheusTextFormat.writeCounter(writer, sc.getScope(), counter));
        opStats.forEach((sc, opStatLogger) ->
                prometheusTextFormat.writeOpStat(writer, sc.getScope(), opStatLogger));
    }

    /**
     * Joins the stats name components with '_' (skipping a leading empty
     * component) and sanitizes the result for Prometheus.
     */
    @Override
    public String getStatsName(String... statsComponents) {
        String completeName;
        if (statsComponents.length == 0) {
            return "";
        } else if (statsComponents[0].isEmpty()) {
            completeName = StringUtils.join(statsComponents, '_', 1, statsComponents.length);
        } else {
            completeName = StringUtils.join(statsComponents, '_');
        }
        return Collector.sanitizeMetricName(completeName);
    }

    @VisibleForTesting
    void rotateLatencyCollection() {
        opStats.forEach((name, metric) -> {
            metric.rotateLatencyCollection();
        });
    }

    private void registerMetrics(Collector collector) {
        try {
            collector.register(registry);
        } catch (Exception e) {
            // Ignore if these were already registered
            if (log.isDebugEnabled()) {
                log.debug("Failed to register Prometheus collector exports", e);
            }
        }
    }

    private static final Logger log = LoggerFactory.getLogger(PrometheusMetricsProvider.class);

    /*
     * Try to get Netty counter of used direct memory. This will be correct, unlike the JVM values.
     */
    private static final AtomicLong directMemoryUsage;
    private static final Optional<BufferPoolMXBean> poolMxBeanOp;
    private static final Supplier<Double> getDirectMemoryUsage;

    static {
        if (PlatformDependent.useDirectBufferNoCleaner()) {
            // Netty tracks direct allocations itself; read its private counter.
            poolMxBeanOp = Optional.empty();
            AtomicLong tmpDirectMemoryUsage = null;
            try {
                Field field = PlatformDependent.class.getDeclaredField("DIRECT_MEMORY_COUNTER");
                field.setAccessible(true);
                tmpDirectMemoryUsage = (AtomicLong) field.get(null);
            } catch (Throwable t) {
                log.warn("Failed to access netty DIRECT_MEMORY_COUNTER field {}", t.getMessage());
            }
            directMemoryUsage = tmpDirectMemoryUsage;
            getDirectMemoryUsage = () -> directMemoryUsage != null ? directMemoryUsage.get() : Double.NaN;
        } else {
            // Fall back to the JVM's "direct" buffer pool MXBean.
            directMemoryUsage = null;
            List<BufferPoolMXBean> platformMXBeans = ManagementFactory.getPlatformMXBeans(BufferPoolMXBean.class);
            poolMxBeanOp = platformMXBeans.stream()
                    .filter(bufferPoolMXBean -> bufferPoolMXBean.getName().equals("direct")).findAny();
            getDirectMemoryUsage = () -> poolMxBeanOp.isPresent() ? poolMxBeanOp.get().getMemoryUsed() : Double.NaN;
        }
    }
}
| 656 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/LongAdderCounter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.LongAdder;
import org.apache.bookkeeper.stats.Counter;
/**
 * {@link Counter} implementation based on {@link LongAdder}.
 *
 * <p>A {@link LongAdder} maintains per-thread cells that are summed on read, which
 * avoids write contention when many threads update the same counter.
 */
public class LongAdderCounter implements Counter {

    private final LongAdder counter = new LongAdder();
    private Map<String, String> labels;

    // used for lazy registration for thread scoped metric
    private boolean threadInitialized;

    public LongAdderCounter(Map<String, String> labels) {
        this.labels = labels;
    }

    /** Resets the counter back to zero. */
    @Override
    public void clear() {
        counter.reset();
    }

    /** Adds one to the counter. */
    @Override
    public void inc() {
        counter.increment();
    }

    /** Subtracts one from the counter. */
    @Override
    public void dec() {
        counter.decrement();
    }

    /** Adds {@code delta} (possibly negative) to the counter. */
    @Override
    public void addCount(long delta) {
        counter.add(delta);
    }

    /**
     * Folds a latency sample into the counter as milliseconds, consistent with the
     * {@link DataSketchesOpStatsLogger#registerSuccessfulEvent(long, TimeUnit)} logic.
     */
    @Override
    public void addLatency(long eventLatency, TimeUnit unit) {
        counter.add(unit.toMillis(eventLatency));
    }

    /** Returns the current sum across all threads. */
    @Override
    public Long get() {
        long sum = counter.sum();
        return sum;
    }

    /** Returns the Prometheus labels attached to this counter. */
    public Map<String, String> getLabels() {
        return labels;
    }

    /** True once a thread-scoped registration has assigned labels. */
    public boolean isThreadInitialized() {
        return threadInitialized;
    }

    /** Lazily binds thread-scoped labels and marks this counter initialized. */
    public void initializeThread(Map<String, String> labels) {
        this.labels = labels;
        this.threadInitialized = true;
    }
}
| 657 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/ScopeContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.stats.prometheus;
import java.util.Map;
import java.util.Objects;
/**
* Holder for a scope and a set of associated labels.
*/
public class ScopeContext {
private final String scope;
private final Map<String, String> labels;
public ScopeContext(String scope, Map<String, String> labels) {
this.scope = scope;
this.labels = labels;
}
public String getScope() {
return scope;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
ScopeContext that = (ScopeContext) o;
return Objects.equals(scope, that.scope) && Objects.equals(labels, that.labels);
}
@Override
public int hashCode() {
return Objects.hash(scope, labels);
}
}
| 658 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-providers/prometheus-metrics-provider/src/main/java/org/apache/bookkeeper/stats/prometheus/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
 * A stats provider implementation based on <a href="https://prometheus.io/">Prometheus</a>.
 */
package org.apache.bookkeeper.stats.prometheus;
| 659 |
0 | Create_ds/bookkeeper/stats/utils/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/utils/src/main/java/org/apache/bookkeeper/stats/utils/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Utilities for bookkeeper stats api.
*/
package org.apache.bookkeeper.stats.utils; | 660 |
0 | Create_ds/bookkeeper/stats/utils/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/utils/src/main/java/org/apache/bookkeeper/stats/utils/StatsDocGenerator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.stats.utils;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.google.common.base.Predicate;
import com.google.common.base.Strings;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.Field;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.stream.Collectors;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.apache.bookkeeper.stats.Counter;
import org.apache.bookkeeper.stats.Gauge;
import org.apache.bookkeeper.stats.OpStatsLogger;
import org.apache.bookkeeper.stats.Stats;
import org.apache.bookkeeper.stats.StatsProvider;
import org.apache.bookkeeper.stats.annotations.StatsDoc;
import org.reflections.Reflections;
import org.reflections.util.ConfigurationBuilder;
import org.reflections.util.FilterBuilder;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.DumperOptions.FlowStyle;
import org.yaml.snakeyaml.DumperOptions.ScalarStyle;
import org.yaml.snakeyaml.Yaml;
/**
 * Generator stats documentation.
 *
 * <p>Scans a package for classes whose stats fields are annotated with
 * {@link StatsDoc}, resolves each field to a fully qualified stats name via the
 * configured {@link StatsProvider}, and dumps the collected documentation as
 * YAML grouped by category and stats name.
 */
@Slf4j
public class StatsDocGenerator {

    // The kind of stats object a documented field represents.
    enum StatsType {
        COUNTER,
        GAUGE,
        OPSTATS
    }

    // One documented stat: its resolved name, kind, and help text.
    @AllArgsConstructor
    @Data
    static class StatsDocEntry {
        private String name;
        private StatsType type;
        private String description;

        // Renders this entry as the property map serialized under the stats name.
        public Map<String, String> properties() {
            Map<String, String> properties = new TreeMap<>();
            properties.put("type", type.name());
            properties.put("description", description);
            return properties;
        }
    }

    // Builds a Reflections scanner restricted to the given package prefix, using
    // the classpath URLs of both this class' loader and the context loader.
    private static Reflections newReflections(String packagePrefix) {
        List<URL> urls = new ArrayList<>();
        ClassLoader[] classLoaders = new ClassLoader[] {
            StatsDocGenerator.class.getClassLoader(),
            Thread.currentThread().getContextClassLoader()
        };
        for (int i = 0; i < classLoaders.length; i++) {
            if (classLoaders[i] instanceof URLClassLoader) {
                urls.addAll(Arrays.asList(((URLClassLoader) classLoaders[i]).getURLs()));
            } else {
                // NOTE(review): on JDK 9+ the application classloader is no longer a
                // URLClassLoader, so this generator only works where both loaders are
                // URL-based — confirm the target JDK before relying on it.
                throw new RuntimeException("ClassLoader '" + classLoaders[i] + " is not an instance of URLClassLoader");
            }
        }
        Predicate<String> filters = new FilterBuilder()
            .includePackage(packagePrefix);
        ConfigurationBuilder confBuilder = new ConfigurationBuilder();
        confBuilder.filterInputsBy(filters);
        confBuilder.setUrls(urls);
        return new Reflections(confBuilder);
    }

    private final String packagePrefix;
    private final Reflections reflections;
    // Used to compose fully qualified stats names the same way the runtime does.
    private final StatsProvider statsProvider;
    // category -> (stats name -> doc entry); both levels sorted for stable YAML output.
    private final NavigableMap<String, NavigableMap<String, StatsDocEntry>> docEntries = new TreeMap<>();

    public StatsDocGenerator(String packagePrefix,
                             StatsProvider provider) {
        this.packagePrefix = packagePrefix;
        this.reflections = newReflections(packagePrefix);
        this.statsProvider = provider;
    }

    /**
     * Scans the configured package and writes the stats doc to {@code filename}
     * (null/empty means stdout).
     *
     * @param filename output yaml file, or null/empty for stdout
     * @throws Exception on scanning or I/O failure
     */
    public void generate(String filename) throws Exception {
        log.info("Processing classes under package {}", packagePrefix);
        // get all classes annotated with `StatsDoc`
        Set<Class<?>> annotatedClasses = reflections.getTypesAnnotatedWith(StatsDoc.class);
        log.info("Retrieve all `StatsDoc` annotated classes : {}", annotatedClasses);
        for (Class<?> annotatedClass : annotatedClasses) {
            generateDocForAnnotatedClass(annotatedClass);
        }
        log.info("Successfully processed classes under package {}", packagePrefix);
        log.info("Writing stats doc to file {}", filename);
        writeDoc(filename);
        log.info("Successfully wrote stats doc to file {}", filename);
    }

    // Collects doc entries for every `StatsDoc`-annotated field of the given
    // `StatsDoc`-annotated class; the class-level annotation supplies scope/category.
    private void generateDocForAnnotatedClass(Class<?> annotatedClass) {
        StatsDoc scopeStatsDoc = annotatedClass.getDeclaredAnnotation(StatsDoc.class);
        if (scopeStatsDoc == null) {
            return;
        }
        log.info("Processing StatsDoc annotated class {} : {}", annotatedClass, scopeStatsDoc);
        Field[] fields = annotatedClass.getDeclaredFields();
        for (Field field : fields) {
            StatsDoc fieldStatsDoc = field.getDeclaredAnnotation(StatsDoc.class);
            if (null == fieldStatsDoc) {
                // it is not a `StatsDoc` annotated field
                continue;
            }
            generateDocForAnnotatedField(scopeStatsDoc, fieldStatsDoc, field);
        }
        log.info("Successfully processed StatsDoc annotated class {}.", annotatedClass);
    }

    // Returns (creating on demand) the per-category map of stats name -> entry.
    private NavigableMap<String, StatsDocEntry> getCategoryMap(String category) {
        NavigableMap<String, StatsDocEntry> categoryMap = docEntries.get(category);
        if (null == categoryMap) {
            categoryMap = new TreeMap<>();
            docEntries.put(category, categoryMap);
        }
        return categoryMap;
    }

    // Resolves the field's stats name and type and records a doc entry under the
    // enclosing class' category. Throws if the field is not a known stats type.
    private void generateDocForAnnotatedField(StatsDoc scopedStatsDoc, StatsDoc fieldStatsDoc, Field field) {
        NavigableMap<String, StatsDocEntry> categoryMap = getCategoryMap(scopedStatsDoc.category());
        String statsName =
            statsProvider.getStatsName(scopedStatsDoc.scope(), scopedStatsDoc.name(), fieldStatsDoc.name());
        StatsType statsType;
        if (Counter.class.isAssignableFrom(field.getType())) {
            statsType = StatsType.COUNTER;
        } else if (Gauge.class.isAssignableFrom(field.getType())) {
            statsType = StatsType.GAUGE;
        } else if (OpStatsLogger.class.isAssignableFrom(field.getType())) {
            statsType = StatsType.OPSTATS;
        } else {
            throw new IllegalArgumentException("Unknown stats field '" + field.getName()
                + "' is annotated with `StatsDoc`: " + field.getType());
        }
        String helpDesc = fieldStatsDoc.help();
        StatsDocEntry docEntry = new StatsDocEntry(statsName, statsType, helpDesc);
        categoryMap.put(statsName, docEntry);
    }

    // Serializes the collected entries as YAML to the given file, or to stdout
    // when the file name is null/empty.
    private void writeDoc(String file) throws IOException {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(FlowStyle.BLOCK);
        options.setDefaultScalarStyle(ScalarStyle.LITERAL);
        Yaml yaml = new Yaml(options);
        Writer writer;
        if (Strings.isNullOrEmpty(file)) {
            writer = new OutputStreamWriter(System.out, UTF_8);
        } else {
            writer = new OutputStreamWriter(new FileOutputStream(file), UTF_8);
        }
        try {
            // Flatten entries into plain nested maps so snakeyaml emits simple
            // `category -> name -> {type, description}` mappings.
            Map<String, Map<String, Map<String, String>>> docs = docEntries.entrySet()
                .stream()
                .collect(Collectors.toMap(
                    e -> e.getKey(),
                    e -> e.getValue().entrySet()
                        .stream()
                        .collect(Collectors.toMap(
                            e1 -> e1.getKey(),
                            e1 -> e1.getValue().properties()
                        ))
                ));
            yaml.dump(docs, writer);
            writer.flush();
        } finally {
            writer.close();
        }
    }

    /**
     * Args for stats generator.
     */
    private static class MainArgs {
        @Parameter(
            names = {
                "-p", "--package"
            },
            description = "Package prefix of the classes to generate stats doc")
        String packagePrefix = "org.apache.bookkeeper";

        @Parameter(
            names = {
                "-sp", "--stats-provider"
            },
            description = "The stats provider used for generating stats doc")
        String statsProviderClass = "prometheus";

        @Parameter(
            names = {
                "-o", "--output-yaml-file"
            },
            description = "The output yaml file to dump stats docs."
                + " If omitted, the output goes to stdout."
        )
        String yamlFile = null;

        @Parameter(
            names = {
                "-h", "--help"
            },
            description = "Show this help message")
        boolean help = false;
    }

    // CLI entry point: parse args, install the requested stats provider, generate the doc.
    public static void main(String[] args) throws Exception {
        MainArgs mainArgs = new MainArgs();
        JCommander commander = new JCommander();
        try {
            commander.setProgramName("stats-doc-gen");
            commander.addObject(mainArgs);
            commander.parse(args);
            if (mainArgs.help) {
                commander.usage();
                Runtime.getRuntime().exit(0);
                return;
            }
        } catch (Exception e) {
            commander.usage();
            Runtime.getRuntime().exit(-1);
            return;
        }
        Stats.loadStatsProvider(getStatsProviderClass(mainArgs.statsProviderClass));
        StatsProvider provider = Stats.get();
        StatsDocGenerator docGen = new StatsDocGenerator(
            mainArgs.packagePrefix,
            provider
        );
        docGen.generate(mainArgs.yamlFile);
    }

    // Maps a short provider alias to its fully qualified class name; any other
    // value is assumed to already be a class name.
    private static String getStatsProviderClass(String providerClass) {
        switch (providerClass.toLowerCase()) {
        case "prometheus":
            return "org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider";
        case "codahale":
            return "org.apache.bookkeeper.stats.codahale.CodahaleMetricsProvider";
        default:
            return providerClass;
        }
    }
}
| 661 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/NullStatsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.configuration.Configuration;
/**
 * A <i>no-op</i> stats provider implementation.
 */
@SuppressFBWarnings("EI_EXPOSE_REP2")
public class NullStatsProvider implements StatsProvider {

    // Single shared no-op logger handed out for every scope.
    final StatsLogger nullStatsLogger = new NullStatsLogger();

    @Override
    public void start(Configuration conf) {
        // nop
    }

    @Override
    public void stop() {
        // nop
    }

    /**
     * Returns the shared no-op logger regardless of the requested scope.
     */
    @Override
    public StatsLogger getStatsLogger(String scope) {
        return nullStatsLogger;
    }
}
| 662 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/Stats.java | /*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.bookkeeper.stats;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import org.apache.commons.configuration.Configuration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * An umbrella class for loading stats provider.
 *
 * <p>Holds a single process-wide {@link StatsProvider}, defaulting to a
 * {@link NullStatsProvider} until {@code loadStatsProvider} installs another one.
 */
public class Stats {
    static final Logger LOG = LoggerFactory.getLogger(Stats.class);

    public static final String STATS_PROVIDER_CLASS = "statsProviderClass";

    static StatsProvider prov = new NullStatsProvider();

    /**
     * Loads the provider named by the {@code statsProviderClass} key of the given
     * configuration. A missing key leaves the current provider untouched.
     *
     * @param conf configuration to read the provider class name from
     */
    public static void loadStatsProvider(Configuration conf) {
        String className = conf.getString(STATS_PROVIDER_CLASS);
        loadStatsProvider(className);
    }

    /**
     * Instantiates {@code className} via its no-arg constructor and installs it as
     * the process-wide provider. On any reflective failure the error is logged and
     * the previously installed provider is kept.
     *
     * @param className fully qualified provider class name; {@code null} is a no-op
     */
    public static void loadStatsProvider(String className) {
        if (className != null) {
            try {
                // asSubclass replaces the old raw-typed unchecked constructor cast:
                // a class that is not a StatsProvider still fails with a
                // ClassCastException, just earlier and without a compiler warning.
                Class<? extends StatsProvider> cls =
                        Class.forName(className).asSubclass(StatsProvider.class);
                Constructor<? extends StatsProvider> cons = cls.getDeclaredConstructor();
                prov = cons.newInstance();
            } catch (ClassNotFoundException cnfe) {
                LOG.error("Couldn't find configured class(" + className + ")", cnfe);
            } catch (NoSuchMethodException nsme) {
                LOG.error("Couldn't find default constructor for class (" + className + ")", nsme);
            } catch (InstantiationException ie) {
                LOG.error("Couldn't construct class (" + className + ")", ie);
            } catch (IllegalAccessException iae) {
                LOG.error("Couldn't construct class (" + className + "),"
                        + " Is the constructor private?", iae);
            } catch (InvocationTargetException ite) {
                LOG.error("Constructor threw an exception. It should not have.", ite);
            }
        }
    }

    /** Returns the currently installed stats provider (never null). */
    @SuppressFBWarnings("EI_EXPOSE_REP2")
    public static StatsProvider get() {
        return prov;
    }
}
| 663 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/Gauge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.bookkeeper.stats;
/**
 * A gauge is a value that has only one value at a specific point in time.
 * An example is the number of elements in a queue. The value of T must be
 * some numeric type.
 */
public interface Gauge<T extends Number> {
    /**
     * Returns the value to report when no sample is available.
     */
    T getDefaultValue();

    /**
     * Returns the current value of the gauge.
     */
    T getSample();
}
| 664 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/CachingStatsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.commons.configuration.Configuration;
/**
 * A {@code CachingStatsProvider} adds the caching functionality to an existing {@code StatsProvider}.
 *
 * <p>The stats provider will cache the stats objects created by the other {@code StatsProvider} to allow
 * the reusability of stats objects and avoid creating a lot of stats objects.
 */
public class CachingStatsProvider implements StatsProvider {
    protected final StatsProvider underlying;
    // Cache of scope -> caching wrapper; at most one wrapper is created per scope.
    protected final ConcurrentMap<String, StatsLogger> statsLoggers;

    public CachingStatsProvider(StatsProvider provider) {
        this.underlying = provider;
        this.statsLoggers = new ConcurrentHashMap<String, StatsLogger>();
    }

    @Override
    public void start(Configuration conf) {
        this.underlying.start(conf);
    }

    @Override
    public void stop() {
        this.underlying.stop();
    }

    /**
     * Returns the cached logger for {@code scope}, creating and caching a wrapper
     * around the underlying provider's logger on first request.
     */
    @Override
    public StatsLogger getStatsLogger(String scope) {
        // computeIfAbsent replaces the old get/putIfAbsent dance: it never builds
        // a wrapper (nor asks the underlying provider for a logger) only to throw
        // it away when another thread wins the race.
        return statsLoggers.computeIfAbsent(scope,
                s -> new CachingStatsLogger(underlying.getStatsLogger(s)));
    }

    @Override
    public String getStatsName(String... statsComponents) {
        return underlying.getStatsName(statsComponents);
    }
}
| 665 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/Counter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.util.concurrent.TimeUnit;
/**
 * Simple stats that require only increment and decrement
 * functions on a Long. Metrics like the number of topics, persist queue size
 * etc. should use this.
 */
public interface Counter {
    /**
     * Clear this stat.
     */
    void clear();

    /**
     * Increment the value associated with this stat.
     */
    void inc();

    /**
     * Decrement the value associated with this stat.
     */
    void dec();

    /**
     * Add delta to the value associated with this stat.
     *
     * @param delta amount to add (may be negative)
     */
    void addCount(long delta);

    /**
     * An operation succeeded with the given eventLatency. Update
     * stats to reflect the same
     *
     * @param eventLatency The event latency
     * @param unit time unit of {@code eventLatency}
     */
    void addLatency(long eventLatency, TimeUnit unit);

    /**
     * Get the value associated with this stat.
     *
     * @return the current counter value
     */
    Long get();
}
| 666 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/ThreadRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * For mapping thread ids to thread pools and threads within those pools
 * or just for lone named threads. Thread scoped metrics add labels to
 * metrics by retrieving the ThreadPoolThread object from this registry.
 * For flexibility, this registry is not based on TLS.
 */
public class ThreadRegistry {
    // thread id -> registration; final so the map reference can never be swapped
    // out from under concurrent readers.
    private static final ConcurrentMap<Long, ThreadPoolThread> threadPoolMap = new ConcurrentHashMap<>();

    /*
     Threads can register themselves as their first act before carrying out
     any work.
     */
    public static void register(String threadPool, int threadPoolThread) {
        register(threadPool, threadPoolThread, Thread.currentThread().getId());
    }

    /*
     Thread factories can register a thread by its id.
     */
    public static void register(String threadPool, int threadPoolThread, long threadId) {
        ThreadPoolThread tpt = new ThreadPoolThread(threadPool, threadPoolThread, threadId);
        threadPoolMap.put(threadId, tpt);
    }

    /*
     Clears all stored thread state.
     */
    public static void clear() {
        threadPoolMap.clear();
    }

    /*
     Retrieves the registered ThreadPoolThread (if registered) for the calling thread.
     Returns null when the calling thread never registered.
     */
    public static ThreadPoolThread get() {
        return threadPoolMap.get(Thread.currentThread().getId());
    }

    /**
     * Stores the thread pool and ordinal.
     */
    public static final class ThreadPoolThread {
        final String threadPool;
        final int ordinal;
        final long threadId;

        public ThreadPoolThread(String threadPool, int ordinal, long threadId) {
            this.threadPool = threadPool;
            this.ordinal = ordinal;
            this.threadId = threadId;
        }

        public String getThreadPool() {
            return threadPool;
        }

        public int getOrdinal() {
            return ordinal;
        }

        /** Returns the id of the registered thread (previously stored but unreadable). */
        public long getThreadId() {
            return threadId;
        }
    }
}
| 667 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/CachingStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * A {@code StatsLogger} that caches the stats objects created by other {@code StatsLogger}.
 */
public class CachingStatsLogger implements StatsLogger {
    protected final StatsLogger underlying;
    // Per-name caches; each stat object is created at most once per name.
    protected final ConcurrentMap<String, Counter> counters;
    protected final ConcurrentMap<String, OpStatsLogger> opStatsLoggers;
    protected final ConcurrentMap<String, StatsLogger> scopeStatsLoggers;

    @SuppressFBWarnings("EI_EXPOSE_REP2")
    public CachingStatsLogger(StatsLogger statsLogger) {
        this.underlying = statsLogger;
        this.counters = new ConcurrentHashMap<String, Counter>();
        this.opStatsLoggers = new ConcurrentHashMap<String, OpStatsLogger>();
        this.scopeStatsLoggers = new ConcurrentHashMap<String, StatsLogger>();
    }

    @Override
    public int hashCode() {
        return underlying.hashCode();
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof CachingStatsLogger)) {
            return false;
        }
        CachingStatsLogger another = (CachingStatsLogger) obj;
        return underlying.equals(another.underlying);
    }

    @Override
    public String toString() {
        return underlying.toString();
    }

    /**
     * Returns the cached op-stats logger for {@code name}, creating one from the
     * underlying logger on first request.
     */
    @Override
    public OpStatsLogger getOpStatsLogger(String name) {
        // computeIfAbsent replaces the old get/putIfAbsent pattern, which could
        // create an object only to discard it on a lost race.
        return opStatsLoggers.computeIfAbsent(name, underlying::getOpStatsLogger);
    }

    /**
     * Returns the cached counter for {@code name}, creating one from the
     * underlying logger on first request.
     */
    @Override
    public Counter getCounter(String name) {
        return counters.computeIfAbsent(name, underlying::getCounter);
    }

    @Override
    public <T extends Number> void registerGauge(String name, Gauge<T> gauge) {
        underlying.registerGauge(name, gauge);
    }

    @Override
    public <T extends Number> void unregisterGauge(String name, Gauge<T> gauge) {
        underlying.unregisterGauge(name, gauge);
    }

    /**
     * Returns the cached child scope for {@code name}, wrapping the underlying
     * scope in another caching logger on first request.
     */
    @Override
    public StatsLogger scope(String name) {
        return scopeStatsLoggers.computeIfAbsent(name,
                n -> new CachingStatsLogger(underlying.scope(n)));
    }

    @Override
    public void removeScope(String name, StatsLogger statsLogger) {
        scopeStatsLoggers.remove(name, statsLogger);
    }

    /**
     Thread-scoped stats not currently supported; falls back to the cached
     process-wide op stats logger.
     */
    @Override
    public OpStatsLogger getThreadScopedOpStatsLogger(String name) {
        return getOpStatsLogger(name);
    }

    /**
     Thread-scoped stats not currently supported; falls back to the cached
     process-wide counter.
     */
    @Override
    public Counter getThreadScopedCounter(String name) {
        return getCounter(name);
    }
}
| 668 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/NullStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import java.util.concurrent.TimeUnit;
/**
 * A <i>no-op</i> {@code StatsLogger}.
 *
 * <p>Metrics are not recorded, making this receiver useful in unit tests and as defaults in
 * situations where metrics are not strictly required.
 */
@SuppressFBWarnings("EI_EXPOSE_REP2")
public class NullStatsLogger implements StatsLogger {
    /** Shared singleton; the class is stateless, so one instance serves everyone. */
    public static final NullStatsLogger INSTANCE = new NullStatsLogger();

    /**
     * A <i>no-op</i> {@code OpStatsLogger}.
     */
    static class NullOpStatsLogger implements OpStatsLogger {
        // Empty stats snapshot returned from toOpStatsData(); six zeroed percentile slots.
        final OpStatsData nullOpStats = new OpStatsData(0, 0, 0, new long[6]);

        @Override
        public void registerFailedEvent(long eventLatency, TimeUnit unit) {
            // nop
        }

        @Override
        public void registerSuccessfulEvent(long eventLatency, TimeUnit unit) {
            // nop
        }

        @Override
        public void registerSuccessfulValue(long value) {
            // nop
        }

        @Override
        public void registerFailedValue(long value) {
            // nop
        }

        @Override
        public OpStatsData toOpStatsData() {
            return nullOpStats;
        }

        @Override
        public void clear() {
            // nop
        }
    }
    // final: this shared no-op instance is never reassigned (SpotBugs MS_SHOULD_BE_FINAL).
    static final NullOpStatsLogger nullOpStatsLogger = new NullOpStatsLogger();

    /**
     * A <i>no-op</i> {@code Counter}.
     */
    static class NullCounter implements Counter {
        @Override
        public void clear() {
            // nop
        }

        @Override
        public void inc() {
            // nop
        }

        @Override
        public void dec() {
            // nop
        }

        @Override
        public void addCount(long delta) {
            // nop
        }

        @Override
        public void addLatency(long eventLatency, TimeUnit unit) {
            // nop
        }

        @Override
        public Long get() {
            return 0L;
        }
    }
    // final: this shared no-op instance is never reassigned (SpotBugs MS_SHOULD_BE_FINAL).
    static final NullCounter nullCounter = new NullCounter();

    @Override
    public OpStatsLogger getOpStatsLogger(String name) {
        return nullOpStatsLogger;
    }

    @Override
    public Counter getCounter(String name) {
        return nullCounter;
    }

    @Override
    public <T extends Number> void registerGauge(String name, Gauge<T> gauge) {
        // nop
    }

    @Override
    public <T extends Number> void unregisterGauge(String name, Gauge<T> gauge) {
        // nop
    }

    @Override
    public StatsLogger scope(String name) {
        // Scoping a no-op logger yields the same no-op logger.
        return this;
    }

    @Override
    public void removeScope(String name, StatsLogger statsLogger) {
        // nop
    }

    @Override
    public OpStatsLogger getThreadScopedOpStatsLogger(String name) {
        return getOpStatsLogger(name);
    }

    @Override
    public Counter getThreadScopedCounter(String name) {
        return getCounter(name);
    }
}
| 669 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/AlertStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is used to raise an alert when we detect an event that should never happen in
 * production.
 *
 * <p>Each raised alert bumps a counter named {@code alertStatName} on the global stats logger
 * (and, when a scope was supplied, on the scoped stats logger as well), then logs the message
 * at ERROR level together with a synthetic exception so the call site's stack trace is captured.
 *
 * <p>NOTE(review): counter initialization is lazy and unsynchronized; concurrent first calls to
 * {@link #raise(String, Object...)} may each fetch a counter from the stats logger — presumably
 * benign when the provider caches counters by name, but confirm against the provider in use.
 */
public class AlertStatsLogger {
    private static final Logger logger = LoggerFactory.getLogger(AlertStatsLogger.class);
    // Name of the counter bumped every time an alert is raised.
    public final String alertStatName;
    private final StatsLogger globalStatsLogger;
    private final StatsLogger scopedStatsLogger;
    private final String scope;
    // Lazily initialized on the first raise() call; see initializeCountersIfNeeded().
    private Counter globalCounter = null;
    private Counter scopedCounter = null;
    public AlertStatsLogger(StatsLogger globalStatsLogger, String scope, String alertStatName) {
        this.globalStatsLogger = globalStatsLogger;
        this.scope = scope;
        this.scopedStatsLogger = globalStatsLogger.scope(scope);
        this.alertStatName = alertStatName;
    }
    public AlertStatsLogger(StatsLogger globalStatsLogger, String alertStatName) {
        this.globalStatsLogger = globalStatsLogger;
        this.scope = null;
        this.scopedStatsLogger = null;
        this.alertStatName = alertStatName;
    }
    // Prefixes "ALERT!: " (plus "(scope):" when a scope is set) unless already prefixed.
    private String format(String msg) {
        return msg.startsWith("ALERT!: ") ? msg :
                ("ALERT!: " + (scope != null ? "(" + scope + "):" : "") + msg);
    }
    private void initializeCountersIfNeeded() {
        if (null != globalCounter) {
            return;
        }
        globalCounter = globalStatsLogger.getCounter(alertStatName);
        if (null != scopedStatsLogger) {
            scopedCounter = scopedStatsLogger.getCounter(alertStatName);
        }
    }
    /**
     * Report an alertable condition. Prefixes "ALERT!: " if not already prefixed.
     */
    public void raise(String msg, Object... args) {
        initializeCountersIfNeeded();
        globalCounter.inc();
        if (null != scopedCounter) {
            scopedCounter.inc();
        }
        logger.error(format(msg), args);
        // Logged solely to capture a stack trace of where the alert originated.
        logger.error("fake exception to generate stack trace", new Exception());
    }
}
| 670 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/StatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
/**
 * Entry point for recording stats: hands out {@link OpStatsLogger}s for operation stats,
 * {@link Counter}s for simple stats, gauges, and nested scopes.
 */
public interface StatsLogger {
    /**
     * Returns the logger for an operation stat described by <i>name</i>.
     *
     * @param name stats name.
     * @return op stats logger for <i>name</i>.
     */
    OpStatsLogger getOpStatsLogger(String name);

    /**
     * Returns the logger for an operation stat described by <i>name</i>, carrying extra labels
     * for the threadpool/threadname and thread number. Lone threads always report 0 as their
     * thread number.
     *
     * @param name stats name.
     * @return thread-scoped op stats logger for <i>name</i>.
     */
    OpStatsLogger getThreadScopedOpStatsLogger(String name);

    /**
     * Returns the counter for the simple stat described by <i>name</i>.
     *
     * @param name stats name.
     * @return counter for <i>name</i>.
     */
    Counter getCounter(String name);

    /**
     * Returns the counter for the simple stat described by <i>name</i>, carrying extra labels
     * for the threadpool/threadname and thread number. Lone threads always report 0 as their
     * thread number.
     *
     * @param name stats name.
     * @return thread-scoped counter for <i>name</i>.
     */
    Counter getThreadScopedCounter(String name);

    /**
     * Registers the given <i>gauge</i> under <i>name</i>.
     *
     * @param name gauge name.
     * @param gauge gauge function.
     */
    <T extends Number> void registerGauge(String name, Gauge<T> gauge);

    /**
     * Unregisters the given <i>gauge</i> from <i>name</i>.
     *
     * @param name name of the gauge.
     * @param gauge gauge function.
     */
    <T extends Number> void unregisterGauge(String name, Gauge<T> gauge);

    /**
     * Returns the stats logger under scope <i>name</i>.
     *
     * @param name scope name.
     * @return stats logger under scope <i>name</i>.
     */
    StatsLogger scope(String name);

    /**
     * Returns a stats logger with an attached label.
     *
     * @param labelName the name of the label.
     * @param labelValue the value of the label.
     * @return stats logger with the label attached.
     */
    default StatsLogger scopeLabel(String labelName, String labelValue) {
        // Backward-compatible default: fold the label pair into a plain scope name,
        // replacing characters that commonly clash with metric-name syntax.
        String sanitizedValue = labelValue.replace('.', '_').replace('-', '_').replace(':', '_');
        return scope(labelName + "_" + sanitizedValue);
    }

    /**
     * Removes the given <i>statsLogger</i> registered under scope <i>name</i>. May be a no-op
     * if the underlying stats provider cannot remove scopes.
     *
     * @param name name of the scope.
     * @param statsLogger the stats logger of this scope.
     */
    void removeScope(String name, StatsLogger statsLogger);
}
| 671 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/OpStatsData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.util.Arrays;
/**
 * A read-only view of operation-specific stats, exposed to JMX.
 *
 * <p>Uses primitives because the class has to conform to CompositeViewData. All latency values
 * are in milliseconds.
 */
public class OpStatsData {
    private final long numSuccessfulEvents;
    private final long numFailedEvents;
    private final double avgLatencyMillis;
    // Percentile slots, in order: p10, p50, p90, p99, p99.9, p99.99.
    private final long[] percentileLatenciesMillis;

    public OpStatsData(long numSuccessfulEvents, long numFailedEvents,
                       double avgLatencyMillis, long[] percentileLatenciesMillis) {
        this.numSuccessfulEvents = numSuccessfulEvents;
        this.numFailedEvents = numFailedEvents;
        this.avgLatencyMillis = avgLatencyMillis;
        // Defensive copy so callers cannot mutate our snapshot afterwards.
        this.percentileLatenciesMillis = percentileLatenciesMillis.clone();
    }

    public long getP10Latency() {
        return percentileLatenciesMillis[0];
    }

    public long getP50Latency() {
        return percentileLatenciesMillis[1];
    }

    public long getP90Latency() {
        return percentileLatenciesMillis[2];
    }

    public long getP99Latency() {
        return percentileLatenciesMillis[3];
    }

    public long getP999Latency() {
        return percentileLatenciesMillis[4];
    }

    public long getP9999Latency() {
        return percentileLatenciesMillis[5];
    }

    public long getNumSuccessfulEvents() {
        return numSuccessfulEvents;
    }

    public long getNumFailedEvents() {
        return numFailedEvents;
    }

    public double getAvgLatencyMillis() {
        return avgLatencyMillis;
    }
}
| 672 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/StatsProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.io.IOException;
import java.io.Writer;
import org.apache.commons.configuration.Configuration;
import org.apache.commons.lang.StringUtils;
/**
 * Provider to provide stats logger for different scopes.
 */
public interface StatsProvider {
    /**
     * Initialize the stats provider by loading the given configuration <i>conf</i>.
     *
     * @param conf
     *          Configuration to configure the stats provider.
     */
    void start(Configuration conf);
    /**
     * Close the stats provider.
     */
    void stop();
    /**
     * Write all the metrics managed by this provider to the given <i>writer</i>.
     *
     * <p>The default implementation does not support exporting and always throws
     * {@link UnsupportedOperationException}; providers that can render their metrics as text
     * should override this.
     *
     * @param writer the writer to render all metrics to.
     * @throws IOException when failed to write the metrics.
     */
    default void writeAllMetrics(Writer writer) throws IOException {
        throw new UnsupportedOperationException("writeAllMetrics is not implemented yet");
    }
    /**
     * Return the stats logger to a given <i>scope</i>.
     * @param scope
     *          Scope for the given stats
     * @return stats logger for the given <i>scope</i>
     */
    StatsLogger getStatsLogger(String scope);
    /**
     * Return the fully qualified stats name comprised of given <tt>statsComponents</tt>,
     * joined with {@code '/'}.
     *
     * @param statsComponents stats components to comprise the fully qualified stats name
     * @return the fully qualified stats name
     */
    default String getStatsName(String...statsComponents) {
        return StringUtils.join(statsComponents, '/');
    }
}
| 673 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/OpStatsLogger.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.bookkeeper.stats;
import java.util.concurrent.TimeUnit;
/**
 * This interface handles logging of statistics related to each operation. (PUBLISH, CONSUME etc.)
 */
public interface OpStatsLogger {
    /**
     * Increment the failed op counter with the given eventLatency.
     *
     * @param eventLatency The event latency
     * @param unit time unit of {@code eventLatency}
     */
    void registerFailedEvent(long eventLatency, TimeUnit unit);
    /**
     * An operation succeeded with the given eventLatency. Update
     * stats to reflect the same.
     *
     * @param eventLatency The event latency
     * @param unit time unit of {@code eventLatency}
     */
    void registerSuccessfulEvent(long eventLatency, TimeUnit unit);
    /**
     * An operation with the given value succeeded.
     *
     * @param value the value associated with the successful operation
     */
    void registerSuccessfulValue(long value);
    /**
     * An operation with the given value failed.
     *
     * @param value the value associated with the failed operation
     */
    void registerFailedValue(long value);
    /**
     * @return Returns an OpStatsData object populated with the current stat values. We need
     *         this function to support JMX exports. This should be deprecated sometime in the
     *         near future.
     */
    OpStatsData toOpStatsData();
    /**
     * Clear stats for this operation.
     */
    void clear();
}
| 674 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
/**
* A lightweight stats library used for exporting bookkeeper stats.
*/
package org.apache.bookkeeper.stats;
| 675 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/annotations/StatsDoc.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.bookkeeper.stats.annotations;
import java.lang.annotation.Documented;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
/**
 * Documenting the stats.
 */
@Retention(RetentionPolicy.RUNTIME)
@Documented
public @interface StatsDoc {
    /**
     * The name of the category to group stats together.
     *
     * @return name of the stats category.
     */
    String category() default "";
    /**
     * The scope of this stats.
     *
     * @return scope of this stats
     */
    String scope() default "";
    /**
     * The name of this stats.
     *
     * @return name of this stats
     */
    String name();
    /**
     * The help message of this stats.
     *
     * @return help message of this stats
     */
    String help();
    /**
     * The parent metric name.
     *
     * <p>It can be used for analyzing the relationships
     * between the metrics, especially for the latency metrics.
     *
     * @return the parent metric name
     */
    String parent() default "";
    /**
     * The metric name of an operation that happens
     * after the operation of this metric.
     *
     * <p>Similar to {@link #parent()}, it can be used for analyzing
     * the dependencies between metrics.
     *
     * @return the metric name of an operation that happens after the operation of this metric.
     */
    String happensAfter() default "";
}
| 676 |
0 | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats | Create_ds/bookkeeper/stats/bookkeeper-stats-api/src/main/java/org/apache/bookkeeper/stats/annotations/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Annotations for bookkeeper stats api.
*/
package org.apache.bookkeeper.stats.annotations; | 677 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/bean/Page.java | package org.apache.dubbo.benchmark.bean;
import java.io.Serializable;
import java.util.List;
/**
 * Serializable pagination holder carrying one page of benchmark results.
 *
 * @param <T> element type of the page.
 */
public class Page<T> implements Serializable {
    private static final long serialVersionUID = -7529237188686406553L;

    // Page number of this slice of results.
    private int pageNo;
    // Total number of elements across all pages.
    private int total;
    // The elements of this page.
    private List<T> result;

    public int getPageNo() {
        return this.pageNo;
    }

    public void setPageNo(int pageNo) {
        this.pageNo = pageNo;
    }

    public int getTotal() {
        return this.total;
    }

    public void setTotal(int total) {
        this.total = total;
    }

    public List<T> getResult() {
        return this.result;
    }

    public void setResult(List<T> result) {
        this.result = result;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Page [pageNo=");
        sb.append(pageNo)
                .append(", total=").append(total)
                .append(", result=").append(result)
                .append(']');
        return sb.toString();
    }
}
| 678 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/bean/User.java | package org.apache.dubbo.benchmark.bean;
import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.List;
/**
 * Serializable user bean exchanged between the benchmark client and server.
 */
public class User implements Serializable {
    private static final long serialVersionUID = 2566816725396650300L;

    private long id;
    private String name;
    private int sex;
    private LocalDate birthday;
    private String email;
    private String mobile;
    private String address;
    private String icon;
    private List<Integer> permissions;
    private int status;
    private LocalDateTime createTime;
    private LocalDateTime updateTime;

    public long getId() {
        return this.id;
    }

    public void setId(long id) {
        this.id = id;
    }

    public String getName() {
        return this.name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public int getSex() {
        return this.sex;
    }

    public void setSex(int sex) {
        this.sex = sex;
    }

    public LocalDate getBirthday() {
        return this.birthday;
    }

    public void setBirthday(LocalDate birthday) {
        this.birthday = birthday;
    }

    public String getEmail() {
        return this.email;
    }

    public void setEmail(String email) {
        this.email = email;
    }

    public String getMobile() {
        return this.mobile;
    }

    public void setMobile(String mobile) {
        this.mobile = mobile;
    }

    public String getAddress() {
        return this.address;
    }

    public void setAddress(String address) {
        this.address = address;
    }

    public String getIcon() {
        return this.icon;
    }

    public void setIcon(String icon) {
        this.icon = icon;
    }

    public List<Integer> getPermissions() {
        return this.permissions;
    }

    public void setPermissions(List<Integer> permissions) {
        this.permissions = permissions;
    }

    public int getStatus() {
        return this.status;
    }

    public void setStatus(int status) {
        this.status = status;
    }

    public LocalDateTime getCreateTime() {
        return this.createTime;
    }

    public void setCreateTime(LocalDateTime createTime) {
        this.createTime = createTime;
    }

    public LocalDateTime getUpdateTime() {
        return this.updateTime;
    }

    public void setUpdateTime(LocalDateTime updateTime) {
        this.updateTime = updateTime;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("User [id=");
        sb.append(id)
                .append(", name=").append(name)
                .append(", sex=").append(sex)
                .append(", birthday=").append(birthday)
                .append(", email=").append(email)
                .append(", mobile=").append(mobile)
                .append(", address=").append(address)
                .append(", icon=").append(icon)
                .append(", permissions=").append(permissions)
                .append(", status=").append(status)
                .append(", createTime=").append(createTime)
                .append(", updateTime=").append(updateTime)
                .append(']');
        return sb.toString();
    }
}
| 679 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/serialize/SerializationOptimizerImpl.java | package org.apache.dubbo.benchmark.serialize;
import org.apache.dubbo.benchmark.bean.Page;
import org.apache.dubbo.benchmark.bean.User;
import org.apache.dubbo.benchmark.service.UserService;
import org.apache.dubbo.common.serialize.support.SerializationOptimizer;
import java.util.Arrays;
import java.util.Collection;
/**
 * Dubbo {@link SerializationOptimizer} that declares the classes transferred by the benchmark
 * services, so serializers supporting class registration can pre-register them.
 */
public class SerializationOptimizerImpl implements SerializationOptimizer {
    @Override
    public Collection<Class<?>> getSerializableClasses() {
        // Types exchanged between the benchmark client and server.
        return Arrays.asList(User.class, Page.class, UserService.class);
    }
}
| 680 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/UserService.java | package org.apache.dubbo.benchmark.service;
import org.apache.dubbo.benchmark.bean.Page;
import org.apache.dubbo.benchmark.bean.User;
/**
 * Benchmark service contract exercised by the dubbo benchmark client and server.
 */
public interface UserService {
    /**
     * Checks whether a user with the given email exists.
     *
     * @param email email address to probe; may be null or empty.
     * @return true if a user with that email is considered to exist.
     */
    boolean existUser(String email);

    /**
     * Creates the given user.
     *
     * @param user user to create.
     * @return true on success.
     */
    boolean createUser(User user);

    /**
     * Fetches the user with the given id.
     *
     * @param id user id.
     * @return the user.
     */
    User getUser(long id);

    /**
     * Lists one page of users.
     *
     * @param pageNo page number to fetch.
     * @return the requested page of users.
     */
    Page<User> listUser(int pageNo);
}
| 681 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/UserServiceServerImpl.java | package org.apache.dubbo.benchmark.service;
import org.apache.dubbo.benchmark.bean.Page;
import org.apache.dubbo.benchmark.bean.User;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
 * Server-side {@link UserService} implementation that fabricates fixed test data for the
 * benchmark. Redundant {@code new String("literal")} wrappers removed: string literals are
 * already usable directly and copying them only adds allocations.
 */
public class UserServiceServerImpl implements UserService {
    @Override
    public boolean existUser(String email) {
        // Null/empty emails count as existing; otherwise the last character decides, giving
        // the benchmark a deterministic mix of true/false answers.
        if (email == null || email.isEmpty()) {
            return true;
        }
        return email.charAt(email.length() - 1) >= '5';
    }

    @Override
    public User getUser(long id) {
        User user = new User();
        user.setId(id);
        user.setName("Doug Lea");
        user.setSex(1);
        user.setBirthday(LocalDate.of(1968, 12, 8));
        user.setEmail("dong.lea@gmail.com");
        user.setMobile("18612345678");
        user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605");
        user.setIcon("https://www.baidu.com/img/bd_logo1.png");
        user.setStatus(1);
        user.setCreateTime(LocalDateTime.now());
        user.setUpdateTime(user.getCreateTime());
        user.setPermissions(new ArrayList<Integer>(
                Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92)));
        return user;
    }

    @Override
    public Page<User> listUser(int pageNo) {
        List<User> userList = new ArrayList<>(15);
        for (int i = 0; i < 15; i++) {
            User user = new User();
            user.setId(i);
            user.setName("Doug Lea" + i);
            user.setSex(1);
            user.setBirthday(LocalDate.of(1968, 12, 8));
            user.setEmail("dong.lea@gmail.com" + i);
            user.setMobile("18612345678" + i);
            user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605" + i);
            user.setIcon("https://www.baidu.com/img/bd_logo1.png" + i);
            user.setStatus(1);
            user.setCreateTime(LocalDateTime.now());
            user.setUpdateTime(user.getCreateTime());
            user.setPermissions(new ArrayList<Integer>(
                    Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92)));
            userList.add(user);
        }
        Page<User> page = new Page<>();
        page.setPageNo(pageNo);
        page.setTotal(1000);
        page.setResult(userList);
        return page;
    }

    @Override
    public boolean createUser(User user) {
        // Only rejects null; the payload content itself is irrelevant to the benchmark.
        return user != null;
    }
}
| 682 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/UserServicePb.java | //package org.apache.dubbo.benchmark.service;
//
//import org.apache.dubbo.benchmark.bean.PagePB;
//
///**
// * @author zhengzechao
// * @date 2019/7/22
// * Email ooczzoo@gmail.com
// */
//public interface UserServicePb {
// PagePB.Response createUser(PagePB.Request request);
// PagePB.Response existUser(PagePB.Request request);
// PagePB.Response listUser(PagePB.Request request);
// PagePB.Response getUser(PagePB.Request request);
//
//}
| 683 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/PBUserServiceImpl.java | package org.apache.dubbo.benchmark.service;
import com.google.protobuf.util.Timestamps;
import org.apache.dubbo.benchmark.bean.PagePB;
import org.apache.dubbo.benchmark.bean.UserServiceDubbo;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
/**
* @author zhengzechao
* @date 2020-03-06
* Email ooczzoo@gmail.com
*/
public class PBUserServiceImpl implements UserServiceDubbo.IUserService {
@Override
public PagePB.Response createUser(PagePB.Request request) {
final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
if (request.getUser() == null) {
builder.setState(false);
} else {
builder.setState(true);
}
return builder.build();
}
@Override
public CompletableFuture<PagePB.Response> createUserAsync(PagePB.Request request) {
return null;
}
@Override
public PagePB.Response existUser(PagePB.Request request) {
String email = request.getEmail();
final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
if (email == null || email.isEmpty()) {
builder.setState(true);
}else if (email.charAt(email.length() - 1) < '5') {
builder.setState(false);
}
builder.setState(true);
return builder.build();
}
@Override
public CompletableFuture<PagePB.Response> existUserAsync(PagePB.Request request) {
return null;
}
@Override
public PagePB.Response listUser(PagePB.Request request) {
final PagePB.Page.Builder page = PagePB.Page.newBuilder();
List<PagePB.User> userList = new ArrayList<>(15);
for (int i = 0; i < 15; i++) {
final PagePB.User.Builder user = PagePB.User.newBuilder();
user.setId(i);
user.setName(new String("Doug Lea"));
user.setSex(1);
user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
user.setEmail(new String("dong.lea@gmail.com"));
user.setMobile(new String("18612345678"));
user.setAddress(new String("北京市 中关村 中关村大街1号 鼎好大厦 1605"));
user.setIcon(new String("https://www.baidu.com/img/bd_logo1.png"));
user.setStatus(1);
user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
user.setUpdateTime(user.getCreateTime());
List<Integer> permissions = new ArrayList<Integer>(
Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92));
user.addAllPermissions(permissions);
userList.add(user.build());
}
page.setPageNo(request.getPage());
page.setTotal(1000);
page.addAllUsers(userList);
return PagePB.Response.newBuilder().setPage(page.build()).build();
}
@Override
public CompletableFuture<PagePB.Response> listUserAsync(PagePB.Request request) {
return null;
}
/**
 * Returns a single fully-populated demo user whose id echoes the request id;
 * every other field is a hard-coded benchmark payload.
 */
@Override
public PagePB.Response getUser(PagePB.Request request) {
    final long id = request.getId();
    final PagePB.User.Builder user = PagePB.User.newBuilder();
    user.setId(id);
    // Plain literals; the original wrapped each one in a redundant
    // `new String(...)` allocation.
    user.setName("Doug Lea");
    user.setSex(1);
    user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
    user.setEmail("dong.lea@gmail.com");
    user.setMobile("18612345678");
    user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605");
    user.setIcon("https://www.baidu.com/img/bd_logo1.png");
    user.setStatus(1);
    user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
    user.setUpdateTime(user.getCreateTime());
    // addAllPermissions copies the values, so a plain fixed-size list suffices.
    List<Integer> permissions = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92);
    user.addAllPermissions(permissions);
    return PagePB.Response.newBuilder().setUser(user.build()).build();
}
@Override
// Async variant is unimplemented in this benchmark stub and returns null.
public CompletableFuture<PagePB.Response> getUserAsync(PagePB.Request request) {
return null;
}
}
| 684 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/UserServiceImplPb.java | //package org.apache.dubbo.benchmark.service;
//
//import com.google.protobuf.util.Timestamps;
//import org.apache.dubbo.benchmark.bean.PagePB;
//
//import java.util.ArrayList;
//import java.util.Arrays;
//import java.util.List;
//
//
//public class UserServiceImplPb implements UserServicePb {
// @Override
// public PagePB.Response createUser(PagePB.Request request) {
// final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
// if (request.getUser() == null) {
// builder.setState(false);
// } else {
// builder.setState(true);
// }
// return builder.build();
// }
//
// @Override
// public PagePB.Response existUser(PagePB.Request request) {
// String email = request.getEmail();
// final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
// if (email == null || email.isEmpty()) {
// builder.setState(true);
// }else if (email.charAt(email.length() - 1) < '5') {
// builder.setState(false);
// }
//
// builder.setState(true);
// return builder.build();
// }
//
// @Override
// public PagePB.Response listUser(PagePB.Request request) {
//
// final PagePB.Page.Builder page = PagePB.Page.newBuilder();
// List<PagePB.User> userList = new ArrayList<>(15);
//
// for (int i = 0; i < 15; i++) {
// final PagePB.User.Builder user = PagePB.User.newBuilder();
//
// user.setId(i);
// user.setName(new String("Doug Lea"));
// user.setSex(1);
// user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
// user.setEmail(new String("dong.lea@gmail.com"));
// user.setMobile(new String("18612345678"));
// user.setAddress(new String("北京市 中关村 中关村大街1号 鼎好大厦 1605"));
// user.setIcon(new String("https://www.baidu.com/img/bd_logo1.png"));
// user.setStatus(1);
// user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
// user.setUpdateTime(user.getCreateTime());
// List<Integer> permissions = new ArrayList<Integer>(
// Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92));
// user.addAllPermissions(permissions);
// userList.add(user.build());
// }
//
// page.setPageNo(request.getPage());
// page.setTotal(1000);
// page.addAllUsers(userList);
//
// return PagePB.Response.newBuilder().setPage(page.build()).build();
// }
//
// @Override
// public PagePB.Response getUser(PagePB.Request request) {
// final long id = request.getId();
// final PagePB.User.Builder user = PagePB.User.newBuilder();
// user.setId(id);
// user.setName(new String("Doug Lea"));
// user.setSex(1);
// user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
// user.setEmail(new String("dong.lea@gmail.com"));
// user.setMobile(new String("18612345678"));
// user.setAddress(new String("北京市 中关村 中关村大街1号 鼎好大厦 1605"));
// user.setIcon(new String("https://www.baidu.com/img/bd_logo1.png"));
// user.setStatus(1);
// user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
// user.setUpdateTime(user.getCreateTime());
// List<Integer> permissions = new ArrayList<Integer>(
// Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92));
// user.addAllPermissions(permissions);
//
// return PagePB.Response.newBuilder().setUser(user.build()).build();
// }
//}
| 685 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/service/GrpcUserServiceImpl.java | package org.apache.dubbo.benchmark.service;
import com.google.protobuf.util.Timestamps;
import io.grpc.stub.StreamObserver;
import org.apache.dubbo.benchmark.bean.DubboUserServiceGrpc;
import org.apache.dubbo.benchmark.bean.PagePB;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/**
* @author zhengzechao
* @date 2020-03-06
* Email ooczzoo@gmail.com
*/
/**
 * gRPC server stub used as benchmark payload: every RPC answers with
 * hard-coded demo data. The duplicated user-building code from getUser and
 * listUser is factored into {@link #buildUser(long)}.
 */
public class GrpcUserServiceImpl extends DubboUserServiceGrpc.UserServiceImplBase {

    /**
     * Permission ids attached to every generated demo user. Shared safely:
     * addAllPermissions copies the values into the message.
     */
    private static final List<Integer> PERMISSIONS =
            Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92);

    /** Builds one fully-populated demo user; only the id varies between calls. */
    private static PagePB.User buildUser(long id) {
        final PagePB.User.Builder user = PagePB.User.newBuilder();
        user.setId(id);
        user.setName("Doug Lea");
        user.setSex(1);
        user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setEmail("dong.lea@gmail.com");
        user.setMobile("18612345678");
        user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605");
        user.setIcon("https://www.baidu.com/img/bd_logo1.png");
        user.setStatus(1);
        user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setUpdateTime(user.getCreateTime());
        user.addAllPermissions(PERMISSIONS);
        return user.build();
    }

    @Override
    public void existUser(PagePB.Request request, StreamObserver<PagePB.Response> responseObserver) {
        String email = request.getEmail();
        final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
        // NOTE(review): both branches below are overwritten by the trailing
        // unconditional setState(true); they are presumably kept as deliberate
        // benchmark payload — confirm before removing.
        if (email == null || email.isEmpty()) {
            builder.setState(true);
        } else if (email.charAt(email.length() - 1) < '5') {
            builder.setState(false);
        }
        builder.setState(true);
        responseObserver.onNext(builder.build());
        responseObserver.onCompleted();
    }

    @Override
    public void createUser(PagePB.Request request, StreamObserver<PagePB.Response> responseObserver) {
        // State reflects whether a user payload was attached; nothing is stored.
        final PagePB.Response.Builder builder = PagePB.Response.newBuilder();
        builder.setState(request.getUser() != null);
        responseObserver.onNext(builder.build());
        responseObserver.onCompleted();
    }

    @Override
    public void getUser(PagePB.Request request, StreamObserver<PagePB.Response> responseObserver) {
        PagePB.Response response =
                PagePB.Response.newBuilder().setUser(buildUser(request.getId())).build();
        responseObserver.onNext(response);
        responseObserver.onCompleted();
    }

    @Override
    public void listUser(PagePB.Request request, StreamObserver<PagePB.Response> responseObserver) {
        final PagePB.Page.Builder page = PagePB.Page.newBuilder();
        List<PagePB.User> userList = new ArrayList<>(15);
        for (int i = 0; i < 15; i++) {
            userList.add(buildUser(i));
        }
        // Page number echoes the request; total is a hard-coded benchmark value.
        page.setPageNo(request.getPage());
        page.setTotal(1000);
        page.addAllUsers(userList);
        PagePB.Response response = PagePB.Response.newBuilder().setPage(page.build()).build();
        responseObserver.onNext(response);
        responseObserver.onCompleted();
    }
}
| 686 |
0 | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark | Create_ds/dubbo-benchmark/benchmark-base/src/main/java/org/apache/dubbo/benchmark/rpc/AbstractClient.java | package org.apache.dubbo.benchmark.rpc;
import org.apache.dubbo.benchmark.bean.Page;
import org.apache.dubbo.benchmark.bean.User;
import org.apache.dubbo.benchmark.service.UserService;
import org.apache.dubbo.benchmark.service.UserServiceServerImpl;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Base class for benchmark clients: generates varying request parameters via a
 * shared counter and delegates each call to the transport-specific
 * {@link UserService} supplied by the subclass.
 */
public abstract class AbstractClient {

    /** Monotonic counter used to vary request parameters across invocations. */
    private final AtomicInteger counter = new AtomicInteger(0);

    /**
     * In-process service used only to fabricate User payloads for createUser();
     * it never goes over the wire. (Renamed from the non-idiomatic
     * `_serviceUserService`.)
     */
    private final UserService localUserService = new UserServiceServerImpl();

    /** Transport-specific remote service under benchmark. */
    protected abstract UserService getUserService();

    public boolean existUser() throws Exception {
        String email = String.valueOf(counter.getAndIncrement());
        return getUserService().existUser(email);
    }

    public boolean createUser() throws Exception {
        int id = counter.getAndIncrement();
        // Build the payload locally, then send it through the benchmarked service.
        User user = localUserService.getUser(id);
        return getUserService().createUser(user);
    }

    public User getUser() throws Exception {
        int id = counter.getAndIncrement();
        return getUserService().getUser(id);
    }

    public Page<User> listUser() throws Exception {
        int pageNo = counter.getAndIncrement();
        return getUserService().listUser(pageNo);
    }
}
| 687 |
0 | Create_ds/dubbo-benchmark/server-base/src/main/java/org/apache/dubbo | Create_ds/dubbo-benchmark/server-base/src/main/java/org/apache/dubbo/benchmark/Server.java | package org.apache.dubbo.benchmark;
import org.springframework.context.support.ClassPathXmlApplicationContext;
/** Benchmark provider entry point: boots Spring from provider.xml and blocks. */
public class Server {

    public static void main(String[] args) throws InterruptedException {
        try (ClassPathXmlApplicationContext applicationContext =
                new ClassPathXmlApplicationContext("provider.xml")) {
            applicationContext.start();
            // Keep the provider alive "forever"; the try-with-resources still
            // closes the context if the sleep is ever interrupted.
            Thread.sleep(Integer.MAX_VALUE);
        }
    }
}
| 688 |
0 | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo/benchmark/Client.java | package org.apache.dubbo.benchmark;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.dubbo.benchmark.bean.Page;
import org.apache.dubbo.benchmark.bean.User;
import org.apache.dubbo.benchmark.rpc.AbstractClient;
import org.apache.dubbo.benchmark.service.UserService;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.ChainedOptionsBuilder;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
@State(Scope.Benchmark)
/**
 * JMH benchmark client for the POJO UserService. Boots a Spring consumer
 * context, exposes the four service calls as benchmarks, and drives JMH from
 * {@link #main(String[])} with CLI-tunable warmup/measurement settings.
 */
public class Client extends AbstractClient {

    /** Number of concurrent JMH worker threads. */
    private static final int CONCURRENCY = 32;

    private final ClassPathXmlApplicationContext context;
    private final UserService userService;

    /** Boots the consumer context and looks up the remote service proxy. */
    public Client() {
        context = new ClassPathXmlApplicationContext("consumer.xml");
        context.start();
        userService = (UserService) context.getBean("userService");
    }

    @Override
    protected UserService getUserService() {
        return userService;
    }

    @TearDown
    public void close() throws IOException {
        context.close();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Override
    public boolean existUser() throws Exception {
        return super.existUser();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Override
    public boolean createUser() throws Exception {
        return super.createUser();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Override
    public User getUser() throws Exception {
        return super.getUser();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    @Override
    public Page<User> listUser() throws Exception {
        return super.listUser();
    }

    public static void main(String[] args) throws Exception {
        // Print the argument values; printing the array itself would only show
        // its identity hash (e.g. "[Ljava.lang.String;@1b6d3586").
        System.out.println(String.join(" ", args));

        org.apache.commons.cli.Options options = new org.apache.commons.cli.Options();
        options.addOption(Option.builder().longOpt("warmupIterations").hasArg().build());
        options.addOption(Option.builder().longOpt("warmupTime").hasArg().build());
        options.addOption(Option.builder().longOpt("measurementIterations").hasArg().build());
        options.addOption(Option.builder().longOpt("measurementTime").hasArg().build());
        CommandLineParser parser = new DefaultParser();
        CommandLine line = parser.parse(options, args);
        // parseInt avoids the needless Integer boxing of Integer.valueOf.
        int warmupIterations = Integer.parseInt(line.getOptionValue("warmupIterations", "3"));
        int warmupTime = Integer.parseInt(line.getOptionValue("warmupTime", "10"));
        int measurementIterations = Integer.parseInt(line.getOptionValue("measurementIterations", "3"));
        int measurementTime = Integer.parseInt(line.getOptionValue("measurementTime", "10"));

        ChainedOptionsBuilder optBuilder = new OptionsBuilder()
                .include(Client.class.getSimpleName())
                // The PB/gRPC clients share the "Client" prefix; exclude them
                // so only this benchmark runs.
                .exclude(ClientPb.class.getSimpleName())
                .exclude(ClientGrpc.class.getSimpleName())
                .warmupIterations(warmupIterations)
                .warmupTime(TimeValue.seconds(warmupTime))
                .measurementIterations(measurementIterations)
                .measurementTime(TimeValue.seconds(measurementTime))
                .threads(CONCURRENCY)
                .forks(1);
        Options opt = doOptions(optBuilder).build();
        new Runner(opt).run();
    }

    /** Redirects JMH output to -Dbenchmark.output when that property is set. */
    private static ChainedOptionsBuilder doOptions(ChainedOptionsBuilder optBuilder) {
        String output = System.getProperty("benchmark.output");
        if (output != null && !output.trim().isEmpty()) {
            optBuilder.output(output);
        }
        return optBuilder;
    }
}
| 689 |
0 | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo/benchmark/ClientPb.java | package org.apache.dubbo.benchmark;
import com.google.protobuf.util.Timestamps;
import org.apache.dubbo.benchmark.bean.PagePB;
import org.apache.dubbo.benchmark.bean.UserServiceDubbo;
import org.apache.dubbo.config.ProtocolConfig;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.ChainedOptionsBuilder;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@State(Scope.Benchmark)
/**
 * JMH benchmark client for the protobuf UserService variant. Mirrors
 * {@link Client} but speaks PagePB request/response messages directly.
 */
public class ClientPb {

    /** Number of concurrent JMH worker threads. */
    private static final int CONCURRENCY = 32;

    /**
     * Permission ids attached to every generated demo user. Shared safely:
     * addAllPermissions copies the values into the message.
     */
    private static final List<Integer> PERMISSIONS =
            Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92);

    private final ClassPathXmlApplicationContext context;
    private final UserServiceDubbo.IUserService userService;

    /** Monotonic counter used to vary request parameters across invocations. */
    private final AtomicInteger counter = new AtomicInteger(0);

    public ClientPb() {
        context = new ClassPathXmlApplicationContext("consumer.xml");
        context.start();
        userService = (UserServiceDubbo.IUserService) context.getBean("userService");
    }

    @TearDown
    public void close() throws IOException {
        context.close();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public boolean existUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.existUser(PagePB.Request.newBuilder().setEmail(String.valueOf(count)).build())
                .getState();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public boolean createUser() throws Exception {
        final int count = counter.getAndIncrement();
        final PagePB.User.Builder user = PagePB.User.newBuilder();
        user.setId(count);
        // Plain literals; the original wrapped each one in a redundant
        // `new String(...)` allocation.
        user.setName("Doug Lea");
        user.setSex(1);
        user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setEmail("dong.lea@gmail.com");
        user.setMobile("18612345678");
        user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605");
        user.setIcon("https://www.baidu.com/img/bd_logo1.png");
        user.setStatus(1);
        user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setUpdateTime(user.getCreateTime());
        user.addAllPermissions(PERMISSIONS);
        final PagePB.Request.Builder builder = PagePB.Request.newBuilder();
        return userService.createUser(builder.setUser(user.build()).build()).getState();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public PagePB.User getUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.getUser(PagePB.Request.newBuilder().setId(count).build()).getUser();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public PagePB.Page listUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.listUser(PagePB.Request.newBuilder().setPage(count).build()).getPage();
    }

    public static void main(String[] args) throws Exception {
        ChainedOptionsBuilder optBuilder = new OptionsBuilder()
                .include(ClientPb.class.getSimpleName())
                .warmupIterations(3)
                .warmupTime(TimeValue.seconds(10))
                .measurementIterations(3)
                .measurementTime(TimeValue.seconds(10))
                .threads(CONCURRENCY)
                .forks(1);
        Options opt = doOptions(optBuilder).build();
        new Runner(opt).run();
    }

    /** Redirects JMH output to -Dbenchmark.output when that property is set. */
    private static ChainedOptionsBuilder doOptions(ChainedOptionsBuilder optBuilder) {
        String output = System.getProperty("benchmark.output");
        if (output != null && !output.trim().isEmpty()) {
            optBuilder.output(output);
        }
        return optBuilder;
    }
}
| 690 |
0 | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo | Create_ds/dubbo-benchmark/client-base/src/main/java/org/apache/dubbo/benchmark/ClientGrpc.java | package org.apache.dubbo.benchmark;
import com.google.protobuf.util.Timestamps;
import org.apache.dubbo.benchmark.bean.DubboUserServiceGrpc;
import org.apache.dubbo.benchmark.bean.PagePB;
import org.apache.dubbo.benchmark.bean.UserServiceDubbo;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.options.ChainedOptionsBuilder;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import org.openjdk.jmh.runner.options.TimeValue;
import org.springframework.context.support.ClassPathXmlApplicationContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@State(Scope.Benchmark)
/**
 * JMH benchmark client for the gRPC UserService variant. Mirrors
 * {@link ClientPb} but resolves the gRPC-generated service interface.
 */
public class ClientGrpc {

    /** Number of concurrent JMH worker threads. */
    private static final int CONCURRENCY = 32;

    /**
     * Permission ids attached to every generated demo user. Shared safely:
     * addAllPermissions copies the values into the message.
     */
    private static final List<Integer> PERMISSIONS =
            Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 19, 88, 86, 89, 90, 91, 92);

    private final ClassPathXmlApplicationContext context;
    private final DubboUserServiceGrpc.IUserService userService;

    /** Monotonic counter used to vary request parameters across invocations. */
    private final AtomicInteger counter = new AtomicInteger(0);

    public ClientGrpc() {
        context = new ClassPathXmlApplicationContext("consumer.xml");
        context.start();
        userService = (DubboUserServiceGrpc.IUserService) context.getBean("userService");
    }

    @TearDown
    public void close() throws IOException {
        context.close();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public boolean existUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.existUser(PagePB.Request.newBuilder().setEmail(String.valueOf(count)).build())
                .getState();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public boolean createUser() throws Exception {
        final int count = counter.getAndIncrement();
        final PagePB.User.Builder user = PagePB.User.newBuilder();
        user.setId(count);
        // Plain literals; the original wrapped each one in a redundant
        // `new String(...)` allocation.
        user.setName("Doug Lea");
        user.setSex(1);
        user.setBirthday(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setEmail("dong.lea@gmail.com");
        user.setMobile("18612345678");
        user.setAddress("北京市 中关村 中关村大街1号 鼎好大厦 1605");
        user.setIcon("https://www.baidu.com/img/bd_logo1.png");
        user.setStatus(1);
        user.setCreateTime(Timestamps.fromMillis(System.currentTimeMillis()));
        user.setUpdateTime(user.getCreateTime());
        user.addAllPermissions(PERMISSIONS);
        final PagePB.Request.Builder builder = PagePB.Request.newBuilder();
        return userService.createUser(builder.setUser(user.build()).build()).getState();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public PagePB.User getUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.getUser(PagePB.Request.newBuilder().setId(count).build()).getUser();
    }

    @Benchmark
    @BenchmarkMode({Mode.Throughput, Mode.AverageTime, Mode.SampleTime})
    @OutputTimeUnit(TimeUnit.MILLISECONDS)
    public PagePB.Page listUser() throws Exception {
        final int count = counter.getAndIncrement();
        return userService.listUser(PagePB.Request.newBuilder().setPage(count).build()).getPage();
    }

    public static void main(String[] args) throws Exception {
        ChainedOptionsBuilder optBuilder = new OptionsBuilder()
                .include(ClientGrpc.class.getSimpleName())
                .warmupIterations(3)
                .warmupTime(TimeValue.seconds(10))
                .measurementIterations(3)
                .measurementTime(TimeValue.seconds(10))
                .threads(CONCURRENCY)
                .forks(1);
        Options opt = doOptions(optBuilder).build();
        new Runner(opt).run();
    }

    /** Redirects JMH output to -Dbenchmark.output when that property is set. */
    private static ChainedOptionsBuilder doOptions(ChainedOptionsBuilder optBuilder) {
        String output = System.getProperty("benchmark.output");
        if (output != null && !output.trim().isEmpty()) {
            optBuilder.output(output);
        }
        return optBuilder;
    }
}
| 691 |
0 | Create_ds/aws-encryption-sdk-java | Create_ds/aws-encryption-sdk-java/compliance_exceptions/aws-kms-mrk-aware-multi-keyrings.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// The AWS Encryption SDK - Java does not implement
// any of the Keyring interface at this time.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-multi-keyrings.txt#2.5
//= type=exception
//# The caller MUST provide:
//#
//# * A set of Region strings
//#
//# * An optional discovery filter that is an AWS partition and a set of
//# AWS accounts
//#
//# * An optional method that can take a region string and return an AWS
//# KMS client e.g. a regional client supplier
//#
//# * An optional list of AWS KMS grant tokens
//#
//# If an empty set of Region is provided this function MUST fail. If
//# any element of the set of regions is null or an empty string this
//# function MUST fail. If a regional client supplier is not passed,
//# then a default MUST be created that takes a region string and
//# generates a default AWS SDK client for the given region.
//#
//# A set of AWS KMS clients MUST be created by calling regional client
//# supplier for each region in the input set of regions.
//#
//# Then a set of AWS KMS MRK Aware Symmetric Region Discovery Keyring
//# (aws-kms-mrk-aware-symmetric-region-discovery-keyring.md) MUST be
//# created for each AWS KMS client by initializing each keyring with
//#
//# * The AWS KMS client
//#
//# * The input discovery filter
//#
//# * The input AWS KMS grant tokens
//#
//# Then a Multi-Keyring (../multi-keyring.md#inputs) MUST be initialize
//# by using this set of discovery keyrings as the child keyrings
//# (../multi-keyring.md#child-keyrings). This Multi-Keyring MUST be
//# this functions output.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-multi-keyrings.txt#2.6
//= type=exception
//# The caller MUST provide:
//#
//# * An optional AWS KMS key identifiers to use as the generator.
//#
//# * An optional set of AWS KMS key identifiers to us as child
//# keyrings.
//#
//# * An optional method that can take a region string and return an AWS
//# KMS client e.g. a regional client supplier
//#
//# * An optional list of AWS KMS grant tokens
//#
//# If any of the AWS KMS key identifiers is null or an empty string this
//# function MUST fail. At least one non-null or non-empty string AWS
//# KMS key identifiers exists in the input this function MUST fail. All
//# AWS KMS identifiers are passed to Assert AWS KMS MRK are unique (aws-
//# kms-mrk-are-unique.md#Implementation) and the function MUST return
//# success otherwise this MUST fail. If a regional client supplier is
//# not passed, then a default MUST be created that takes a region string
//# and generates a default AWS SDK client for the given region.
//#
//# If there is a generator input then the generator keyring MUST be a
//# AWS KMS MRK Aware Symmetric Keyring (aws-kms-mrk-aware-symmetric-
//# keyring.md) initialized with
//#
//# * The generator input.
//#
//# * The AWS KMS client that MUST be created by the regional client
//# supplier when called with the region part of the generator ARN or
//# a signal for the AWS SDK to select the default region.
//#
//# * The input list of AWS KMS grant tokens
//#
//# If there is a set of child identifiers then a set of AWS KMS MRK
//# Aware Symmetric Keyring (aws-kms-mrk-aware-symmetric-keyring.md) MUST
//# be created for each AWS KMS key identifier by initialized each
//# keyring with
//#
//# * AWS KMS key identifier.
//#
//# * The AWS KMS client that MUST be created by the regional client
//# supplier when called with the region part of the AWS KMS key
//# identifier or a signal for the AWS SDK to select the default
//# region.
//#
//# * The input list of AWS KMS grant tokens
//#
//# NOTE: The AWS Encryption SDK SHOULD NOT attempt to evaluate its own
//# default region.
//#
//# Then a Multi-Keyring (../multi-keyring.md#inputs) MUST be initialize
//# by using this generator keyring as the generator keyring (../multi-
//# keyring.md#generator-keyring) and this set of child keyrings as the
//# child keyrings (../multi-keyring.md#child-keyrings). This Multi-
//# Keyring MUST be this functions output.
| 692 |
0 | Create_ds/aws-encryption-sdk-java | Create_ds/aws-encryption-sdk-java/compliance_exceptions/aws-kms-mrk-aware-symmetric-region-discovery-keyring.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// The AWS Encryption SDK - Java does not implement
// any of the Keyring interface at this time.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-region-discovery-keyring.txt#2.5
//= type=exception
//# MUST implement that AWS Encryption SDK Keyring interface (../keyring-
//# interface.md#interface)
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-region-discovery-keyring.txt#2.6
//= type=exception
//# On initialization the caller MUST provide:
//#
//# * An AWS KMS client
//#
//# * An optional discovery filter that is an AWS partition and a set of
//# AWS accounts
//#
//# * An optional list of AWS KMS grant tokens
//#
//# The keyring MUST know what Region the AWS KMS client is in. It
//# SHOULD obtain this information directly from the client as opposed to
//# having an additional parameter. However if it can not, then it MUST
//# NOT create the client itself. It SHOULD have a Region parameter and
//# SHOULD try to identify mismatched configurations. i.e. The client is
//# in Region A and the Region parameter is B.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-region-discovery-keyring.txt#2.7
//= type=exception
//# This function MUST fail.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-region-discovery-keyring.txt#2.8
//= type=exception
//# OnDecrypt MUST take decryption materials (structures.md#decryption-
//# materials) and a list of encrypted data keys
//# (structures.md#encrypted-data-key) as input.
//#
//# If the decryption materials (structures.md#decryption-materials)
//# already contained a valid plaintext data key OnDecrypt MUST
//# immediately return the unmodified decryption materials
//# (structures.md#decryption-materials).
//#
//# The set of encrypted data keys MUST first be filtered to match this
//# keyring's configuration. For the encrypted data key to match
//#
//# * Its provider ID MUST exactly match the value "aws-kms".
//#
//# * The provider info MUST be a valid AWS KMS ARN (aws-kms-key-
//# arn.md#a-valid-aws-kms-arn) with a resource type of "key" or
//# OnDecrypt MUST fail.
//#
//# * If a discovery filter is configured, its partition and the
//# provider info partition MUST match.
//#
//# * If a discovery filter is configured, its set of accounts MUST
//# contain the provider info account.
//#
//# * If the provider info is not identified as a multi-Region key (aws-
//# kms-key-arn.md#identifying-an-aws-kms-multi-region-key), then the
//# provider info's Region MUST match the AWS KMS client region.
//#
//# For each encrypted data key in the filtered set, one at a time, the
//# OnDecrypt MUST attempt to decrypt the data key. If this attempt
//# results in an error, then these errors are collected.
//#
//# To attempt to decrypt a particular encrypted data key
//# (structures.md#encrypted-data-key), OnDecrypt MUST call AWS KMS
//# Decrypt (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html) with the configured AWS KMS client.
//#
//# When calling AWS KMS Decrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html), the keyring MUST call with a request constructed
//# as follows:
//#
//# * "KeyId": If the provider info's resource type is "key" and its
//# resource is a multi-Region key then a new ARN MUST be created
//# where the region part MUST equal the AWS KMS client region and
//# every other part MUST equal the provider info. Otherwise it MUST
//# be the provider info.
//#
//# * "CiphertextBlob": The encrypted data key ciphertext
//# (structures.md#ciphertext).
//#
//# * "EncryptionContext": The encryption context
//# (structures.md#encryption-context) included in the input
//# decryption materials (structures.md#decryption-materials).
//#
//# * "GrantTokens": this keyring's grant tokens
//# (https://docs.aws.amazon.com/kms/latest/developerguide/
//# concepts.html#grant_token)
//#
//# If the call to AWS KMS Decrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html) succeeds OnDecrypt verifies
//#
//# * The "KeyId" field in the response MUST equal the requested "KeyId"
//#
//# * The length of the response's "Plaintext" MUST equal the key
//# derivation input length (algorithm-suites.md#key-derivation-input-
//# length) specified by the algorithm suite (algorithm-suites.md)
//# included in the input decryption materials
//# (structures.md#decryption-materials).
//#
//# If the response does not satisfies these requirements then an error
//# is collected and the next encrypted data key in the filtered set MUST
//# be attempted.
//#
//# Since the response does satisfies these requirements then OnDecrypt
//# MUST do the following with the response:
//#
//# * set the plaintext data key on the decryption materials
//# (structures.md#decryption-materials) as the response "Plaintext".
//#
//# * immediately return the modified decryption materials
//# (structures.md#decryption-materials).
//#
//# If OnDecrypt fails to successfully decrypt any encrypted data key
//# (structures.md#encrypted-data-key), then it MUST yield an error that
//# includes all collected errors.
| 693 |
0 | Create_ds/aws-encryption-sdk-java | Create_ds/aws-encryption-sdk-java/compliance_exceptions/aws-kms-mrk-aware-symmetric-keyring.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
// The AWS Encryption SDK - Java does not implement
// the Keyring interface at this time.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-keyring.txt#2.5
//= type=exception
//# MUST implement the AWS Encryption SDK Keyring interface (../keyring-
//# interface.md#interface)
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-keyring.txt#2.6
//= type=exception
//# On initialization the caller MUST provide:
//#
//# * An AWS KMS key identifier
//#
//# * An AWS KMS SDK client
//#
//# * An optional list of Grant Tokens
//#
//# The AWS KMS key identifier MUST NOT be null or empty. The AWS KMS
//# key identifier MUST be a valid identifier (aws-kms-key-arn.md#a-
//# valid-aws-kms-identifier). The AWS KMS SDK client MUST NOT be null.
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-keyring.txt#2.7
//= type=exception
//# OnEncrypt MUST take encryption materials (structures.md#encryption-
//# materials) as input.
//#
//# If the input encryption materials (structures.md#encryption-
//# materials) do not contain a plaintext data key OnEncrypt MUST attempt
//# to generate a new plaintext data key by calling AWS KMS
//# GenerateDataKey (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_GenerateDataKey.html).
//#
//# If the keyring calls AWS KMS GenerateDataKeys, it MUST use the
//# configured AWS KMS client to make the call. The keyring MUST call
//# AWS KMS GenerateDataKeys with a request constructed as follows:
//#
//# * "KeyId": this keyring's KMS key identifier.
//#
//# * "NumberOfBytes": the key derivation input length (algorithm-
//# suites.md#key-derivation-input-length) specified by the algorithm
//# suite (algorithm-suites.md) included in the input encryption
//# materials (structures.md#encryption-materials).
//#
//# * "EncryptionContext": the encryption context
//# (structures.md#encryption-context) included in the input
//# encryption materials (structures.md#encryption-materials).
//#
//# * "GrantTokens": this keyring's grant tokens
//# (https://docs.aws.amazon.com/kms/latest/developerguide/
//# concepts.html#grant_token)
//#
//# If the call to AWS KMS GenerateDataKey
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_GenerateDataKey.html) does not succeed, OnEncrypt MUST NOT modify
//# the encryption materials (structures.md#encryption-materials) and
//# MUST fail.
//#
//# If the Generate Data Key call succeeds, OnEncrypt MUST verify that
//# the response "Plaintext" length matches the specification of the
//# algorithm suite (algorithm-suites.md)'s Key Derivation Input Length
//# field. The Generate Data Key response's "KeyId" MUST be A valid AWS
//# KMS key ARN (aws-kms-key-arn.md#identifying-an-aws-kms-multi-region-
//# key). If verified, OnEncrypt MUST do the following with the response
//# from AWS KMS GenerateDataKey
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_GenerateDataKey.html):
//#
//# * set the plaintext data key on the encryption materials
//# (structures.md#encryption-materials) as the response "Plaintext".
//#
//# * append a new encrypted data key (structures.md#encrypted-data-key)
//# to the encrypted data key list in the encryption materials
//# (structures.md#encryption-materials), constructed as follows:
//#
//# - the ciphertext (structures.md#ciphertext) is the response
//# "CiphertextBlob".
//#
//# - the key provider id (structures.md#key-provider-id) is "aws-
//# kms".
//#
//# - the key provider information (structures.md#key-provider-
//# information) is the response "KeyId".
//#
//# * OnEncrypt MUST output the modified encryption materials
//# (structures.md#encryption-materials)
//#
//# Given a plaintext data key in the encryption materials
//# (structures.md#encryption-materials), OnEncrypt MUST attempt to
//# encrypt the plaintext data key using the configured AWS KMS key
//# identifier.
//#
//# The keyring MUST call AWS KMS Encrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Encrypt.html) using the configured AWS KMS client. The keyring
//# MUST AWS KMS Encrypt call with a request constructed as follows:
//#
//# * "KeyId": The configured AWS KMS key identifier.
//#
//# * "PlaintextDataKey": the plaintext data key in the encryption
//# materials (structures.md#encryption-materials).
//#
//# * "EncryptionContext": the encryption context
//# (structures.md#encryption-context) included in the input
//# encryption materials (structures.md#encryption-materials).
//#
//# * "GrantTokens": this keyring's grant tokens
//# (https://docs.aws.amazon.com/kms/latest/developerguide/
//# concepts.html#grant_token)
//#
//# If the call to AWS KMS Encrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Encrypt.html) does not succeed, OnEncrypt MUST fail.
//#
//# If the Encrypt call succeeds The response's "KeyId" MUST be A valid
//# AWS KMS key ARN (aws-kms-key-arn.md#identifying-an-aws-kms-multi-
//# region-key). If verified, OnEncrypt MUST do the following with the
//# response from AWS KMS Encrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Encrypt.html):
//#
//# * append a new encrypted data key (structures.md#encrypted-data-key)
//# to the encrypted data key list in the encryption materials
//# (structures.md#encryption-materials), constructed as follows:
//#
//# - The ciphertext (structures.md#ciphertext) is the response
//# "CiphertextBlob".
//#
//# - The key provider id (structures.md#key-provider-id) is "aws-
//# kms".
//#
//# - The key provider information (structures.md#key-provider-
//# information) is the response "KeyId". Note that the "KeyId" in
//# the response is always in key ARN format.
//#
//# If all Encrypt calls succeed, OnEncrypt MUST output the modified
//# encryption materials (structures.md#encryption-materials).
//= compliance/framework/aws-kms/aws-kms-mrk-aware-symmetric-keyring.txt#2.8
//= type=exception
//# OnDecrypt MUST take decryption materials (structures.md#decryption-
//# materials) and a list of encrypted data keys
//# (structures.md#encrypted-data-key) as input.
//#
//# If the decryption materials (structures.md#decryption-materials)
//# already contained a valid plaintext data key OnDecrypt MUST
//# immediately return the unmodified decryption materials
//# (structures.md#decryption-materials).
//#
//# The set of encrypted data keys MUST first be filtered to match this
//# keyring's configuration. For the encrypted data key to match
//#
//# * Its provider ID MUST exactly match the value "aws-kms".
//#
//# * The provider info MUST be a valid AWS KMS ARN (aws-kms-key-
//# arn.md#a-valid-aws-kms-arn) with a resource type of "key" or
//# OnDecrypt MUST fail.
//#
//# * The the function AWS KMS MRK Match for Decrypt (aws-kms-mrk-match-
//# for-decrypt.md#implementation) called with the configured AWS KMS
//# key identifier and the provider info MUST return "true".
//#
//# For each encrypted data key in the filtered set, one at a time, the
//# OnDecrypt MUST attempt to decrypt the data key. If this attempt
//# results in an error, then these errors MUST be collected.
//#
//# To attempt to decrypt a particular encrypted data key
//# (structures.md#encrypted-data-key), OnDecrypt MUST call AWS KMS
//# Decrypt (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html) with the configured AWS KMS client.
//#
//# When calling AWS KMS Decrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html), the keyring MUST call with a request constructed
//# as follows:
//#
//# * "KeyId": The configured AWS KMS key identifier.
//#
//# * "CiphertextBlob": the encrypted data key ciphertext
//# (structures.md#ciphertext).
//#
//# * "EncryptionContext": the encryption context
//# (structures.md#encryption-context) included in the input
//# decryption materials (structures.md#decryption-materials).
//#
//# * "GrantTokens": this keyring's grant tokens
//# (https://docs.aws.amazon.com/kms/latest/developerguide/
//# concepts.html#grant_token)
//#
//# If the call to AWS KMS Decrypt
//# (https://docs.aws.amazon.com/kms/latest/APIReference/
//# API_Decrypt.html) succeeds OnDecrypt verifies
//#
//# * The "KeyId" field in the response MUST equal the configured AWS
//# KMS key identifier.
//#
//# * The length of the response's "Plaintext" MUST equal the key
//# derivation input length (algorithm-suites.md#key-derivation-input-
//# length) specified by the algorithm suite (algorithm-suites.md)
//# included in the input decryption materials
//# (structures.md#decryption-materials).
//#
//# If the response does not satisfies these requirements then an error
//# MUST be collected and the next encrypted data key in the filtered set
//# MUST be attempted.
//#
//# If the response does satisfies these requirements then OnDecrypt MUST
//# do the following with the response:
//#
//# * set the plaintext data key on the decryption materials
//# (structures.md#decryption-materials) as the response "Plaintext".
//#
//# * immediately return the modified decryption materials
//# (structures.md#decryption-materials).
//#
//# If OnDecrypt fails to successfully decrypt any encrypted data key
//# (structures.md#encrypted-data-key), then it MUST yield an error that
//# includes all the collected errors.
| 694 |
0 | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto/examples/DiscoveryMultiRegionDecryptionExampleTest.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.crypto.examples;
import com.amazonaws.encryptionsdk.kms.KMSTestFixtures;
import org.junit.Test;
import software.amazon.awssdk.regions.Region;
/** Integration test that runs {@link DiscoveryMultiRegionDecryptionExample} end to end. */
public class DiscoveryMultiRegionDecryptionExampleTest {

  @Test
  public void testEncryptAndDecrypt() {
    // Exercise the example with the us-east-1 multi-Region key id while
    // decrypting with a client configured for us-west-2.
    final Region decryptRegion = Region.US_WEST_2;
    DiscoveryMultiRegionDecryptionExample.encryptAndDecrypt(
        KMSTestFixtures.US_EAST_1_MULTI_REGION_KEY_ID,
        KMSTestFixtures.PARTITION,
        KMSTestFixtures.ACCOUNT_ID,
        decryptRegion);
  }
}
| 695 |
0 | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto/examples/RestrictRegionExampleTest.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.crypto.examples;
import com.amazonaws.encryptionsdk.kms.KMSTestFixtures;
import org.junit.Test;
import software.amazon.awssdk.regions.Region;
/** Integration test that runs {@link RestrictRegionExample} end to end. */
public class RestrictRegionExampleTest {

  @Test
  public void testEncryptAndDecrypt() {
    // Run the example against the us-west-2 test key, restricting to that region.
    final Region region = Region.US_WEST_2;
    RestrictRegionExample.encryptAndDecrypt(
        KMSTestFixtures.US_WEST_2_KEY_ID,
        KMSTestFixtures.PARTITION,
        KMSTestFixtures.ACCOUNT_ID,
        region);
  }
}
| 696 |
0 | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto/examples/MultipleCmkEncryptExampleTest.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.crypto.examples;
import com.amazonaws.encryptionsdk.kms.KMSTestFixtures;
import org.junit.Test;
/** Integration test that runs {@link MultipleCmkEncryptExample} end to end. */
public class MultipleCmkEncryptExampleTest {

  @Test
  public void testEncryptAndDecrypt() {
    // Encrypt under two distinct test CMKs and verify round-trip in the example.
    final String firstKeyId = KMSTestFixtures.TEST_KEY_IDS[0];
    final String secondKeyId = KMSTestFixtures.TEST_KEY_IDS[1];
    MultipleCmkEncryptExample.encryptAndDecrypt(firstKeyId, secondKeyId);
  }
}
| 697 |
0 | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto/examples/SimpleDataKeyCachingExampleTest.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.crypto.examples;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import com.amazonaws.encryptionsdk.ParsedCiphertext;
import com.amazonaws.encryptionsdk.kms.KMSTestFixtures;
import org.junit.Test;
/** Integration test that runs {@link SimpleDataKeyCachingExample} and inspects its output. */
public class SimpleDataKeyCachingExampleTest {

  // Cache configuration forwarded to the example.
  private static final int MAX_ENTRY_AGE = 100;
  private static final int CACHE_CAPACITY = 1000;

  @Test
  public void testEncryptWithCaching() {
    final String keyId = KMSTestFixtures.TEST_KEY_IDS[0];

    final byte[] ciphertext =
        SimpleDataKeyCachingExample.encryptWithCaching(keyId, MAX_ENTRY_AGE, CACHE_CAPACITY);
    assertNotNull(ciphertext);

    // The message must carry exactly one encrypted data key, and its provider
    // information must be the key id we encrypted under.
    final ParsedCiphertext parsed = new ParsedCiphertext(ciphertext);
    assertEquals(1, parsed.getEncryptedKeyBlobs().size());
    assertArrayEquals(
        keyId.getBytes(), parsed.getEncryptedKeyBlobs().get(0).getProviderInformation());
  }
}
| 698 |
0 | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto | Create_ds/aws-encryption-sdk-java/src/test/java/com/amazonaws/crypto/examples/SetEncryptionAlgorithmExampleTest.java | // Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.crypto.examples;
import com.amazonaws.encryptionsdk.kms.KMSTestFixtures;
import org.junit.Test;
/** Integration test that runs {@link SetEncryptionAlgorithmExample} end to end. */
public class SetEncryptionAlgorithmExampleTest {

  @Test
  public void testEncryptAndDecrypt() {
    final String keyId = KMSTestFixtures.TEST_KEY_IDS[0];
    SetEncryptionAlgorithmExample.encryptAndDecrypt(keyId);
  }
}
| 699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.