index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/controlplane/CreateDynamoDBTable.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.controlplane;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeDefinition;
import com.amazonaws.services.dynamodbv2.model.CreateTableRequest;
import com.amazonaws.services.dynamodbv2.model.KeySchemaElement;
import com.amazonaws.services.dynamodbv2.model.KeyType;
import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughput;
import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException;
import com.amazonaws.services.dynamodbv2.model.ScalarAttributeType;
import com.amazonaws.services.dynamodbv2.model.TableDescription;
import com.amazonaws.services.dynamodbv2.util.TableUtils;
import com.google.common.base.Preconditions;
import com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.AbstractDynamoDBOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.function.Supplier;
/**
* @author ipapapa
* @author Alexander Patrikalakis
*/
public class CreateDynamoDBTable extends AbstractDynamoDBOperation implements Supplier<TableDescription> {
    private static final Logger logger = LoggerFactory.getLogger(CreateDynamoDBTable.class);

    // Provisioned throughput used only when the table has to be created.
    private final long readCapacityUnits;
    private final long writeCapacityUnits;

    /**
     * Builds an idempotent "ensure table exists" control-plane operation.
     *
     * @param dynamoDB           client used for describe/create calls
     * @param tableName          name of the benchmark table
     * @param partitionKeyName   name of the string HASH key attribute
     * @param readCapacityUnits  provisioned read capacity units; must be positive
     * @param writeCapacityUnits provisioned write capacity units; must be positive
     */
    public CreateDynamoDBTable(AmazonDynamoDB dynamoDB, String tableName, String partitionKeyName,
                               long readCapacityUnits, long writeCapacityUnits) {
        super(dynamoDB, tableName, partitionKeyName);
        Preconditions.checkArgument(readCapacityUnits > 0);
        Preconditions.checkArgument(writeCapacityUnits > 0);
        this.readCapacityUnits = readCapacityUnits;
        this.writeCapacityUnits = writeCapacityUnits;
    }

    /**
     * Returns the table's description, creating the table first if it does not exist.
     * The table is created with a single string hash key and the configured provisioned
     * throughput, and this method blocks until the new table reaches ACTIVE state.
     *
     * @return description of the (existing or newly created) table
     * @throws IllegalStateException if interrupted while waiting for the table to become ACTIVE
     */
    @Override
    public TableDescription get() {
        logger.debug("Creating table if it does not exist yet");
        // Key schema: a single HASH key named after the configured partition key.
        ArrayList<KeySchemaElement> keySchema = new ArrayList<>();
        keySchema.add(new KeySchemaElement().withAttributeName(partitionKeyName).withKeyType(KeyType.HASH));
        // Attribute definitions: only key attributes need declaring; the key is a string.
        ArrayList<AttributeDefinition> attributeDefinitions = new ArrayList<>();
        attributeDefinitions.add(new AttributeDefinition().withAttributeName(partitionKeyName)
                .withAttributeType(ScalarAttributeType.S));
        // Full request: schema + attribute definitions + provisioned throughput.
        CreateTableRequest request = new CreateTableRequest().withTableName(tableName)
                .withKeySchema(keySchema).withAttributeDefinitions(attributeDefinitions)
                .withProvisionedThroughput(new ProvisionedThroughput().withReadCapacityUnits(readCapacityUnits)
                        .withWriteCapacityUnits(writeCapacityUnits));
        logger.info("Creating Table: {}", tableName);
        try {
            // Fast path: the table already exists, return its current description.
            return dynamoDB.describeTable(tableName).getTable();
        } catch (ResourceNotFoundException e) {
            TableDescription tableDescription = dynamoDB.createTable(request).getTableDescription();
            logger.debug("Waiting until the table is in ACTIVE state");
            try {
                TableUtils.waitUntilActive(dynamoDB, tableName);
            } catch (InterruptedException e1) {
                // BUGFIX: the previous code wrapped `e` (the ResourceNotFoundException)
                // as the cause instead of the InterruptedException, losing the real
                // failure. Also restore the interrupt flag before rethrowing.
                Thread.currentThread().interrupt();
                throw new IllegalStateException("Table interrupted exception", e1);
            }
            return tableDescription;
        }
    }
}
| 9,100 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/controlplane/DescribeLimits.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.controlplane;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.DescribeLimitsRequest;
import com.amazonaws.services.dynamodbv2.model.DescribeLimitsResult;
import com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.AbstractDynamoDBOperation;
import java.util.function.Supplier;
/**
* @author Alexander Patrikalakis
*/
public class DescribeLimits extends AbstractDynamoDBOperation implements Supplier<DescribeLimitsResult> {

    /**
     * Builds an operation that queries the account-level DynamoDB capacity limits.
     *
     * @param dynamoDB         client used for the control-plane call
     * @param tableName        benchmark table name (kept for parity with sibling operations)
     * @param partitionKeyName partition key name (kept for parity with sibling operations)
     */
    public DescribeLimits(AmazonDynamoDB dynamoDB, String tableName, String partitionKeyName) {
        super(dynamoDB, tableName, partitionKeyName);
    }

    /**
     * Fetches the current provisioned-capacity limits for the account/region.
     *
     * @return the DescribeLimits response from DynamoDB
     */
    @Override
    public DescribeLimitsResult get() {
        final DescribeLimitsRequest limitsRequest = new DescribeLimitsRequest();
        return dynamoDB.describeLimits(limitsRequest);
    }
}
| 9,101 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/DynamoDBReadBulk.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchGetItemResult;
import com.amazonaws.services.dynamodbv2.model.KeysAndAttributes;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.ndbench.api.plugin.DataGenerator;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* @author Alexander Patrikalakis
* @author ipapapa
*/
public class DynamoDBReadBulk extends AbstractDynamoDBReadOperation
        implements CapacityConsumingFunction<BatchGetItemResult, List<String>, List<String>> {

    /**
     * Builds a bulk-read operation that fetches a batch of keys via BatchGetItem.
     */
    public DynamoDBReadBulk(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
                            String partitionKeyName, boolean consistentRead,
                            ReturnConsumedCapacity returnConsumedCapacity) {
        super(dataGenerator, dynamoDB, tableName, partitionKeyName, consistentRead, returnConsumedCapacity);
    }

    /**
     * Reads all given keys, retrying unprocessed keys until DynamoDB reports none remain.
     *
     * @param keys distinct partition-key values to read; duplicates are rejected because
     *             BatchGetItem does not allow duplicate keys in one request
     * @return string renderings of the requested key maps
     */
    @Override
    public List<String> apply(List<String> keys) {
        Preconditions.checkArgument(new HashSet<>(keys).size() == keys.size());
        final KeysAndAttributes keysAndAttributes = generateReadRequests(keys);
        try {
            readUntilDone(keysAndAttributes);
            return keysAndAttributes.getKeys().stream()
                    .map(Map::toString)
                    .collect(Collectors.toList());
        } catch (AmazonServiceException ase) {
            throw amazonServiceException(ase);
        } catch (AmazonClientException ace) {
            throw amazonClientException(ace);
        }
    }

    /** Builds the BatchGetItem key list for the given partition-key values. */
    private KeysAndAttributes generateReadRequests(List<String> keys) {
        // BUGFIX: use the configured partition key name. The previous hard-coded "id"
        // broke reads on any table whose key attribute is not literally named "id"
        // (the sibling write operations already use partitionKeyName).
        return new KeysAndAttributes()
                .withKeys(keys.stream()
                        .map(key -> ImmutableMap.of(partitionKeyName, new AttributeValue(key)))
                        .collect(Collectors.toList()))
                .withConsistentRead(consistentRead);
    }

    /** Issues BatchGetItem calls until no unprocessed keys remain for this table. */
    private void readUntilDone(KeysAndAttributes keysAndAttributes) {
        KeysAndAttributes remainingKeys = keysAndAttributes;
        BatchGetItemResult result;
        do {
            // Unprocessed keys returned by DynamoDB do not carry the consistent-read
            // flag, so reapply it before retrying.
            remainingKeys.withConsistentRead(consistentRead);
            result = runBatchGetRequest(remainingKeys);
            remainingKeys = result.getUnprocessedKeys().get(tableName);
        } while (remainingKeys != null && remainingKeys.getKeys() != null && !remainingKeys.getKeys().isEmpty());
    }

    /** Runs one BatchGetItem round trip and records its consumed capacity. */
    private BatchGetItemResult runBatchGetRequest(KeysAndAttributes keysAndAttributes) {
        //TODO self throttle and estimate size of requests
        return measureConsumedCapacity(dynamoDB.batchGetItem(new BatchGetItemRequest()
                .withRequestItems(ImmutableMap.of(tableName, keysAndAttributes))
                .withReturnConsumedCapacity(returnConsumedCapacity)));
    }

    /** Adds this result's per-table consumed capacity to the running total; returns the result. */
    @Override
    public BatchGetItemResult measureConsumedCapacity(BatchGetItemResult result) {
        consumed.addAndGet(result.getConsumedCapacity() == null ? 0 : getConsumedCapacityForTable(result.getConsumedCapacity()));
        return result;
    }
}
| 9,102 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/DynamoDBWriteSingle.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.PutItemRequest;
import com.amazonaws.services.dynamodbv2.model.PutItemResult;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.netflix.ndbench.api.plugin.DataGenerator;
import java.util.Optional;
/**
* @author Alexander Patrikalakis
* @author ipapapa
*/
public class DynamoDBWriteSingle extends AbstractDynamoDBDataPlaneOperation
        implements CapacityConsumingFunction<PutItemResult, String, String> {

    /**
     * Builds a single-item write operation backed by PutItem.
     */
    public DynamoDBWriteSingle(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
                               String partitionKeyName, ReturnConsumedCapacity returnConsumedCapacity) {
        super(dynamoDB, tableName, partitionKeyName, dataGenerator, returnConsumedCapacity);
    }

    /**
     * Writes one item keyed by {@code key} with a random payload attribute,
     * recording the consumed capacity of the call.
     *
     * @param key partition-key value to write
     * @return string rendering of the PutItem result, or null if the SDK returned none
     */
    @Override
    public String apply(String key) {
        final PutItemRequest writeRequest = new PutItemRequest()
                .withTableName(tableName)
                .withReturnConsumedCapacity(returnConsumedCapacity)
                .addItemEntry(partitionKeyName, new AttributeValue().withS(key))
                .addItemEntry(ATTRIBUTE_NAME, new AttributeValue().withS(dataGenerator.getRandomValue()));
        try {
            // Write the item to the table.
            final PutItemResult writeResult = dynamoDB.putItem(writeRequest);
            if (writeResult == null) {
                return null;
            }
            return measureConsumedCapacity(writeResult).toString();
        } catch (AmazonServiceException ase) {
            throw amazonServiceException(ase);
        } catch (AmazonClientException ace) {
            throw amazonClientException(ace);
        }
    }

    /** Adds the result's capacity units (when reported) to the running total; returns the result. */
    @Override
    public PutItemResult measureConsumedCapacity(PutItemResult result) {
        final ConsumedCapacity capacity = result.getConsumedCapacity();
        final boolean reported = capacity != null && capacity.getCapacityUnits() != null;
        if (reported) {
            consumed.addAndGet(capacity.getCapacityUnits());
        }
        return result;
    }
}
| 9,103 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/AbstractDynamoDBReadOperation.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.netflix.ndbench.api.plugin.DataGenerator;
/**
* @author Alexander Patrikalakis
*/
/**
 * Base class for DynamoDB read operations (single and bulk), adding the
 * consistent-read setting on top of the common data-plane state.
 */
public abstract class AbstractDynamoDBReadOperation extends AbstractDynamoDBDataPlaneOperation {
// Whether reads use strongly consistent semantics (true) or eventual consistency (false).
protected final boolean consistentRead;
/**
 * @param dataGenerator          source of random payload values
 * @param dynamoDB               client used for data-plane calls
 * @param tableName              name of the benchmark table
 * @param partitionKeyName       name of the partition key attribute
 * @param consistentRead         true for strongly consistent reads
 * @param returnConsumedCapacity level of consumed-capacity detail to request
 */
protected AbstractDynamoDBReadOperation(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
String partitionKeyName, boolean consistentRead,
ReturnConsumedCapacity returnConsumedCapacity) {
super(dynamoDB, tableName, partitionKeyName, dataGenerator, returnConsumedCapacity);
this.consistentRead = consistentRead;
}
}
| 9,104 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/DynamoDBReadSingle.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.GetItemRequest;
import com.amazonaws.services.dynamodbv2.model.GetItemResult;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.google.common.collect.ImmutableMap;
import com.netflix.ndbench.api.plugin.DataGenerator;
import java.util.Map;
import java.util.Optional;
/**
* @author Alexander Patrikalakis
* @author ipapapa
*/
public class DynamoDBReadSingle extends AbstractDynamoDBReadOperation
        implements CapacityConsumingFunction<GetItemResult, String, String> {

    /**
     * Builds a single-item read operation backed by GetItem.
     */
    public DynamoDBReadSingle(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
                              String partitionKeyName, boolean consistentRead,
                              ReturnConsumedCapacity returnConsumedCapacity) {
        super(dataGenerator, dynamoDB, tableName, partitionKeyName, consistentRead, returnConsumedCapacity);
    }

    /**
     * Reads one item by partition key, recording consumed capacity.
     *
     * @param key partition-key value to look up
     * @return string rendering of the fetched item, or null if nothing was returned
     */
    @Override
    public String apply(String key) {
        final GetItemRequest readRequest = new GetItemRequest()
                .withTableName(tableName)
                .withKey(ImmutableMap.of(partitionKeyName, new AttributeValue(key)))
                .withReturnConsumedCapacity(returnConsumedCapacity)
                .withConsistentRead(consistentRead);
        try {
            final GetItemResult readResult = dynamoDB.getItem(readRequest);
            if (readResult == null) {
                return null;
            }
            measureConsumedCapacity(readResult);
            final Map<String, AttributeValue> item = readResult.getItem();
            return item == null ? null : item.toString();
        } catch (AmazonServiceException ase) {
            throw amazonServiceException(ase);
        } catch (AmazonClientException ace) {
            throw amazonClientException(ace);
        }
    }

    /** Adds the result's capacity units (when reported) to the running total; returns the result. */
    @Override
    public GetItemResult measureConsumedCapacity(GetItemResult result) {
        final ConsumedCapacity capacity = result.getConsumedCapacity();
        if (capacity != null && capacity.getCapacityUnits() != null) {
            consumed.addAndGet(capacity.getCapacityUnits());
        }
        return result;
    }
}
| 9,105 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/DynamoDBWriteBulk.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemRequest;
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult;
import com.amazonaws.services.dynamodbv2.model.PutRequest;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.WriteRequest;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.ndbench.api.plugin.DataGenerator;
import java.util.HashSet;
import java.util.List;
import java.util.stream.Collectors;
/**
* @author Alexander Patrikalakis
* @author ipapapa
*/
public class DynamoDBWriteBulk extends AbstractDynamoDBDataPlaneOperation
        implements CapacityConsumingFunction<BatchWriteItemResult, List<String>, List<String>> {

    /**
     * Builds a bulk-write operation that puts a batch of items via BatchWriteItem.
     */
    public DynamoDBWriteBulk(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
                             String partitionKeyName, ReturnConsumedCapacity returnConsumedCapacity) {
        super(dynamoDB, tableName, partitionKeyName, dataGenerator, returnConsumedCapacity);
    }

    /**
     * Writes one item per key, retrying unprocessed items until DynamoDB accepts them all.
     *
     * @param keys distinct partition-key values to write; duplicates are rejected because
     *             BatchWriteItem does not allow duplicate keys in one request
     * @return string renderings of the put requests that were issued
     */
    @Override
    public List<String> apply(List<String> keys) {
        Preconditions.checkArgument(new HashSet<>(keys).size() == keys.size());
        final List<WriteRequest> writeRequests = generateWriteRequests(keys);
        try {
            writeUntilDone(writeRequests);
            return writeRequests.stream()
                    .map(WriteRequest::getPutRequest)
                    .map(PutRequest::toString)
                    .collect(Collectors.toList());
        } catch (AmazonServiceException ase) {
            throw amazonServiceException(ase);
        } catch (AmazonClientException ace) {
            throw amazonClientException(ace);
        }
    }

    /** Builds one PutRequest per key, pairing it with a random payload attribute. */
    private List<WriteRequest> generateWriteRequests(List<String> keys) {
        return keys.stream()
                .map(key -> ImmutableMap.of(partitionKeyName, new AttributeValue(key),
                        ATTRIBUTE_NAME, new AttributeValue(this.dataGenerator.getRandomValue())))
                .map(item -> new PutRequest().withItem(item))
                .map(put -> new WriteRequest().withPutRequest(put))
                .collect(Collectors.toList());
    }

    /** Issues BatchWriteItem calls until no unprocessed items remain for this table. */
    private void writeUntilDone(List<WriteRequest> requests) {
        List<WriteRequest> remainingRequests = requests;
        BatchWriteItemResult result;
        do {
            result = runBatchWriteRequest(remainingRequests);
            remainingRequests = result.getUnprocessedItems().get(tableName);
            // BUGFIX: the loop condition was inverted (`remainingRequests.isEmpty()`),
            // so throttled/unprocessed items were silently dropped instead of retried.
            // Mirror readUntilDone in DynamoDBReadBulk: keep going while items remain.
        } while (remainingRequests != null && !remainingRequests.isEmpty());
    }

    /** Runs one BatchWriteItem round trip and records its consumed capacity. */
    private BatchWriteItemResult runBatchWriteRequest(List<WriteRequest> writeRequests) {
        //todo self throttle
        return measureConsumedCapacity(dynamoDB.batchWriteItem(new BatchWriteItemRequest()
                .withRequestItems(ImmutableMap.of(tableName, writeRequests))
                .withReturnConsumedCapacity(returnConsumedCapacity)));
    }

    /** Adds this result's per-table consumed capacity to the running total; returns the result. */
    @Override
    public BatchWriteItemResult measureConsumedCapacity(BatchWriteItemResult result) {
        consumed.addAndGet(result.getConsumedCapacity() == null ? 0 : getConsumedCapacityForTable(result.getConsumedCapacity()));
        return result;
    }
}
| 9,106 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/AbstractDynamoDBDataPlaneOperation.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.google.common.base.Preconditions;
import com.google.common.util.concurrent.AtomicDouble;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.AbstractDynamoDBOperation;
import java.util.List;
/**
* @author Alexander Patrikalakis
*/
/**
 * Common state and helpers shared by DynamoDB data-plane operations:
 * the payload generator, the consumed-capacity accumulator, and the
 * requested level of consumed-capacity reporting.
 */
public class AbstractDynamoDBDataPlaneOperation extends AbstractDynamoDBOperation {
    protected final DataGenerator dataGenerator;
    // Running total of capacity units consumed since the last reset.
    protected final AtomicDouble consumed = new AtomicDouble(0.0);
    protected final ReturnConsumedCapacity returnConsumedCapacity;

    /**
     * @param dynamoDB               client used for data-plane calls
     * @param tableName              name of the benchmark table
     * @param partitionKeyName       name of the partition key attribute
     * @param dataGenerator          source of random payload values
     * @param returnConsumedCapacity level of consumed-capacity detail to request
     */
    protected AbstractDynamoDBDataPlaneOperation(AmazonDynamoDB dynamoDB, String tableName, String partitionKeyName,
                                                 DataGenerator dataGenerator,
                                                 ReturnConsumedCapacity returnConsumedCapacity) {
        super(dynamoDB, tableName, partitionKeyName);
        this.dataGenerator = dataGenerator;
        this.returnConsumedCapacity = returnConsumedCapacity;
    }

    /**
     * Extracts the capacity consumed against this operation's table from a
     * per-table consumed-capacity list; entries for other tables are ignored.
     *
     * @param consumedCapacities non-null list of per-table consumed capacities
     * @return the capacity units for {@code tableName}, or 0.0 if absent
     */
    protected double getConsumedCapacityForTable(List<ConsumedCapacity> consumedCapacities) {
        Preconditions.checkNotNull(consumedCapacities);
        for (ConsumedCapacity capacity : consumedCapacities) {
            if (tableName.equals(capacity.getTableName())) {
                return capacity.getCapacityUnits();
            }
        }
        return 0.0;
    }

    /** Returns the accumulated consumed capacity and resets the counter to zero. */
    public double getAndResetConsumed() {
        return consumed.getAndSet(0.0);
    }
}
| 9,107 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/DynamoDBWriteTransaction.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.amazonaws.services.cloudwatch.model.ResourceNotFoundException;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.CancellationReason;
import com.amazonaws.services.dynamodbv2.model.ConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.InternalServerErrorException;
import com.amazonaws.services.dynamodbv2.model.Put;
import com.amazonaws.services.dynamodbv2.model.PutItemResult;
import com.amazonaws.services.dynamodbv2.model.ReturnConsumedCapacity;
import com.amazonaws.services.dynamodbv2.model.ReturnValuesOnConditionCheckFailure;
import com.amazonaws.services.dynamodbv2.model.TransactWriteItem;
import com.amazonaws.services.dynamodbv2.model.TransactWriteItemsRequest;
import com.amazonaws.services.dynamodbv2.model.TransactionCanceledException;
import com.netflix.ndbench.api.plugin.DataGenerator;
/**
* Performs writes on main table and child tables as part of a single transaction
*
* @author Sumanth Pasupuleti
*/
public class DynamoDBWriteTransaction extends AbstractDynamoDBDataPlaneOperation
        implements CapacityConsumingFunction<PutItemResult, String, String> {
    private static final Logger logger = LoggerFactory.getLogger(DynamoDBWriteTransaction.class);
    private static final String ResultOK = "Ok";
    private static final String ResultFailed = "Failed";
    private final String childTableNamePrefix;
    private final int mainTableColsPerRow;

    /**
     * Builds an operation that writes one row to the main table plus one row per
     * child table, all inside a single DynamoDB transaction.
     *
     * @param childTableNamePrefix prefix used both for child table names and for the
     *                             main-table column names ("{prefix}0" .. "{prefix}N-1")
     * @param mainTableColsPerRow  number of payload columns / child tables per key
     */
    public DynamoDBWriteTransaction(DataGenerator dataGenerator, AmazonDynamoDB dynamoDB, String tableName,
                                    String partitionKeyName, String childTableNamePrefix, int mainTableColsPerRow,
                                    ReturnConsumedCapacity returnConsumedCapacity) {
        super(dynamoDB, tableName, partitionKeyName, dataGenerator, returnConsumedCapacity);
        this.childTableNamePrefix = childTableNamePrefix;
        this.mainTableColsPerRow = mainTableColsPerRow;
    }

    /**
     * Performs the transactional write for one key.
     *
     * @param key partition-key value for the main-table row
     * @return {@link #ResultOK} on success; otherwise the underlying exception is rethrown
     */
    @Override
    public String apply(String key) {
        Collection<TransactWriteItem> writes = new ArrayList<>();
        HashMap<String, AttributeValue> mainTableItem = new HashMap<>();
        mainTableItem.put(partitionKeyName, new AttributeValue(key));
        for (int i = 0; i < mainTableColsPerRow; i++)
        {
            String value = this.dataGenerator.getRandomValue();
            // main table entry: column "{prefix}i" holds the random value
            mainTableItem.put(childTableNamePrefix + i, new AttributeValue(value));
            // child table entry: the random value doubles as the child table's key
            HashMap<String, AttributeValue> childTableItem = new HashMap<>();
            childTableItem.put(partitionKeyName, new AttributeValue(value));
            Put childTableEntry = new Put()
                    .withTableName(childTableNamePrefix + i)
                    .withItem(childTableItem)
                    .withReturnValuesOnConditionCheckFailure(ReturnValuesOnConditionCheckFailure.ALL_OLD);
            writes.add(new TransactWriteItem().withPut(childTableEntry));
        }
        Put mainTableEntry = new Put()
                .withTableName(tableName)
                .withItem(mainTableItem)
                .withReturnValuesOnConditionCheckFailure(ReturnValuesOnConditionCheckFailure.ALL_OLD);
        writes.add(new TransactWriteItem().withPut(mainTableEntry));
        TransactWriteItemsRequest placeWriteTransaction = new TransactWriteItemsRequest()
                .withTransactItems(writes)
                // Use the configured level, consistent with every other data-plane op
                // (was hard-coded to TOTAL while the result was then discarded anyway).
                .withReturnConsumedCapacity(returnConsumedCapacity);
        try
        {
            // BUGFIX: capture the result and record consumed capacity; it was
            // previously requested but thrown away.
            com.amazonaws.services.dynamodbv2.model.TransactWriteItemsResult result =
                    dynamoDB.transactWriteItems(placeWriteTransaction);
            if (result.getConsumedCapacity() != null) {
                consumed.addAndGet(getConsumedCapacityForTable(result.getConsumedCapacity()));
            }
            return ResultOK;
        }
        // BUGFIX: DynamoDB throws the dynamodbv2 ResourceNotFoundException, but the file
        // imports the CloudWatch exception of the same name, so the old catch clause was
        // dead code. Fully qualified here because of that clashing import.
        catch (com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException rnf)
        {
            // Pass the throwable as the last argument so SLF4J logs the stack trace.
            logger.error("One of the table involved in the transaction is not found", rnf);
            throw rnf;
        }
        catch (InternalServerErrorException ise)
        {
            logger.error("Internal Server Error", ise);
            throw ise;
        }
        catch (TransactionCanceledException tce)
        {
            StringBuilder sb = new StringBuilder();
            sb.append(String.format("Transaction cancelled. %s", tce));
            // Collect the per-item cancellation reasons into one warning line.
            List<CancellationReason> cancellationReasonList = tce.getCancellationReasons();
            if (cancellationReasonList != null)
            {
                for (CancellationReason cancellationReason : cancellationReasonList)
                {
                    sb.append(String.format("Cancellation reason: %s", cancellationReason.getMessage()));
                }
            }
            logger.warn(sb.toString());
            throw tce;
        }
    }

    /**
     * Interface obligation; not invoked by {@link #apply(String)} because the
     * transaction API returns a TransactWriteItemsResult, not a PutItemResult.
     */
    @Override
    public PutItemResult measureConsumedCapacity(PutItemResult result) {
        ConsumedCapacity consumedCapacity = result.getConsumedCapacity();
        if (consumedCapacity != null && consumedCapacity.getCapacityUnits() != null) {
            consumed.addAndGet(result.getConsumedCapacity().getCapacityUnits());
        }
        return result;
    }
}
| 9,108 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/operations/dynamodb/dataplane/CapacityConsumingFunction.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.operations.dynamodb.dataplane;
import java.util.function.Function;
/**
 * A {@link Function} that can additionally fold the consumed-capacity information
 * carried by an AWS SDK result into a running total.
 *
 * @param <T> SDK result type that carries consumed-capacity data
 * @param <I> input type of the function
 * @param <O> output type of the function
 */
public interface CapacityConsumingFunction<T, I, O> extends Function<I, O> {
// Records the capacity consumed by the given result and returns the same result,
// so the call can be chained fluently inside the apply() implementation.
T measureConsumedCapacity(T t);
}
| 9,109 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/configs/ProgrammaticDynamoDBConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.configs;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
* Configurations for DynamoDB benchmarks
*
* @author ipapapa
* @author Alexander Patrikalakis
*/
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "dynamodb")
public interface ProgrammaticDynamoDBConfiguration extends DynamoDBConfigurationBase {
/**
 * Provisioned read capacity units to create the table with.
 */
@DefaultValue("5")
String getReadCapacityUnits();
/**
 * Provisioned write capacity units to create the table with.
 */
@DefaultValue("5")
String getWriteCapacityUnits();
/**
 * Whether to enable Application Auto Scaling for the DynamoDB table.
 */
@DefaultValue("true")
Boolean getAutoscaling();
/**
 * Target read utilization represented as a percentage of the provisioned read throughput.
 */
@DefaultValue("70")
String getTargetReadUtilization();
/**
 * Target write utilization represented as a percentage of the provisioned write throughput.
 */
@DefaultValue("70")
String getTargetWriteUtilization();
/**
 * DynamoDB publishes one minute metrics for Consumed Capacity. To supplement this metric,
 * ndbench can publish 1-second high resolution metrics of consumed capacity to CloudWatch.
 */
@DefaultValue("false")
Boolean publishHighResolutionConsumptionMetrics();
/**
 * The interval, in milliseconds, at which ndbench publishes high resolution
 * consumption metrics to CloudWatch.
 */
@DefaultValue("1000")
Long getHighResolutionMetricsPublishingInterval();
/**
 * Whether to create CloudWatch alarms on the high resolution consumption metrics
 * published when {@link #publishHighResolutionConsumptionMetrics()} is enabled.
 */
@DefaultValue("false")
Boolean alarmOnHighResolutionConsumptionMetrics();
/**
 * High resolution alarm threshold, as a percentage of provisioned capacity.
 */
@DefaultValue("80")
Double highResolutionAlarmThresholdPercentageOfProvisionedCapacity();
}
| 9,110 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/configs/DynamoDBConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.configs;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
 * Configuration for DynamoDB benchmarks that run against a pre-existing table;
 * adds nothing beyond the shared {@link DynamoDBConfigurationBase} properties.
 *
 * @author ipapapa
 * @author Alexander Patrikalakis
 */
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "dynamodb")
public interface DynamoDBConfiguration extends DynamoDBConfigurationBase {
}
| 9,111 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/configs/DynamoDBModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.configs;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule;
/**
 * Guice module that exposes the Archaius-backed DynamoDB configuration proxies
 * to the ndbench plugin framework.
 *
 * @author ipapapa
 * @author Alexander Patrikalakis
 *
 */
@NdBenchClientPluginGuiceModule
public class DynamoDBModule extends AbstractModule {
    @Override
    protected void configure() {
        // No explicit bindings; everything is supplied via @Provides methods below.
    }

    /** Builds a dynamic proxy backed by Archaius for the base DynamoDB config. */
    @Provides
    DynamoDBConfiguration getDynamoDBConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(DynamoDBConfiguration.class);
    }

    /** Builds a dynamic proxy backed by Archaius for the programmatic-table config. */
    @Provides
    ProgrammaticDynamoDBConfiguration getProgrammaticDynamoDBConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(ProgrammaticDynamoDBConfiguration.class);
    }
}
| 9,112 |
0 | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb | Create_ds/ndbench/ndbench-dynamodb-plugins/src/main/java/com/netflix/ndbench/plugin/dynamodb/configs/DynamoDBConfigurationBase.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dynamodb.configs;
import com.netflix.archaius.api.annotations.DefaultValue;
/**
 * Properties shared by every DynamoDB benchmark configuration: table/attribute
 * naming, consistency, client tuning, and child-table layout.
 *
 * @author ipapapa
 * @author Alexander Patrikalakis
 */
public interface DynamoDBConfigurationBase {
    /**
     * The name of the table to use.
     */
    @DefaultValue("ndbench-table")
    String getTableName();

    /**
     * Attributes – Each item is composed of one or more attributes. An attribute is
     * a fundamental data element, something that does not need to be broken down
     * any further. This is the name of the single value attribute written per item.
     */
    @DefaultValue("id")
    String getAttributeName();

    /**
     * Consistency: When you request a strongly consistent read, DynamoDB returns a
     * response with the most up-to-date data, reflecting the updates from all prior
     * write operations that were successful.
     */
    @DefaultValue("true")
    Boolean consistentRead();

    /**
     * Compression: HTTP clients for DynamoDB can be configured to use GZip compression.
     * Effects are negligible for small items, but can be significant for large items with
     * high deflation ratios.
     */
    @DefaultValue("false")
    Boolean isCompressing();

    /**
     * Region: Allowing customers to override the region enables baselining cross-region
     * use cases. No default — {@code null} means use the SDK's region resolution.
     */
    String getRegion();

    /**
     * Endpoint: Allowing customers to override the endpoint enables baselining cross-region
     * use cases and testing with DynamoDB local. No default.
     */
    String getEndpoint();

    /**
     * Max connections: the HTTP client in the DynamoDB client has a connection pool. Making it
     * configurable here makes it possible to drive workloads from one host that require more
     * than 50 total read and write workers.
     */
    @DefaultValue("50")
    Integer getMaxConnections();

    /**
     * Max client timeout (milliseconds): maximum amount of time the HTTP client will wait for
     * a response from DynamoDB. The default -1 means there is no request timeout.
     */
    @DefaultValue("-1")
    Integer getMaxRequestTimeout();

    /**
     * Max SDK retries: maximum number of times the SDK client will retry a request after a
     * retriable exception.
     */
    @DefaultValue("10")
    Integer getMaxRetries();

    /**
     * Number of main table columns and consequently number of child tables — related to use
     * cases of domain and mapping tables.
     */
    @DefaultValue("5")
    Integer getMainTableColsPerRow();

    /**
     * Prefix for child table names.
     */
    @DefaultValue("child")
    String getChildTableNamePrefix();
}
| 9,113 |
0 | Create_ds/ndbench/ndbench-web/src/test/java/com | Create_ds/ndbench/ndbench-web/src/test/java/com/test/Log4jConfigurationTest.java | package com.test;
import org.apache.commons.io.FileUtils;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
/**
 * Verifies that slf4j is actually bound to a log4j backend: a throwaway log4j
 * properties file raising {@code com.test} to TRACE is installed before any
 * logger initializes, then the test checks that TRACE is visible via slf4j.
 */
public class Log4jConfigurationTest {
    static {
        try {
            // Static block so the system property is set before log4j
            // initializes any logger.
            File temp = File.createTempFile("temp-file-name", ".log");
            temp.deleteOnExit();
            // Pass the charset explicitly; the two-argument overload is
            // deprecated and silently uses the platform-default charset.
            FileUtils.writeStringToFile(temp, "log4j.logger.com.test=TRACE", "UTF-8");
            System.setProperty(
                    "log4j.configuration",
                    "file:///" + temp.getAbsolutePath());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Fails when the TRACE level configured above is not observable through
     * slf4j, which means slf4j is not bound to a log4j implementation.
     */
    @Test
    public void verifyLog4jPropertiesConfigurationWorksAsExpected() throws Exception {
        final Logger logger = LoggerFactory.getLogger(Log4jConfigurationTest.class);
        if (!logger.isTraceEnabled()) {
            throw new RuntimeException("slf4j seems not to be bound to a log4j implementation. check dependencies!");
        }
    }
}
| 9,114 |
0 | Create_ds/ndbench/ndbench-web/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-web/src/main/java/com/netflix/ndbench/core/config/WebGuiceInjectorProvider.java | package com.netflix.ndbench.core.config;
import com.google.inject.*;
import com.google.inject.util.Modules;
import com.netflix.appinfo.EurekaInstanceConfig;
import com.netflix.appinfo.providers.MyDataCenterInstanceConfigProvider;
import com.netflix.discovery.guice.EurekaModule;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import org.apache.commons.lang.StringUtils;
import java.util.List;
/**
* ndbench-web app-specific GuiceInjectorProvider impl which adds EurekaModule (local or AWS) to avoid conflicts from
* different modules instantiating different LookupService implementations.
* <p>
* {@inheritDoc}
*/
public class WebGuiceInjectorProvider extends GuiceInjectorProvider {
/**
* GuiceInjectorProvider impl which adds EurekaModule (local or AWS) to avoid conflicts from different modules
* instantiating different LookupService implementations.
* <p>
* {@inheritDoc}
*/
@Override
public Injector getInjector(AbstractModule... modules) {
List<Module> moduleList = getModuleList(modules);
// Add EurekaModule for any plugins which require it.
// Currently needed only for ndbench-evcache-plugins, but we don't add EurekaModule to module-level
// because choice of LookupService impl should be made at application level to avoid conflicts from
// different modules creating different LookupService implementations.
String discoveryEnv = System.getenv(NdBenchConstants.DISCOVERY_ENV);
if(StringUtils.isBlank(discoveryEnv) || discoveryEnv == "local"){
moduleList.add(Modules.override(new EurekaModule()).with(new AbstractModule() {
@Override
protected void configure() {
// Default EurekaInstanceConfig is CloudInstanceConfig, which works only in AWS env.
// When not in AWS, override to use MyDataCenterInstanceConfig instead.
bind(EurekaInstanceConfig.class).toProvider(MyDataCenterInstanceConfigProvider.class).in(Scopes.SINGLETON);
}
}));
} else {
moduleList.add(new EurekaModule());
}
Injector injector = Guice.createInjector(moduleList);
injector.getInstance(IConfiguration.class).initialize();
return injector;
}
} | 9,115 |
0 | Create_ds/ndbench/ndbench-web/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-web/src/main/java/com/netflix/ndbench/defaultimpl/InjectedWebListener.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.defaultimpl;
import com.google.inject.Injector;
import com.google.inject.servlet.GuiceServletContextListener;
import com.google.inject.servlet.ServletModule;
import com.netflix.ndbench.core.config.GuiceInjectorProvider;
import com.sun.jersey.api.core.PackagesResourceConfig;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import java.util.HashMap;
import java.util.Map;
/**
 * Servlet context listener that bootstraps Guice for the ndbench web app and
 * wires Jersey REST resources/filters under the /REST path.
 *
 * @author vchella
 */
public class InjectedWebListener extends GuiceServletContextListener
{
    /** Builds the application injector via the shared GuiceInjectorProvider. */
    @Override
    protected Injector getInjector()
    {
        return new GuiceInjectorProvider().getInjector( new JaxServletModule());
    }

    /** Servlet module mapping Jersey-scanned resources and filters onto /REST/*. */
    public static class JaxServletModule extends ServletModule
    {
        @Override
        protected void configureServlets()
        {
            Map<String, String> params = new HashMap<>();
            // Packages Jersey scans for @Path resources and request filters.
            String packages =
                    "com.netflix.ndbench.core.resources;" +
                    "com.netflix.ndbench.core.filters;";

            params.put(PackagesResourceConfig.PROPERTY_PACKAGES, packages);
            // CORS headers are attached to every REST response.
            params.put(PackagesResourceConfig.PROPERTY_CONTAINER_RESPONSE_FILTERS,
                    "com.netflix.ndbench.core.filters.CorsResponseFilter");
            params.put(ServletContainer.PROPERTY_FILTER_CONTEXT_PATH, "/REST");
            serve("/REST/*").with(GuiceContainer.class, params);
        }
    }
}
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/dyno/DynoJedisExtFunc.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.dyno;
import com.google.inject.Singleton;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchBaseClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* This is the extended functional test for Dynomite.
*
* It tests:
*
* 1. GET 2. pipelined GET 3. pipelined HGETALL 4. ZRANGE
*
* 1. SET 2. pipelined SET 3. pipelined HMSET, 4. ZADD
*
* @author ipapapa
*
*/
@Singleton
@NdBenchClientPlugin("DynoExtFunc")
public class DynoJedisExtFunc extends NdBenchBaseClient {

    private final Logger logger = LoggerFactory.getLogger(DynoJedisExtFunc.class);

    // Pipeline depth bounds used by the pipelined read/write steps.
    private static final int MIN_PIPE_KEYS = 3;
    private static final int MAX_PIPE_KEYS = 10;
    // Upper bound of sorted-set scores for the ZADD/ZRANGE steps.
    private static final int MAX_SCORE = 5;

    // Key prefixes keeping the hash and sorted-set namespaces distinct.
    private static final String HM_KEY_PREFIX = "HM__";
    private static final String Z_KEY_PREFIX = "Z__";

    private static final String ClusterName = "dynomite_redis";

    private DataGenerator dataGenerator;

    // Lazily initialized in init(); null until then.
    private AtomicReference<DynoJedisClient> jedisClient = new AtomicReference<>(null);

    /**
     * Runs the four read steps in sequence (GET, pipelined GET, pipelined
     * HGETALL, ZRANGE). Returns null as soon as any step reports a cache miss;
     * otherwise returns a summary of every step's result.
     */
    @Override
    public String readSingle(String key) throws Exception {
        StringBuilder sb = new StringBuilder();
        String correct = null;
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        correct = jedisUtils.nonPipelineRead(key);
        if (correct == null)
            return null;
        sb.append("simple get: " + correct + " , ");
        correct = jedisUtils.pipelineRead(key, MAX_PIPE_KEYS, MIN_PIPE_KEYS);
        if (correct == null)
            return null;
        sb.append("pipeline get: " + correct + " , ");
        correct = jedisUtils.pipelineReadHGETALL(key, HM_KEY_PREFIX);
        if (correct == null)
            return null;
        sb.append("pipeline hash: " + correct + " , ");
        correct = jedisUtils.nonPipelineZRANGE(key, MAX_SCORE);
        if (correct == null)
            return null;
        sb.append("sorted set: " + correct + " , ");

        return sb.toString();
    }

    /**
     * Runs the four write steps in sequence (SET, pipelined SET, pipelined
     * HMSET, ZADD). Returns null as soon as any step fails; otherwise returns
     * a summary of every step's result.
     */
    @Override
    public String writeSingle(String key) throws Exception {
        StringBuilder sb = new StringBuilder();
        String correct = null;
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        correct = jedisUtils.nonpipelineWrite(key, dataGenerator);
        if (correct == null) {
            return null;
        }
        sb.append("simple get: " + correct + " , ");
        correct = jedisUtils.pipelineWrite(key, dataGenerator, MAX_PIPE_KEYS, MIN_PIPE_KEYS);
        if (correct == null)
            return null;
        sb.append("pipeline set: " + correct + " , ");
        correct = jedisUtils.pipelineWriteHMSET(key, dataGenerator, HM_KEY_PREFIX);
        if (correct == null)
            return null;
        sb.append("pipeline HMSET: " + correct + " , ");
        correct = jedisUtils.nonPipelineZADD(key, dataGenerator, Z_KEY_PREFIX, MAX_SCORE);
        if (correct == null)
            return null;
        sb.append("non pipeline ZADD: " + correct + " , ");
        return sb.toString();
    }

    /**
     * Perform a bulk read operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> readBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /**
     * Perform a bulk write operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> writeBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /** Stops and clears the shared client, if one was created. */
    @Override
    public void shutdown() throws Exception {
        if (jedisClient.get() != null) {
            jedisClient.get().stopClient();
            jedisClient.set(null);
        }
    }

    @Override
    public String getConnectionInfo() throws Exception {
        return String.format("Cluster Name - %s", ClusterName);
    }

    /**
     * Lazily builds a DynoJedisClient against a single hard-coded local
     * Dynomite node (localhost:8102, rack "local-dc"). No-op if the client
     * already exists.
     */
    @Override
    public void init(DataGenerator dataGenerator) throws Exception {
        this.dataGenerator = dataGenerator;
        if (jedisClient.get() != null) {
            return;
        }
        logger.info("Initing dyno jedis client");

        logger.info("\nDynomite Cluster: " + ClusterName);

        HostSupplier hSupplier = () -> {
            List<Host> hosts = new ArrayList<>();
            hosts.add(new HostBuilder().setHostname("localhost").setPort(8102).setRack("local-dc").setStatus(Host.Status.Up).createHost());
            return hosts;
        };

        DynoJedisClient jClient = new DynoJedisClient.Builder().withApplicationName(ClusterName)
                .withDynomiteClusterName(ClusterName).withHostSupplier(hSupplier).build();

        jedisClient.set(jClient);
    }

    /** No workflow defined for this plugin. */
    @Override
    public String runWorkFlow() throws Exception {
        return null;
    }
}
| 9,117 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/dyno/DynoJedis.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.dyno;
import com.google.inject.Singleton;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* @author vchella
*/
@Singleton
@NdBenchClientPlugin("DynoJedis")
public class DynoJedis implements NdBenchClient {

    private static final Logger logger = LoggerFactory.getLogger(DynoJedis.class);

    private static final String ResultOK = "Ok";
    /** Sentinel handed back to the harness when a key is absent. */
    private static final String CacheMiss = null;

    private static final String ClusterName = "dynomite_redis";

    private DataGenerator dataGenerator;

    // Populated once by init(); shared by all read/write calls.
    private final AtomicReference<DynoJedisClient> jedisClient = new AtomicReference<>(null);

    /**
     * Lazily builds a DynoJedisClient against a single hard-coded local
     * Dynomite node (localhost:8102, rack "local-dc"). Subsequent calls are
     * no-ops once the client reference is populated.
     */
    @Override
    public void init(DataGenerator dataGenerator) throws Exception {
        this.dataGenerator = dataGenerator;
        if (jedisClient.get() != null) {
            return;
        }

        logger.info("Initing dyno jedis client");
        logger.info("\nDynomite Cluster: " + ClusterName);

        HostSupplier singleLocalHost = () -> {
            List<Host> hostList = new ArrayList<>();
            hostList.add(new HostBuilder()
                    .setHostname("localhost")
                    .setPort(8102)
                    .setRack("local-dc")
                    .setStatus(Host.Status.Up)
                    .createHost());
            return hostList;
        };

        jedisClient.set(new DynoJedisClient.Builder()
                .withApplicationName(ClusterName)
                .withDynomiteClusterName(ClusterName)
                .withHostSupplier(singleLocalHost)
                .build());
    }

    /**
     * Reads one key. Returns null on a cache miss, "Ok" on a non-empty value,
     * and throws when the stored value is empty.
     */
    @Override
    public String readSingle(String key) throws Exception {
        String value = jedisClient.get().get(key);
        if (value == null) {
            return CacheMiss;
        }
        if (value.isEmpty()) {
            throw new Exception("Data retrieved is not ok ");
        }
        return ResultOK;
    }

    /**
     * Writes one randomly generated value; throws when Redis does not
     * acknowledge the SET with "OK".
     */
    @Override
    public String writeSingle(String key) throws Exception {
        String result = jedisClient.get().set(key, dataGenerator.getRandomValue());
        if ("OK".equals(result)) {
            return result;
        }
        logger.error("SET_ERROR: GOT " + result + " for SET operation");
        throw new RuntimeException(String.format("DynoJedis: value %s for SET operation is NOT VALID", key));
    }

    /**
     * Perform a bulk read operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> readBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /**
     * Perform a bulk write operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> writeBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /**
     * Shutdown the client, releasing its connection pool if one was created.
     */
    @Override
    public void shutdown() throws Exception {
        DynoJedisClient client = jedisClient.get();
        if (client != null) {
            client.stopClient();
            jedisClient.set(null);
        }
    }

    /**
     * Get connection information
     */
    @Override
    public String getConnectionInfo() throws Exception {
        return String.format("DynoJedis Plugin - ConnectionInfo ::Cluster Name - %s", ClusterName);
    }

    /** No workflow defined for this plugin. */
    @Override
    public String runWorkFlow() throws Exception {
        return null;
    }
}
| 9,118 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/dyno/DynoJedisGetSetPipeline.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.dyno;
import com.google.inject.Singleton;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* This pluging performs GET/SET inside a pipeline of size MAX_PIPE_KEYS against
* Dynomite.
*
* @author ipapapa
*
*/
@Singleton
@NdBenchClientPlugin("DynoGetSetPipeline")
public class DynoJedisGetSetPipeline implements NdBenchClient {

    private static final Logger logger = LoggerFactory.getLogger(DynoJedisGetSetPipeline.class);

    // Pipeline depth bounds passed to DynoJedisUtils.
    private static final int MIN_PIPE_KEYS = 3;
    private static final int MAX_PIPE_KEYS = 10;

    private static final String ClusterName = "dynomite_redis";

    // Lazily initialized in init(); null until then.
    private final AtomicReference<DynoJedisClient> jedisClient = new AtomicReference<>(null);

    private DataGenerator dataGenerator;

    /** Stops and clears the shared client, if one was created. */
    @Override
    public void shutdown() throws Exception {
        if (jedisClient.get() != null) {
            jedisClient.get().stopClient();
            jedisClient.set(null);
        }
    }

    @Override
    public String getConnectionInfo() throws Exception {
        return String.format("Cluster Name - %s", ClusterName);
    }

    /**
     * Lazily builds a DynoJedisClient against a single hard-coded local
     * Dynomite node (localhost:8102, rack "local-dc"). No-op if the client
     * already exists.
     */
    @Override
    public void init(DataGenerator dataGenerator) throws Exception {
        this.dataGenerator = dataGenerator;
        if (jedisClient.get() != null) {
            return;
        }
        logger.info("Initing dyno jedis client");

        logger.info("\nDynomite Cluster: " + ClusterName);

        HostSupplier hSupplier = () -> {
            List<Host> hosts = new ArrayList<>();
            hosts.add(new HostBuilder().setHostname("localhost").setPort(8102).setRack("local-dc").setStatus(Host.Status.Up).createHost());
            return hosts;
        };

        DynoJedisClient jClient = new DynoJedisClient.Builder().withApplicationName(ClusterName)
                .withDynomiteClusterName(ClusterName).withHostSupplier(hSupplier).build();

        jedisClient.set(jClient);
    }

    /** No workflow defined for this plugin. */
    @Override
    public String runWorkFlow() throws Exception {
        return null;
    }

    /** Pipelined GET of between MIN_PIPE_KEYS and MAX_PIPE_KEYS keys. */
    @Override
    public String readSingle(String key) throws Exception {
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        return jedisUtils.pipelineRead(key, MAX_PIPE_KEYS, MIN_PIPE_KEYS);
    }

    /** Pipelined SET of between MIN_PIPE_KEYS and MAX_PIPE_KEYS keys. */
    @Override
    public String writeSingle(String key) throws Exception {
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        return jedisUtils.pipelineWrite(key, dataGenerator, MAX_PIPE_KEYS, MIN_PIPE_KEYS);
    }

    /**
     * Perform a bulk read operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> readBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /**
     * Perform a bulk write operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> writeBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }
}
| 9,119 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/dyno/DynoJedisHashPipeline.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.dyno;
import com.google.inject.Singleton;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* This pluging performs hash operations (HMSET/HGETALL) inside a pipeline of
* size MAX_PIPE_KEYS against Dynomite.
*
* @author ipapapa
*
*/
@Singleton
@NdBenchClientPlugin("DynoHashPipeline")
public class DynoJedisHashPipeline implements NdBenchClient {

    private static final Logger logger = LoggerFactory.getLogger(DynoJedisHashPipeline.class);

    // Prefix keeping hash keys distinct from plain string keys.
    private static final String HM_KEY_PREFIX = "HM__";
    private static final String ClusterName = "dynomite_redis";

    // Lazily initialized in init(); null until then.
    private final AtomicReference<DynoJedisClient> jedisClient = new AtomicReference<>(null);

    private DataGenerator dataGenerator;

    /**
     * Lazily builds a DynoJedisClient against a single hard-coded local
     * Dynomite node (localhost:8102, rack "local-dc"). No-op if the client
     * already exists.
     */
    @Override
    public void init(DataGenerator dataGenerator) throws Exception {
        this.dataGenerator = dataGenerator;
        if (jedisClient.get() != null) {
            return;
        }
        logger.info("Initing dyno jedis client");

        logger.info("\nDynomite Cluster: " + ClusterName);

        HostSupplier hSupplier = () -> {
            List<Host> hosts = new ArrayList<>();
            hosts.add(new HostBuilder().setHostname("localhost").setPort(8102).setRack("local-dc").setStatus(Host.Status.Up).createHost());
            return hosts;
        };

        DynoJedisClient jClient = new DynoJedisClient.Builder().withApplicationName(ClusterName)
                .withDynomiteClusterName(ClusterName).withHostSupplier(hSupplier).build();

        jedisClient.set(jClient);
    }

    /** Pipelined HGETALL on the HM__-prefixed hash for this key. */
    @Override
    public String readSingle(String key) throws Exception {
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        return jedisUtils.pipelineReadHGETALL(key, HM_KEY_PREFIX);
    }

    /** Pipelined HMSET on the HM__-prefixed hash for this key. */
    @Override
    public String writeSingle(String key) throws Exception {
        DynoJedisUtils jedisUtils = new DynoJedisUtils(jedisClient);
        return jedisUtils.pipelineWriteHMSET(key, dataGenerator, HM_KEY_PREFIX);
    }

    /**
     * Perform a bulk read operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> readBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /**
     * Perform a bulk write operation
     * @return a list of response codes
     * @throws Exception always; bulk mode is not implemented by this plugin
     */
    public List<String> writeBulk(final List<String> keys) throws Exception {
        throw new UnsupportedOperationException("bulk operation is not supported");
    }

    /** Stops and clears the shared client, if one was created. */
    @Override
    public void shutdown() throws Exception {
        if (jedisClient.get() != null) {
            jedisClient.get().stopClient();
            jedisClient.set(null);
        }
    }

    @Override
    public String getConnectionInfo() throws Exception {
        return String.format("Cluster Name - %s", ClusterName);
    }

    /** No workflow defined for this plugin. */
    @Override
    public String runWorkFlow() throws Exception {
        return null;
    }
}
| 9,120 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/dyno/DynoJedisUtils.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.dyno;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.dyno.jedis.DynoJedisPipeline;
import com.netflix.ndbench.api.plugin.DataGenerator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Response;
import java.util.*;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Helper shared by the Dyno plugins implementing the individual Redis
 * operations (plain and pipelined GET/SET, HGETALL/HMSET, ZADD/ZRANGE) on top
 * of a client reference owned by the calling plugin.
 */
public class DynoJedisUtils {

    // Shared client reference; this helper never creates or stops the client.
    private AtomicReference<DynoJedisClient> jedisClient;

    private static final String ResultOK = "Ok";
    /** Sentinel handed back to the harness when a key is absent. */
    private static final String CacheMiss = null;

    private static final Logger logger = LoggerFactory.getLogger(DynoJedisUtils.class);
    private static Random randomGenerator = new Random();

    /**
     * @param jedisClient already-initialized client reference owned by the
     *                    calling plugin
     */
    public DynoJedisUtils(AtomicReference<DynoJedisClient> jedisClient) {
        this.jedisClient = jedisClient;
    }
    /**
     * Non-pipelined single-key read.
     *
     * @param key the key to fetch
     * @return "Ok" when a non-empty value was read, or null on a cache miss
     * @throws Exception when the stored value exists but is empty
     */
    public String nonPipelineRead(String key) throws Exception {
        String res = jedisClient.get().get(key);
        if (res != null) {
            if (res.isEmpty()) {
                throw new Exception("Data retrieved is not ok ");
            }
        } else {
            return CacheMiss;
        }
        return ResultOK;
    }
    /**
     * Pipelined read: issues a random number of GETs (between min_pipe_keys
     * and max_pipe_keys - 1) in one pipeline and validates every response.
     *
     * @param key base key; the SAME key is issued on every pipeline slot
     *            because Dyno Jedis pipelines operate on a single key — the
     *            "key_N" names only index the local response map
     * @param max_pipe_keys exclusive upper bound on the pipeline depth
     * @param min_pipe_keys lower bound on the pipeline depth
     * @return "OK" when every response validated, or null on any cache miss
     * @throws Exception on an ERR reply or a response failing isValidResponse
     */
    public String pipelineRead(String key, int max_pipe_keys, int min_pipe_keys) throws Exception {
        // Random depth clamped into [min_pipe_keys, max_pipe_keys).
        int pipe_keys = randomGenerator.nextInt(max_pipe_keys);
        pipe_keys = Math.max(min_pipe_keys, pipe_keys);

        DynoJedisPipeline pipeline = this.jedisClient.get().pipelined();
        Map<String, Response<String>> responses = new HashMap<>();

        for (int n = 0; n < pipe_keys; ++n) {
            String nth_key = key + "_" + n;
            // NOTE: Dyno Jedis works on only one key, so we always use the same
            // key in every get operation
            Response<String> resp = pipeline.get(key);
            // We however use the nth key as the key in the hashmap to check
            // individual response on every operation.
            responses.put(nth_key, resp);
        }
        // Flush the pipeline; Response values are unreadable before sync().
        pipeline.sync();

        for (int n = 0; n < pipe_keys; ++n) {
            String nth_key = key + "_" + n;
            Response<String> resp = responses.get(nth_key);
            if (resp == null || resp.get() == null) {
                logger.info("Cache Miss on pipelined read: key:" + key);
                return null;
            } else {
                if (resp.get().startsWith("ERR")) {
                    throw new Exception(String.format("DynoJedisPipeline: error %s", resp.get()));
                }
                if (!isValidResponse(key, resp.get())) {
                    throw new Exception(String.format(
                            "DynoJedisPipeline: pipeline read: value %s does not contain key %s", resp.get(), key));
                }
            }
        }
        return "OK";
    }
/**
* This the pipelined HGETALL
*
* @param key
* @return the contents of the hash
* @throws Exception
*/
public String pipelineReadHGETALL(String key, String hm_key_prefix) throws Exception {
DynoJedisPipeline pipeline = jedisClient.get().pipelined();
Response<Map<byte[], byte[]>> resp = pipeline.hgetAll((hm_key_prefix + key).getBytes());
pipeline.sync();
if (resp == null || resp.get() == null) {
logger.info("Cache Miss: key:" + key);
return null;
} else {
StringBuilder sb = new StringBuilder();
for (byte[] bytes : resp.get().keySet()) {
if (sb.length() > 0) {
sb.append(",");
}
sb.append(new String(bytes));
}
return "HGETALL:" + sb.toString();
}
}
/**
* Exercising ZRANGE to receive all keys between 0 and MAX_SCORE
*
* @param key
*/
public String nonPipelineZRANGE(String key, int max_score) {
StringBuilder sb = new StringBuilder();
// Return all elements
Set<String> returnEntries = this.jedisClient.get().zrange(key, 0, -1);
if (returnEntries.isEmpty()) {
logger.error("The number of entries in the sorted set are less than the number of entries written");
return null;
}
returnEntries.forEach(sb::append);
return sb.toString();
}
/**
* a simple write without a pipeline
*
* @param key
* @return the result of write (i.e. "OK" if it was successful
*/
public String nonpipelineWrite(String key, DataGenerator dataGenerator) {
String value = key + "__" + dataGenerator.getRandomValue() + "__" + key;
String result = this.jedisClient.get().set(key, value);
if (!"OK".equals(result)) {
logger.error("SET_ERROR: GOT " + result + " for SET operation");
throw new RuntimeException(String.format("DynoJedis: value %s for SET operation is NOT VALID", value, key));
}
return result;
}
/**
* pipelined version of the write
*
* @param key
* @return "key_n"
*/
public String pipelineWrite(String key, DataGenerator dataGenerator, int max_pipe_keys, int min_pipe_keys)
throws Exception {
// Create a random key between [0,MAX_PIPE_KEYS]
int pipe_keys = randomGenerator.nextInt(max_pipe_keys);
// Make sure that the number of keys in the pipeline are at least
// MIN_PIPE_KEYS
pipe_keys = Math.max(min_pipe_keys, pipe_keys);
DynoJedisPipeline pipeline = this.jedisClient.get().pipelined();
Map<String, Response<String>> responses = new HashMap<>();
/**
* writeSingle returns a single string, so we want to create a
* StringBuilder to append all the keys in the form "key_n". This is
* just used to return a single string
*/
StringBuilder sb = new StringBuilder();
// Iterate across the number of keys in the pipeline and set
for (int n = 0; n < pipe_keys; ++n) {
String nth_key = key + "_" + n;
sb.append(nth_key);
Response<String> resp = pipeline.set(key, key + dataGenerator.getRandomValue() + key);
responses.put(nth_key, resp);
}
pipeline.sync();
return sb.toString();
}
/**
* writes with an pipelined HMSET
*
* @param key
* @return the keys of the hash that was stored.
*/
public String pipelineWriteHMSET(String key, DataGenerator dataGenerator, String hm_key_prefix) {
Map<String, String> map = new HashMap<>();
String hmKey = hm_key_prefix + key;
map.put((hmKey + "__1"), (key + "__" + dataGenerator.getRandomValue() + "__" + key));
map.put((hmKey + "__2"), (key + "__" + dataGenerator.getRandomValue() + "__" + key));
DynoJedisPipeline pipeline = jedisClient.get().pipelined();
pipeline.hmset(hmKey, map);
pipeline.expire(hmKey, 3600);
pipeline.sync();
return "HMSET:" + hmKey;
}
/**
* This adds MAX_SCORE of elements in a sorted set
*
* @param key
* @return "OK" if all write operations have succeeded
* @throws Exception
*/
public String nonPipelineZADD(String key, DataGenerator dataGenerator, String z_key_prefix, int max_score)
throws Exception {
String zKey = z_key_prefix + key;
int success = 0;
long returnOp = 0;
for (int i = 0; i < max_score; i++) {
returnOp = jedisClient.get().zadd(zKey, i, dataGenerator.getRandomValue() + "__" + zKey);
success += returnOp;
}
// all the above operations will separate entries
if (success != max_score - 1) {
return null;
}
return "OK";
}
private static boolean isValidResponse(String key, String value) {
return value.startsWith(key) && value.endsWith(key);
}
}
| 9,121 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/local/dynomite | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/local/dynomite/proxy/LocalHttpEndpointBasedTokenMapSupplier.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.local.dynomite.proxy;
import com.netflix.dyno.connectionpool.impl.lb.HttpEndpointBasedTokenMapSupplier;
/**
 * Token map supplier for a Dynomite node running on the local machine: resolves
 * the cluster topology from the node's admin REST API.
 */
public class LocalHttpEndpointBasedTokenMapSupplier extends HttpEndpointBasedTokenMapSupplier {

    /** Queries the local Dynomite admin endpoint (port 8081) for the cluster description. */
    public LocalHttpEndpointBasedTokenMapSupplier() {
        super("http://{hostname}:8081/REST/v1/admin/cluster_describe", 8081);
    }
}
| 9,122 |
0 | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/local/dynomite | Create_ds/ndbench/ndbench-dyno-plugins/src/main/java/com/netflix/ndbench/plugin/local/dynomite/proxy/LocalDynomiteProxyPlugin.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.local.dynomite.proxy;
import com.google.inject.Singleton;
import com.netflix.dyno.connectionpool.ConnectionPoolConfiguration.LoadBalancingStrategy;
import com.netflix.dyno.connectionpool.Host;
import com.netflix.dyno.connectionpool.HostBuilder;
import com.netflix.dyno.connectionpool.HostSupplier;
import com.netflix.dyno.connectionpool.OperationResult;
import com.netflix.dyno.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.dyno.jedis.DynoJedisClient;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.plugin.dyno.DynoJedis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* @author diegopacheco
*/
@Singleton
@NdBenchClientPlugin("LocalDynomiteProxyPlugin")
public class LocalDynomiteProxyPlugin implements NdBenchClient{
private static final Logger logger = LoggerFactory.getLogger(DynoJedis.class);
private static final String ResultOK = "Ok";
private static final String CacheMiss = null;
private static final String ClusterName = "dynomite_redis";
private DataGenerator dataGenerator;
private final AtomicReference<DynoJedisClient> jedisClient = new AtomicReference<>(null);
@Override
public void init(DataGenerator dataGenerator) throws Exception {
int dynomitePort = 8102;
this.dataGenerator = dataGenerator;
if (jedisClient.get() != null) {
return;
}
logger.info("Initing dyno jedis client");
logger.info("\nDynomite Cluster: " + ClusterName);
HostSupplier hSupplier = () -> {
List<Host> hosts = new ArrayList<>();
hosts.add(new HostBuilder().setHostname("localhost").setPort(dynomitePort).setRack("local-dc").setStatus(Host.Status.Up).createHost());
return hosts;
};
DynoJedisClient jClient = new DynoJedisClient.Builder()
.withApplicationName(ClusterName)
.withDynomiteClusterName(ClusterName)
.withHostSupplier(hSupplier)
.withCPConfig(new ConnectionPoolConfigurationImpl("myCP")
.withTokenSupplier(new LocalHttpEndpointBasedTokenMapSupplier())
.setLoadBalancingStrategy(LoadBalancingStrategy.TokenAware))
.build();
jedisClient.set(jClient);
}
@Override
public String readSingle(String key) throws Exception {
String res = jedisClient.get().get(key);
if(res!=null)
{
if(res.isEmpty())
{
throw new Exception("Data retrieved is not ok ");
}
}
else
{
return CacheMiss;
}
return ResultOK;
}
@Override
public String writeSingle(String key) throws Exception {
OperationResult<String> result = jedisClient.get().d_set(key, dataGenerator.getRandomValue());
if (!"OK".equals(result.getResult())) {
logger.error("SET_ERROR: GOT " + result.getResult() + " for SET operation");
throw new RuntimeException(String.format("DynoJedis: value %s for SET operation is NOT VALID", key));
}
return result.getResult();
}
/**
* Perform a bulk read operation
* @return a list of response codes
* @throws Exception
*/
public List<String> readBulk(final List<String> keys) throws Exception {
throw new UnsupportedOperationException("bulk operation is not supported");
}
/**
* Perform a bulk write operation
* @return a list of response codes
* @throws Exception
*/
public List<String> writeBulk(final List<String> keys) throws Exception {
throw new UnsupportedOperationException("bulk operation is not supported");
}
/**
*
*/
@Override
public void shutdown() throws Exception {
if (jedisClient.get() != null) {
jedisClient.get().stopClient();
jedisClient.set(null);
}
}
/**
* shutdown the client
*/
@Override
public String getConnectionInfo() throws Exception {
return String.format("DynoJedisPlugin - ConnectionInfo ::Cluster Name - %s", ClusterName);
}
@Override
public String runWorkFlow() throws Exception {
return null;
}
} | 9,123 |
0 | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench/cli/NdbenchCli.java | /*
* Copyright 2018 Netflix
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.cli;
import com.google.inject.Injector;
import com.netflix.ndbench.cli.config.CliConfigs;
import com.netflix.ndbench.core.NdBenchClientFactory;
import com.netflix.ndbench.core.NdBenchDriver;
import com.netflix.ndbench.core.config.GuiceInjectorProvider;
import com.netflix.ndbench.core.util.LoadPattern;
import org.slf4j.LoggerFactory;
/**
* This class is a CLI entry point to facilitate quick testing of the Netflix Data Benchmark (NdBench).
* In particular, this class does not require deploying a WAR to Tomcat to run the benchmark.
* Nor does this class require running a Context in a web container.
*
* @author Alexander Patrikalakis
*/
public class NdbenchCli {

    private static final org.slf4j.Logger logger = LoggerFactory.getLogger(NdbenchCli.class);

    /**
     * Wires the driver via Guice, starts load with the configured pattern, and —
     * when a positive timeout is configured — stops the driver and exits after
     * that many milliseconds. With a timeout of 0 the driver runs until killed.
     */
    public static void main(final String[] argv) {
        Injector injector = new GuiceInjectorProvider().getInjector(new CliModule());
        CliConfigs cliConfigs = injector.getInstance(CliConfigs.class);
        NdBenchDriver driver = injector.getInstance(NdBenchDriver.class);
        try {
            driver.init(injector.getInstance(NdBenchClientFactory.class).getClient(cliConfigs.getClientName()));
            // Parse straight to a primitive long instead of boxing through
            // Integer.valueOf (also accepts timeouts beyond int range).
            long millisToWait = Long.parseLong(cliConfigs.getCliTimeoutMillis());
            logger.info("Starting driver in CLI with loadPattern={}, windowSize={}, windowDurationInSec={},"
                            + " bulkSize={}, timeout(ms)={}, clientName={}",
                    cliConfigs.getLoadPattern(),
                    cliConfigs.getWindowSize(),
                    cliConfigs.getWindowDurationInSec(),
                    cliConfigs.getBulkSize(),
                    millisToWait == 0L ? "no timeout" : cliConfigs.getCliTimeoutMillis(),
                    cliConfigs.getClientName());
            driver.start(
                    LoadPattern.fromString(cliConfigs.getLoadPattern()),
                    Integer.parseInt(cliConfigs.getWindowSize()),
                    Integer.parseInt(cliConfigs.getWindowDurationInSec()),
                    Integer.parseInt(cliConfigs.getBulkSize())
            );
            if (millisToWait > 0) {
                logger.info("Waiting {} ms for reads and writes to finish", millisToWait);
                Thread.sleep(millisToWait); // blocking
                logger.info("Waited {} ms for reads and writes to finish. Stopping driver.", millisToWait);
                driver.stop(); // blocking
                logger.info("Stopped driver");
                System.exit(0);
            }
        } catch (Exception e) {
            logger.error("Encountered an exception when driving load", e);
            System.exit(-1);
        }
    }
}
| 9,124 |
0 | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench/cli/CliModule.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.cli;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule;
import com.netflix.ndbench.cli.config.CliConfigs;
/**
* This Module allows Guice to inject archaius configuration for the CLI
*
* @author Alexander Patrikalakis
*/
@NdBenchClientPluginGuiceModule
public class CliModule extends AbstractModule {
@Override
protected void configure() {
// No explicit bindings needed; CliConfigs is supplied by the @Provides method below.
}
/**
 * Builds the archaius-backed {@link CliConfigs} proxy that reads CLI settings
 * from system properties / environment variables.
 *
 * @param factory injected archaius proxy factory
 * @return a CliConfigs implementation backed by dynamic configuration
 */
@Provides
CliConfigs getCliConfigs(ConfigProxyFactory factory) {
return factory.newProxy(CliConfigs.class);
}
}
| 9,125 |
0 | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench/cli | Create_ds/ndbench/ndbench-cli/src/main/java/com/netflix/ndbench/cli/config/CliConfigs.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.cli.config;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
* This class contains configuration for the CLI.
* The CLI can pull this configuration from environment variables or from system properties.
*
* @author Alexander Patrikalakis
*/
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "cli")
public interface CliConfigs {
/** Number of keys per bulk operation; "1" disables bulking. */
@PropertyName(name = "bulkSize")
@DefaultValue("1")
String getBulkSize();
/**
 * How long (in ms) the CLI drives load before stopping.
 * "0" means no timeout — presumably the driver then runs until the process is
 * killed; confirm against NdbenchCli.main.
 */
@PropertyName(name = "timeoutMillis")
@DefaultValue("0")
String getCliTimeoutMillis();
/** Name of the load pattern passed to LoadPattern.fromString (e.g. "random"). */
@PropertyName(name = "loadPattern")
@DefaultValue("random")
String getLoadPattern();
/** Sliding-window size handed to the driver; "-1" uses the driver's default. */
@PropertyName(name = "windowSize")
@DefaultValue("-1")
String getWindowSize();
/** Sliding-window duration in seconds; "-1" uses the driver's default. */
@PropertyName(name = "windowDurationInSec")
@DefaultValue("-1")
String getWindowDurationInSec();
/** Name of the NdBench client plugin to load. */
@PropertyName(name = "clientName")
@DefaultValue("InMemoryTest")
String getClientName();
}
| 9,126 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/hamcrest/IsThrowable.java | package org.libex.hamcrest;
import org.hamcrest.BaseMatcher;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import javax.annotation.Nullable;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.ThreadSafe;
import java.util.Optional;
@ParametersAreNonnullByDefault
@ThreadSafe
public class IsThrowable<T extends Throwable> extends BaseMatcher<Object> {

    /**
     * Creates a matcher that matches any {@link Throwable} that is an instance
     * of {@code type}.
     */
    public static <T extends Throwable> IsThrowable<T> isThrowableOfType(
            Class<T> type) {
        return new IsThrowable<>(Optional.of(type), Optional.empty());
    }

    /**
     * Creates a matcher that matches any {@link Throwable} whose message
     * contains {@code message}.
     */
    public static IsThrowable<Throwable> isThrowableWithMessage(String message) {
        return new IsThrowable<>(Optional.empty(),
                Optional.of(message));
    }

    /**
     * Creates a matcher that matches a {@link Throwable} of {@code type} whose
     * message contains {@code message}.
     */
    public static <T extends Throwable> IsThrowable<T> isThrowable(
            Class<T> type, String message) {
        return new IsThrowable<>(Optional.of(type), Optional.of(message));
    }

    // Either constraint may be null, meaning "don't care".
    @Nullable
    private final Matcher<Object> type;
    @Nullable
    private final Matcher<String> message;

    private IsThrowable(Optional<Class<T>> type, Optional<String> message) {
        super();
        this.type = type.map(CoreMatchers::instanceOf).orElse(null);
        this.message = message.map(Matchers::containsString).orElse(null);
    }

    @Override
    public void describeTo(Description description) {
        description.appendText("A throwable");
        if (type != null) {
            description.appendText(" of type matching ");
            description.appendDescriptionOf(type);
        }
        if (message != null) {
            if (type != null) {
                // Fixed: was appendText("and"), which ran the two clauses together
                // without a separating space in the rendered description.
                description.appendText(" and");
            }
            description.appendText(" with message matching ");
            description.appendDescriptionOf(message);
        }
    }

    /**
     * Matches when {@code arg0} is a {@link Throwable} satisfying whichever of
     * the type/message constraints were configured.
     */
    @Override
    public boolean matches(Object arg0) {
        if (!(arg0 instanceof Throwable)) {
            return false;
        }
        // arg0 is a non-null Throwable here, so the old "arg0 == null" branch
        // was unreachable and has been removed.
        Throwable t = (Throwable) arg0;
        if (type != null && !type.matches(t)) {
            return false;
        }
        if (message != null && !message.matches(t.getMessage())) {
            return false;
        }
        return true;
    }
}
| 9,127 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/TestBase.java | package org.libex.test;
import org.junit.Rule;
import org.junit.rules.ExpectedException;
import org.libex.test.google.NullPointerTester;
import org.libex.test.rules.CheckableErrorCollector;
import javax.annotation.Nullable;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.sameInstance;
/**
 * Common base class for JUnit tests: wires up an {@link ExpectedException} rule,
 * an error collector, and a null-pointer tester, plus helpers for declaring the
 * exception a test expects.
 *
 * @author John Butler
 */
public abstract class TestBase {

    protected NullPointerTester nullPointerTester = new NullPointerTester();

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    @Rule
    public CheckableErrorCollector errorCollector = new CheckableErrorCollector();

    /**
     * Declares that the running test should throw an exception of the given
     * {@code type} whose message contains {@code substring}; the message check
     * is skipped when {@code substring} is null.
     *
     * @param type      the type of Exception to expect
     * @param substring a substring of the exception message to expect, or null
     */
    protected void expectException(final Class<? extends Throwable> type, @Nullable final String substring) {
        expectedException.expect(type);
        if (substring == null) {
            return;
        }
        expectedException.expectMessage(substring);
    }

    /**
     * Same as {@link #expectException(Class, String)}, additionally requiring
     * the thrown exception's cause to be the given instance.
     */
    protected void expectException(final Class<? extends Throwable> type, @Nullable final String substring, final Exception cause) {
        expectException(type, substring);
        expectedException.expectCause(sameInstance(cause));
    }

    /**
     * Same as {@link #expectException(Class, String)}, additionally requiring
     * the thrown exception's cause to be an instance of the given type.
     */
    @SuppressWarnings({ "unchecked", "rawtypes" })
    protected void expectException(final Class<? extends Throwable> type, @Nullable final String substring,
            final Class<? extends Throwable> cause) {
        expectException(type, substring);
        expectedException.expectCause(instanceOf(cause));
    }
}
| 9,128 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/google/NullPointerTester.java | package org.libex.test.google;
import com.google.common.base.Converter;
import com.google.common.base.Objects;
import com.google.common.collect.*;
import com.google.common.reflect.Invokable;
import com.google.common.reflect.Parameter;
import com.google.common.reflect.Reflection;
import com.google.common.reflect.TypeToken;
import com.google.common.testing.ArbitraryInstances;
import com.google.common.testing.NullPointerTester.Visibility;
import junit.framework.Assert;
import junit.framework.AssertionFailedError;
import javax.annotation.Nullable;
import java.lang.reflect.*;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentMap;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
/**
* This is a modified version of {@link com.google.common.testing.NullPointerTester} to enable configuration
* of the acceptable exception types thrown when {@code null} is provided to a
* constructor or method
*
* @see com.google.common.testing.NullPointerTester
*/
public class NullPointerTester {
// Caller-supplied sample instances, used when a non-null argument of a given
// type is needed to invoke the member under test.
private final ClassToInstanceMap<Object> defaults =
MutableClassToInstanceMap.create();
// Methods/constructors explicitly excluded from testing.
private final List<Member> ignoredMembers = Lists.newArrayList();
// Which exception types count as a "correct" rejection of a null argument;
// this configurability is the local modification over Guava's version.
private ExceptionTypePolicy policy = ExceptionTypePolicy.NPE_OR_UOE;
/**
 * Registers a sample value to use whenever a non-null argument of {@code type}
 * is needed to invoke a member under test.
 */
public <T> NullPointerTester setDefault(final Class<T> type, final T value) {
defaults.putInstance(type, checkNotNull(value));
return this;
}
/** Excludes {@code method} from null-argument testing. */
public NullPointerTester ignore(final Method method) {
ignoredMembers.add(checkNotNull(method));
return this;
}
/** Sets the policy deciding which thrown exception types are acceptable. */
public NullPointerTester policy(final ExceptionTypePolicy policy) {
this.policy = policy;
return this;
}
/**
 * Tests every constructor of {@code c} at or above {@code minimalVisibility}
 * for correct null-argument rejection.
 */
public void testConstructors(
final Class<?> c,
final Visibility minimalVisibility)
{
for (Constructor<?> constructor : c.getDeclaredConstructors()) {
if (convert(minimalVisibility).isVisible(constructor) && !isIgnored(constructor)) {
testConstructor(constructor);
}
}
}
// Maps Guava's Visibility enum onto the local VisibilityLocal by name.
private VisibilityLocal convert(
final Visibility visibility)
{
return VisibilityLocal.valueOf(visibility.name());
}
/** Convenience for {@code testConstructors(c, Visibility.PUBLIC)}. */
public void testAllPublicConstructors(final Class<?> c) {
testConstructors(c, Visibility.PUBLIC);
}
/**
 * Tests every static method of {@code c} at or above {@code minimalVisibility}
 * for correct null-argument rejection.
 */
public void testStaticMethods(final Class<?> c, final Visibility minimalVisibility) {
for (Method method : convert(minimalVisibility).getStaticMethods(c)) {
if (!isIgnored(method)) {
testMethod(null, method);
}
}
}
/** Convenience for {@code testStaticMethods(c, Visibility.PUBLIC)}. */
public void testAllPublicStaticMethods(final Class<?> c) {
testStaticMethods(c, Visibility.PUBLIC);
}
/**
 * Tests every instance method of {@code instance}'s class at or above
 * {@code minimalVisibility} for correct null-argument rejection.
 */
public void testInstanceMethods(final Object instance, final Visibility minimalVisibility) {
for (Method method : getInstanceMethodsToTest(instance.getClass(), minimalVisibility)) {
testMethod(instance, method);
}
}
// Collects the non-ignored instance methods of c visible at the given level.
ImmutableList<Method> getInstanceMethodsToTest(final Class<?> c, final Visibility minimalVisibility) {
ImmutableList.Builder<Method> builder = ImmutableList.builder();
for (Method method : convert(minimalVisibility).getInstanceMethods(c)) {
if (!isIgnored(method)) {
builder.add(method);
}
}
return builder.build();
}
/** Convenience for {@code testInstanceMethods(instance, Visibility.PUBLIC)}. */
public void testAllPublicInstanceMethods(final Object instance) {
testInstanceMethods(instance, Visibility.PUBLIC);
}
/**
 * Tests {@code method}, passing null for each parameter in turn.
 * {@code instance} may be null for static methods.
 */
public void testMethod(@Nullable final Object instance, final Method method) {
Class<?>[] types = method.getParameterTypes();
for (int nullIndex = 0; nullIndex < types.length; nullIndex++) {
testMethodParameter(instance, method, nullIndex);
}
}
/**
 * Tests {@code ctor}, passing null for each parameter in turn. Rejects
 * non-static inner classes, whose hidden enclosing-instance parameter cannot
 * be supplied here.
 */
public void testConstructor(final Constructor<?> ctor) {
Class<?> declaringClass = ctor.getDeclaringClass();
checkArgument(Modifier.isStatic(declaringClass.getModifiers())
|| declaringClass.getEnclosingClass() == null,
"Cannot test constructor of non-static inner class: %s", declaringClass.getName());
Class<?>[] types = ctor.getParameterTypes();
for (int nullIndex = 0; nullIndex < types.length; nullIndex++) {
testConstructorParameter(ctor, nullIndex);
}
}
/** Tests that {@code method} rejects null at position {@code paramIndex}. */
public void testMethodParameter(
@Nullable final Object instance, final Method method, final int paramIndex) {
method.setAccessible(true);
testParameter(instance, invokable(instance, method), paramIndex, method.getDeclaringClass());
}
/** Tests that {@code ctor} rejects null at position {@code paramIndex}. */
public void testConstructorParameter(final Constructor<?> ctor, final int paramIndex) {
ctor.setAccessible(true);
testParameter(null, Invokable.from(ctor), paramIndex, ctor.getDeclaringClass());
}
/** Visibility of any method or constructor. */
public enum VisibilityLocal {
PACKAGE {
@Override boolean isVisible(final int modifiers) {
return !Modifier.isPrivate(modifiers);
}
},
PROTECTED {
@Override boolean isVisible(final int modifiers) {
return Modifier.isPublic(modifiers) || Modifier.isProtected(modifiers);
}
},
PUBLIC {
@Override boolean isVisible(final int modifiers) {
return Modifier.isPublic(modifiers);
}
};
abstract boolean isVisible(final int modifiers);
final boolean isVisible(final Member member) {
return isVisible(member.getModifiers());
}
// Static methods of cls (and visible supertypes) at this visibility.
final Iterable<Method> getStaticMethods(final Class<?> cls) {
ImmutableList.Builder<Method> builder = ImmutableList.builder();
for (Method method : getVisibleMethods(cls)) {
if (Invokable.from(method).isStatic()) {
builder.add(method);
}
}
return builder.build();
}
// Instance methods of cls, de-duplicated by signature so an override is
// only tested once.
final Iterable<Method> getInstanceMethods(final Class<?> cls) {
ConcurrentMap<Signature, Method> map = Maps.newConcurrentMap();
for (Method method : getVisibleMethods(cls)) {
if (!Invokable.from(method).isStatic()) {
map.putIfAbsent(new Signature(method), method);
}
}
return map.values();
}
// Walks cls and its superclasses, stopping at the first class outside
// cls's package (beyond which package-private members are invisible).
private ImmutableList<Method> getVisibleMethods(final Class<?> cls) {
// Don't use cls.getPackage() because it does nasty things like reading
// a file.
String visiblePackage = Reflection.getPackageName(cls);
ImmutableList.Builder<Method> builder = ImmutableList.builder();
for (Class<?> type : TypeToken.of(cls).getTypes().classes().rawTypes()) {
if (!Reflection.getPackageName(type).equals(visiblePackage)) {
break;
}
for (Method method : type.getDeclaredMethods()) {
if (!method.isSynthetic() && isVisible(method)) {
builder.add(method);
}
}
}
return builder.build();
}
}
// TODO(benyu): Use labs/reflect/Signature if it graduates.
// Value object identifying a method by name + parameter types, used to
// de-duplicate overridden methods across the class hierarchy.
private static final class Signature {
private final String name;
private final ImmutableList<Class<?>> parameterTypes;
Signature(final Method method) {
this(method.getName(), ImmutableList.copyOf(method.getParameterTypes()));
}
Signature(final String name, final ImmutableList<Class<?>> parameterTypes) {
this.name = name;
this.parameterTypes = parameterTypes;
}
@Override public boolean equals(final Object obj) {
if (obj instanceof Signature) {
Signature that = (Signature) obj;
return name.equals(that.name)
&& parameterTypes.equals(that.parameterTypes);
}
return false;
}
@Override public int hashCode() {
return Objects.hashCode(name, parameterTypes);
}
}
// Core check: invokes the member with null at paramIndex (and sample values
// everywhere else) and fails unless an exception accepted by the configured
// policy is thrown.
private void testParameter(final Object instance, final Invokable<?, ?> invokable,
final int paramIndex, final Class<?> testedClass) {
if (isPrimitiveOrNullable(invokable.getParameters().get(paramIndex))) {
return; // there's nothing to test
}
Object[] params = buildParamList(invokable, paramIndex);
try {
@SuppressWarnings("unchecked") // We'll get a runtime exception if the type is wrong.
Invokable<Object, ?> unsafe = (Invokable<Object, ?>) invokable;
unsafe.invoke(instance, params);
Assert.fail("No exception thrown for parameter at index " + paramIndex
+ " from " + invokable + Arrays.toString(params) + " for " + testedClass);
} catch (InvocationTargetException e) {
Throwable cause = e.getCause();
if (policy.isExpectedType(cause)) {
return;
}
AssertionFailedError error = new AssertionFailedError(
"wrong exception thrown from " + invokable + ": " + cause);
error.initCause(cause);
throw error;
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
// Builds the argument array: null at indexOfParamToSetToNull, sample values
// (from defaults / ArbitraryInstances / proxies) everywhere else.
private Object[] buildParamList(final Invokable<?, ?> invokable, final int indexOfParamToSetToNull) {
ImmutableList<Parameter> params = invokable.getParameters();
Object[] args = new Object[params.size()];
for (int i = 0; i < args.length; i++) {
Parameter param = params.get(i);
if (i != indexOfParamToSetToNull) {
args[i] = getDefaultValue(param.getType());
Assert.assertTrue(
"Can't find or create a sample instance for type '"
+ param.getType()
+ "'; please provide one using NullPointerTester.setDefault()",
args[i] != null || isNullable(param));
}
}
return args;
}
// Produces a sample value of the requested type, trying in order: the
// caller-registered defaults, Guava's ArbitraryInstances, special-cases for
// Class/TypeToken/Converter, and finally a dynamic proxy for interfaces.
// Returns null when no sample can be produced.
private <T> T getDefaultValue(final TypeToken<T> type) {
// We assume that all defaults are generics-safe, even if they aren't,
// we take the risk.
@SuppressWarnings("unchecked")
T defaultValue = (T) defaults.getInstance(type.getRawType());
if (defaultValue != null) {
return defaultValue;
}
@SuppressWarnings("unchecked") // All arbitrary instances are generics-safe
T arbitrary = (T) ArbitraryInstances.get(type.getRawType());
if (arbitrary != null) {
return arbitrary;
}
if (type.getRawType() == Class.class) {
// If parameter is Class<? extends Foo>, we return Foo.class
@SuppressWarnings("unchecked")
T defaultClass = (T) getFirstTypeParameter(type.getType()).getRawType();
return defaultClass;
}
if (type.getRawType() == TypeToken.class) {
// If parameter is TypeToken<? extends Foo>, we return TypeToken<Foo>.
@SuppressWarnings("unchecked")
T defaultType = (T) getFirstTypeParameter(type.getType());
return defaultType;
}
if (type.getRawType() == Converter.class) {
TypeToken<?> convertFromType = type.resolveType(
Converter.class.getTypeParameters()[0]);
TypeToken<?> convertToType = type.resolveType(
Converter.class.getTypeParameters()[1]);
@SuppressWarnings("unchecked") // returns default for both F and T
T defaultConverter = (T) defaultConverter(convertFromType, convertToType);
return defaultConverter;
}
if (type.getRawType().isInterface()) {
return newDefaultReturningProxy(type);
}
return null;
}
// A Converter whose conversions simply return sample values of the target types.
private <F, T> Converter<F, T> defaultConverter(
final TypeToken<F> convertFromType, final TypeToken<T> convertToType) {
return new Converter<F, T>() {
@Override protected T doForward(final F a) {
return doConvert(convertToType);
}
@Override protected F doBackward(final T b) {
return doConvert(convertFromType);
}
private /*static*/ <S> S doConvert(final TypeToken<S> type) {
return checkNotNull(getDefaultValue(type));
}
};
}
// First type argument of a parameterized type, or Object for raw types.
private static TypeToken<?> getFirstTypeParameter(final Type type) {
if (type instanceof ParameterizedType) {
return TypeToken.of(
((ParameterizedType) type).getActualTypeArguments()[0]);
} else {
return TypeToken.of(Object.class);
}
}
// Dynamic proxy implementing the interface; every call returns a sample value
// of the method's return type.
private <T> T newDefaultReturningProxy(final TypeToken<T> type) {
return new DummyProxy() {
@Override <R> R dummyReturnValue(final TypeToken<R> returnType) {
return getDefaultValue(returnType);
}
}.newProxy(type);
}
// Wraps a Method as an Invokable, resolving generics against the instance's
// runtime class when an instance is available.
private static Invokable<?, ?> invokable(@Nullable final Object instance, final Method method) {
if (instance == null) {
return Invokable.from(method);
} else {
return TypeToken.of(instance.getClass()).method(method);
}
}
// Primitive parameters cannot receive null; @Nullable parameters accept it.
static boolean isPrimitiveOrNullable(final Parameter param) {
return param.getType().getRawType().isPrimitive() || isNullable(param);
}
private static boolean isNullable(final Parameter param) {
return param.isAnnotationPresent(Nullable.class);
}
private boolean isIgnored(final Member member) {
return member.isSynthetic() || ignoredMembers.contains(member);
}
/**
 * Strategy for exception type matching used by {@link NullPointerTester}.
 */
public enum ExceptionTypePolicy {
/**
 * Exceptions should be {@link NullPointerException} or
 * {@link UnsupportedOperationException}.
 */
NPE_OR_UOE() {
@Override
public boolean isExpectedType(final Throwable cause) {
return cause instanceof NullPointerException
|| cause instanceof UnsupportedOperationException;
}
},
/**
 * Exceptions should be {@link NullPointerException},
 * {@link IllegalArgumentException}, or
 * {@link UnsupportedOperationException}.
 */
NPE_IAE_OR_UOE() {
@Override
public boolean isExpectedType(final Throwable cause) {
return cause instanceof NullPointerException
|| cause instanceof IllegalArgumentException
|| cause instanceof UnsupportedOperationException;
}
};
public abstract boolean isExpectedType(final Throwable cause);
}
}
| 9,129 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/google/DummyProxy.java | package org.libex.test.google;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.common.reflect.AbstractInvocationHandler;
import com.google.common.reflect.Invokable;
import com.google.common.reflect.Parameter;
import com.google.common.reflect.TypeToken;
import javax.annotation.Nullable;
import java.io.Serializable;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Set;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * This is a direct copy of {@code com.google.common.test.DummyProxy} from version guava-testlib 18.0
 */
abstract class DummyProxy {
    /**
     * Returns a new proxy for {@code interfaceType}. Proxies of the same interface are equal to each
     * other if the {@link DummyProxy} instance that created the proxies are equal.
     */
    final <T> T newProxy(TypeToken<T> interfaceType) {
        Set<Class<?>> interfaceClasses = Sets.newLinkedHashSet();
        interfaceClasses.addAll(interfaceType.getTypes().interfaces().rawTypes());
        // Make the proxy serializable to work with SerializableTester
        interfaceClasses.add(Serializable.class);
        Object dummy = Proxy.newProxyInstance(
                interfaceClasses.iterator().next().getClassLoader(),
                interfaceClasses.toArray(new Class<?>[interfaceClasses.size()]),
                new DummyHandler(interfaceType));
        @SuppressWarnings("unchecked") // interfaceType is T
        T result = (T) dummy;
        return result;
    }

    /** Returns the dummy return value for {@code returnType}. */
    abstract <R> R dummyReturnValue(TypeToken<R> returnType);

    /** Invocation handler backing the proxies; delegates every return value to dummyReturnValue. */
    private class DummyHandler extends AbstractInvocationHandler implements Serializable {
        private final TypeToken<?> interfaceType;

        DummyHandler(TypeToken<?> interfaceType) {
            this.interfaceType = interfaceType;
        }

        @Override protected Object handleInvocation(
                Object proxy, Method method, Object[] args) {
            Invokable<?, ?> invokable = interfaceType.method(method);
            ImmutableList<Parameter> params = invokable.getParameters();
            // Behave null-hostile: reject null for any parameter not marked @Nullable.
            for (int i = 0; i < args.length; i++) {
                Parameter param = params.get(i);
                if (!param.isAnnotationPresent(Nullable.class)) {
                    checkNotNull(args[i]);
                }
            }
            return dummyReturnValue(interfaceType.resolveType(method.getGenericReturnType()));
        }

        // Equality is delegated to the enclosing DummyProxy instance, so proxies
        // created by the same DummyProxy compare equal to each other.
        @Override public int hashCode() {
            return identity().hashCode();
        }

        @Override public boolean equals(Object obj) {
            if (obj instanceof DummyHandler) {
                DummyHandler that = (DummyHandler) obj;
                return identity().equals(that.identity());
            } else {
                return false;
            }
        }

        private DummyProxy identity() {
            return DummyProxy.this;
        }

        @Override public String toString() {
            return "Dummy proxy for " + interfaceType;
        }

        // Since type variables aren't serializable, reduce the type down to raw type before
        // serialization.
        private Object writeReplace() {
            return new DummyHandler(TypeToken.of(interfaceType.getRawType()));
        }
    }
}
| 9,130 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/rules/CheckableErrorCollector.java | package org.libex.test.rules;
import org.junit.rules.ErrorCollector;
import org.junit.runners.model.MultipleFailureException;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.ThreadSafe;
import java.util.List;
import static com.google.common.collect.Lists.newArrayList;
@ParametersAreNonnullByDefault
@ThreadSafe
public class CheckableErrorCollector extends ErrorCollector {

    /**
     * Errors recorded so far, kept locally so callers can query collection state.
     * Guarded by {@code this}: the class is declared {@link ThreadSafe}, but the
     * previous implementation used an unsynchronized, non-final ArrayList, which
     * was not safe for concurrent use.
     */
    private final List<Throwable> errors = newArrayList();

    public CheckableErrorCollector() {
    }

    /** Returns true if at least one error has been collected. */
    public synchronized boolean containsErrors() {
        return !errors.isEmpty();
    }

    /** Returns true if no errors have been collected. */
    public synchronized boolean doesNotContainErrors() {
        return errors.isEmpty();
    }

    /**
     * Fails (via {@link MultipleFailureException#assertEmpty}) if any error has
     * been collected, then delegates to the superclass verification.
     */
    @Override
    public synchronized void verify() throws Throwable {
        MultipleFailureException.assertEmpty(errors);
        super.verify();
    }

    /** Records {@code error} both locally (for the query methods) and in the superclass. */
    @Override
    public synchronized void addError(Throwable error) {
        errors.add(error);
        super.addError(error);
    }
}
| 9,131 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/logging | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/test/logging/log4j/Log4jCapturer.java | package org.libex.test.logging.log4j;
import com.google.common.collect.ImmutableList;
import org.apache.log4j.Appender;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Layout;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.Priority;
import org.apache.log4j.spi.LoggingEvent;
import org.hamcrest.CoreMatchers;
import org.hamcrest.Matcher;
import org.hamcrest.MatcherAssert;
import org.hamcrest.Matchers;
import org.hamcrest.StringDescription;
import org.hamcrest.collection.IsIterableWithSize;
import org.hamcrest.core.IsAnything;
import org.junit.rules.TestRule;
import org.junit.runner.Description;
import org.junit.runners.model.Statement;
import org.libex.hamcrest.IsThrowable;
import org.libex.logging.log4j.InMemoryAppender;
import org.libex.logging.log4j.LoggingEventsEx;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.NotThreadSafe;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.collect.Lists.newArrayList;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.libex.logging.log4j.LoggingEventsEx.toMessage;
/**
*
* Source: https://raw.githubusercontent.com/dancerjohn/LibEx/master/testlibex/src/main/java/org/libex/test/logging/log4j/Log4jCapturer.java
* Not yet available in standard maven repo's, so including source here.
*
*
* Rule that allows for capturing Log4J logging for test verification.
*
* @author John Butler
*
*/
@NotThreadSafe
@ParametersAreNonnullByDefault
public class Log4jCapturer implements TestRule {
    // Conversion pattern used when the caller does not supply a custom Layout.
    private static final String DEFAULT_LAYOUT = "%d{DATE} %5p %C{1}.%M(),%L - %m%n";
    // Name under which the capturing appender is registered on the loggers.
    private static final String APPENDER_NAME = "Log4jCapturerAppender";

    /**
     * @return new capturer builder
     */
    public static Log4jCapturerBuilder builder() {
        return new Log4jCapturerBuilder();
    }

    /**
     * Builder for {@link Log4jCapturer}
     */
    public static class Log4jCapturerBuilder {
        // Loggers to capture; when left empty, the root logger is captured.
        private List<Logger> loggers = newArrayList();
        private Level threshold = Level.INFO;
        private Layout layout = new PatternLayout(DEFAULT_LAYOUT);

        /**
         * Sets the logging threshold for messages that should be recorded. This
         * is set as the threshold on the created {@link Appender}
         *
         * @param threshold
         *            the lowest level of messages that should be held
         * @return this instance
         *
         * @see AppenderSkeleton#setThreshold(Priority)
         */
        public Log4jCapturerBuilder setThreshold(final Level threshold) {
            this.threshold = threshold;
            return this;
        }

        /**
         * Sets the logging layout for message that are recorded. This is set as
         * the layout on the created {@link Appender}
         *
         * @param layout
         *            the layout to set
         * @return this instance
         *
         * @see AppenderSkeleton#setLayout(Layout)
         */
        public Log4jCapturerBuilder setLayout(final Layout layout) {
            this.layout = layout;
            return this;
        }

        /**
         * Add the logger for messages that are recorded.
         *
         * @param logger
         *            the logger to add
         * @return this instance
         */
        public Log4jCapturerBuilder addLogger(
                final String logger)
        {
            this.loggers.add(Logger.getLogger(logger));
            return this;
        }

        /**
         * @return a new {@link Log4jCapturer}
         */
        public Log4jCapturer build() {
            return new Log4jCapturer(threshold, layout, loggers);
        }
    }

    // Appender that records all events emitted by the captured loggers.
    private final InMemoryAppender appender;
    // Loggers being captured; never empty (defaults to the root logger).
    private final List<Logger> loggers;

    /**
     * @param threshold lowest level to record; also applied as each captured logger's level
     * @param layout    layout applied to the capturing appender
     * @param loggers   loggers to capture; an empty list means the root logger
     */
    private Log4jCapturer(
            final Level threshold,
            final Layout layout,
            final List<Logger> loggers) {
        appender = new InMemoryAppender();
        appender.setThreshold(threshold);
        appender.setLayout(layout);
        appender.setName(APPENDER_NAME);
        this.loggers = (loggers.isEmpty()) ? newArrayList(Logger.getRootLogger()) : ImmutableList.copyOf(loggers);
        // Align each captured logger's level with the capture threshold.
        for (Logger logger : this.loggers) {
            logger.setLevel(threshold);
        }
    }

    /*
     * (non-Javadoc)
     *
     * @see org.junit.rules.TestRule#apply(org.junit.runners.model.Statement,
     * org.junit.runner.Description)
     */
    @Override
    public Statement apply(final Statement statement, final Description description) {
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                // Attach the appender only for the duration of the test.
                addAppender();
                try {
                    statement.evaluate();
                } finally {
                    removeAppender();
                }
            }
        };
    }

    // Clears previously recorded events and attaches the appender to every captured logger.
    private void addAppender() {
        appender.clear();
        for (Logger logger : loggers) {
            logger.addAppender(appender);
        }
    }

    // Detaches the appender; cleanup failures are printed rather than propagated so
    // they do not mask the test's own outcome.
    private void removeAppender() {
        try {
            appender.clear();
            for (Logger logger : loggers) {
                logger.removeAppender(appender);
            }
        } catch (RuntimeException e) {
            e.printStackTrace();
        }
    }

    /**
     * Clears the list of currently recorded logs.
     */
    public void clearLog() {
        appender.clear();
    }

    /**
     * Gets the list of logs that matches the passed assertion
     *
     * @param assertion
     *            the filter by which to retrieve logs
     * @return an unmodifiable Iterable over the list of logs that match the
     *         passed assertion
     */
    private Stream<LoggingEvent> filter(final Predicate<LoggingEvent> assertion) {
        return appender.getLoggingEvents().stream().filter(assertion);
    }

    /**
     * Gets the list of logs that matches the passed assertion
     *
     * @param assertion
     *            the filter by which to retrieve logs
     * @return an unmodifiable Iterable over the list of logs that match the
     *         passed assertion
     */
    private Stream<LoggingEvent> getLogs(final LogAssertion assertion) {
        return filter(assertion.criteria());
    }

    /**
     * Gets the list of log messages for the logs that match the passed assertion
     *
     * @param assertion
     *            the filter by which to retrieve logs
     * @return an unmodifiable Iterable over the list of log messages for logs
     *         that match the passed assertion
     */
    public Iterable<String> getLogMessages(final LogAssertion assertion) {
        return getLogs(assertion).map(toMessage()).collect(Collectors.toList());
    }

    /**
     * Asserts the passed assertion
     *
     * @param assertion
     *            the logging assertion to verify
     */
    public void assertThat(final LogAssertion assertion) {
        List<LoggingEvent> logs = appender.getLoggingEvents();
        // For zero-or-one expected occurrences, presence/absence of a single match
        // suffices; otherwise the exact count of matching events is verified.
        if (assertion.times <=1 ) {
            LoggingEvent event = logs.stream().filter(assertion.criteria()).findFirst().orElse(null);
            Matcher<Object> matcher = (assertion.logged) ? notNullValue()
                    : nullValue();
            MatcherAssert.assertThat(assertion.toString(), event, matcher);
        } else {
            MatcherAssert.assertThat(assertion.toString(),
                    logs.stream().filter(assertion.criteria()).collect(Collectors.toList()),
                    IsIterableWithSize.iterableWithSize(assertion.times));
        }
    }

    /**
     * Asserts that the passed substring was logged at the passed level
     *
     * @param level
     *            the expected level
     * @param substring
     *            the expected substring
     */
    public void assertRenderedMessageLogged(final Level level, final String substring) {
        assertThat(LogAssertion.newLogAssertion()
                .isLogged()
                .withLevel(level)
                .withRenderedMessage(substring));
    }

    /**
     * A LoggingEvent assertion
     */
    public static class LogAssertion {
        /**
         * @return a new empty assertion with default values
         */
        public static LogAssertion newLogAssertion() {
            return new LogAssertion();
        }

        // Defaults: expect exactly one logged event, with no constraints on
        // level, message, or attached exception.
        private boolean logged = true;
        private int times = 1;
        private Matcher<? super Level> level = Matchers.anything();
        private Matcher<? super String> message = Matchers.anything();
        private Matcher<?> exception = Matchers.anything();

        /**
         * Sets the assertion to expect the message to be logged. This method
         * should be used in conjunction with one of the other {@code withX} methods. This method is mutually exclusive
         * with {@link #isNotLogged()}
         *
         * @return this instance
         */
        public LogAssertion isLogged() {
            return isLogged(1);
        }

        /**
         * Sets the assertion to expect the message to be logged. This method
         * should be used in conjunction with one of the other {@code withX} methods. This method is mutually exclusive
         * with {@link #isNotLogged()}
         *
         * @param times
         *            the number of times to expect the message to be logged.
         *            Values 0 or greater are valid, If 0, will cause the
         *            expectation that the message was NOT logged
         * @return this instance
         */
        public LogAssertion isLogged(final int times) {
            checkArgument(times >= 0);
            if (times == 0) {
                return isNotLogged();
            } else {
                this.logged = true;
                this.times = times;
                return this;
            }
        }

        /**
         * Sets the assertion to expect the message to NOT be logged. This
         * method should be used in conjunction with one of the other {@code withX} methods. This method is mutually
         * exclusive with {@link #isLogged()}
         *
         * @return this instance
         */
        public LogAssertion isNotLogged() {
            this.logged = false;
            this.times = 0;
            return this;
        }

        /**
         * Sets the assertion to expect the message to have the passed {@code level}. The use of this method is
         * sufficient to assert a
         * message is logged. No other method calls are required, other than the
         * call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param level
         *            the level to expect
         * @return this instance
         */
        public LogAssertion withLevel(final Level level) {
            return withLevel(Matchers.equalTo(level));
        }

        /**
         * Sets the assertion to expect the message to have a level that matches
         * the passed {@code level}. The use of this method is sufficient to
         * assert a message is logged. No other method calls are required, other
         * than the call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param level
         *            the level to expect
         * @return this instance
         */
        public LogAssertion withLevel(final Matcher<? super Level> level) {
            this.level = level;
            return this;
        }

        /**
         * Sets the assertion to expect the rendered (formatted) message to have
         * a message that is super-string of the passed {@code substring}. The
         * use of this method is sufficient to assert a message is logged. No
         * other method calls are required, other than the call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param substring
         *            the message to expect
         * @return this instance
         */
        public LogAssertion withRenderedMessage(final String substring) {
            return withRenderedMessage(Matchers.containsString(substring));
        }

        /**
         * Sets the assertion to expect the rendered (formatted) message to
         * match the passed {@code message}. The use of this method is
         * sufficient to assert a message is logged. No other method calls are
         * required, other than the call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param message
         *            the message to expect
         * @return this instance
         */
        public LogAssertion withRenderedMessage(final Matcher<? super String> message) {
            this.message = message;
            return this;
        }

        /**
         * Sets the assertion to expect the logging event to contain an
         * exception that matches the passed {@code exception}. The use of this
         * method is sufficient to assert a message is logged. No other method
         * calls are required, other than the call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param exception
         *            the exception matcher, consider {@link IsThrowable}
         * @return this instance
         */
        public LogAssertion withException(final Class<? extends Throwable> exception) {
            return withException(CoreMatchers.instanceOf(exception));
        }

        /**
         * Sets the assertion to expect the logging event to contain an
         * exception that matches the passed {@code exception}. The use of this
         * method is sufficient to assert a message is logged. No other method
         * calls are required, other than the call to {@link Log4jCapturer#assertThat(LogAssertion)}.
         *
         * @param exception
         *            the exception matcher, consider {@link IsThrowable}
         * @return this instance
         */
        public LogAssertion withException(final Matcher<?> exception)
        {
            this.exception = exception;
            return this;
        }

        // Conjunction of all configured matchers as a single event predicate.
        @SuppressWarnings("unchecked")
        private Predicate<LoggingEvent> criteria() {
            return LoggingEventsEx.withLevel(level)
                    .and(LoggingEventsEx.withRenderedMessage(message))
                    .and(LoggingEventsEx.withThrowable(exception));
        }

        @Override
        public String toString() {
            org.hamcrest.Description description = new StringDescription();
            if (logged) {
                description.appendText("Message logged");
            } else {
                description.appendText("No message logged");
            }
            if (notIsAnything(level)) {
                description.appendText(" with level ");
                description.appendDescriptionOf(level);
            }
            if (notIsAnything(message)) {
                description.appendText(" with message ");
                description.appendDescriptionOf(message);
            }
            if (notIsAnything(exception)) {
                description.appendText(" with exception ");
                description.appendDescriptionOf(exception);
            }
            return description.toString();
        }

        // anything() placeholders carry no useful detail for failure descriptions.
        private boolean notIsAnything(final Matcher<?> matcher) {
            return !(matcher instanceof IsAnything);
        }
    }
}
| 9,132 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/logging | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/logging/log4j/InMemoryAppender.java | package org.libex.logging.log4j;
import com.google.common.collect.ImmutableList;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.spi.LoggingEvent;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.ThreadSafe;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
 * Appender that maintains the logging events in memory.
 *
 * All state is guarded by a read/write lock, so concurrent append/read/clear
 * calls are safe.
 *
 * @author John Butler
 */
@ThreadSafe
@ParametersAreNonnullByDefault
public class InMemoryAppender extends AppenderSkeleton {
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    private final Lock readLock = rwLock.readLock();
    private final Lock writeLock = rwLock.writeLock();
    // Accumulates events; replaced wholesale by clear(). NOTE(review): every
    // access is already lock-guarded, so the volatile qualifier appears redundant.
    private volatile ImmutableList.Builder<LoggingEvent> eventListBuilder = ImmutableList
            .builder();

    @Override
    public void close() {
        // NO OP
    }

    // Events are recorded as objects; no Layout is needed.
    @Override
    public boolean requiresLayout() {
        return false;
    }

    // Records one event under the write lock.
    @Override
    protected void append(LoggingEvent event) {
        writeLock.lock();
        try {
            eventListBuilder.add(event);
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * @return the observed logging events
     */
    public ImmutableList<LoggingEvent> getLoggingEvents() {
        readLock.lock();
        try {
            return eventListBuilder.build();
        } finally {
            readLock.unlock();
        }
    }

    /**
     * Clears the list of observed logging events
     */
    public void clear() {
        writeLock.lock();
        try {
            // ImmutableList.Builder cannot be emptied; swap in a fresh builder instead.
            eventListBuilder = ImmutableList.builder();
        } finally {
            writeLock.unlock();
        }
    }
}
| 9,133 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/logging | Create_ds/ndbench/ndbench-core/src/test/java/org/libex/logging/log4j/LoggingEventsEx.java | package org.libex.logging.log4j;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
import org.hamcrest.Matcher;
import org.hamcrest.Matchers;
import org.libex.hamcrest.IsThrowable;
import javax.annotation.ParametersAreNonnullByDefault;
import javax.annotation.concurrent.NotThreadSafe;
import java.util.function.Function;
import java.util.function.Predicate;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Utilities on {@link LoggingEvent}
 *
 * @author John Butler
 *
 */
@NotThreadSafe
@ParametersAreNonnullByDefault
public final class LoggingEventsEx {
    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} that has
     * the specified level
     *
     * @param level
     *            the level to match
     * @return a {@link Predicate} that matches a {@link LoggingEvent} that has
     *         the specified level
     */
    public static Predicate<LoggingEvent> withLevel(final Level level) {
        checkNotNull(level);
        return withLevel(Matchers.equalTo(level));
    }

    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} whose
     * level matches the passed matcher
     *
     * @param matcher
     *            the matcher to use
     * @return a {@link Predicate} that matches a {@link LoggingEvent} whose
     *         level matches the passed matcher
     */
    public static Predicate<LoggingEvent> withLevel(
            final Matcher<? super Level> matcher) {
        checkNotNull(matcher);
        return event -> event != null && matcher.matches(event.getLevel());
    }

    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} whose
     * rendered message is exactly equal to the passed message.
     *
     * @param message
     *            the exact rendered message to match
     * @return predicate matching events with that exact rendered message
     */
    public static Predicate<LoggingEvent> withRenderedMessage(
            final String message) {
        checkNotNull(message);
        return withRenderedMessage(Matchers.equalTo(message));
    }

    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} whose
     * rendered message matches the passed matcher.
     *
     * @param matcher
     *            the matcher applied to the rendered message
     * @return predicate matching events whose rendered message satisfies the matcher
     */
    public static Predicate<LoggingEvent> withRenderedMessage(
            final Matcher<? super String> matcher) {
        return event -> event != null
                && matcher.matches(event.getRenderedMessage());
    }

    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} carrying
     * a {@link Throwable} of the given type.
     *
     * @param type
     *            the throwable type to match
     * @return predicate matching events with a throwable of that type
     */
    public static Predicate<LoggingEvent> withThrowable(
            final Class<? extends Throwable> type) {
        checkNotNull(type);
        return withThrowable(IsThrowable.isThrowableOfType(type));
    }

    /**
     * Creates a {@link Predicate} that matches a {@link LoggingEvent} whose
     * attached throwable (or null when the event has none) matches the passed
     * matcher.
     *
     * @param matcher
     *            the matcher applied to the event's throwable
     * @return predicate matching events whose throwable satisfies the matcher
     */
    public static Predicate<LoggingEvent> withThrowable(
            final Matcher<?> matcher)
    {
        return event -> event != null
                && matcher
                        .matches((event.getThrowableInformation() == null) ? null
                                : event.getThrowableInformation()
                                        .getThrowable());
    }

    // Extracts the raw message object of an event, cast to String.
    private static final Function<LoggingEvent, String> TO_MESSAGE = event -> (String) event.getMessage();

    /**
     * @return a function mapping a {@link LoggingEvent} to its message, cast to String
     */
    public static Function<LoggingEvent, String> toMessage() {
        return TO_MESSAGE;
    }

    // Static utility class; not instantiable.
    private LoggingEventsEx() {
    }
}
| 9,134 |
0 | Create_ds/ndbench/ndbench-core/src/test/java | Create_ds/ndbench/ndbench-core/src/test/java/com/ConfigurationPropertiesTest.java | package com;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.archaius.test.TestPropertyOverride;
import com.netflix.governator.guice.test.ModulesForTesting;
import com.netflix.governator.guice.test.junit4.GovernatorJunit4ClassRunner;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.defaultimpl.NdBenchGuiceModule;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import javax.inject.Inject;
/**
 * Verifies that system properties may be used to set values returned by dynamic proxies generated from
 * {@link com.netflix.ndbench.core.config.IConfiguration}, using the namespace prefix "ndbench.config."
 *
 */
@RunWith(GovernatorJunit4ClassRunner.class)
@ModulesForTesting({NdBenchGuiceModule.class, ArchaiusModule.class})
@TestPropertyOverride(value={"ndbench.config.numKeys=777" })
public class ConfigurationPropertiesTest {

    /** Injected dynamic-proxy configuration; should reflect the class-level property override. */
    @Inject
    IConfiguration config;

    /**
     * The class-level {@code ndbench.config.numKeys=777} override must be visible
     * through the injected {@link IConfiguration} proxy.
     *
     * Renamed from a copy-pasted name ("...WriteOperationSetsNewRateLimit") that
     * described a different test and had nothing to do with this assertion.
     */
    @Test
    public void testNumKeysOverriddenByProperty() throws Exception {
        Assert.assertEquals(777, config.getNumKeys());
    }
}
| 9,135 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/RPSCountTest.java | package com.netflix.ndbench.core;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.config.IConfiguration;
import org.apache.log4j.Level;
import org.junit.Rule;
import org.junit.Test;
import org.libex.test.TestBase;
import org.libex.test.logging.log4j.Log4jCapturer;
import org.libex.test.logging.log4j.Log4jCapturer.LogAssertion;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Verifies RPSCount's warning behavior: a WARN message about observed read/write
 * RPS is expected only when the expected rate exceeds the observed rate and the
 * corresponding operation type has started.
 */
public class RPSCountTest extends TestBase {

    @Rule
    public Log4jCapturer logCapturer = Log4jCapturer.builder().build();

    @Test
    public void testMessageLogged() {
        // Note: readSuccess+readFail will be divided by stats update frequency of 10,
        // and similarly for writeSuccess+writeFail
        //
        verifyLoggerActivity( // verify no logging if expected rate < observed rate
                "Observed Read RPS",
                false,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        100/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify no logging if expected rate == observed rate
                "Observed Read RPS",
                false,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        90/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Read RPS",
                true,
                getRPSCount(
                        true, true, 9/*readRate*/, 1/*writeRate*/,
                        89/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Read RPS",
                false,
                getRPSCount(
                        false, true, 9/*readRate*/, 1/*writeRate*/,
                        89/*readSuccess*/, 0/*readFail*/,
                        0/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify no logging if expected rate < observed rate
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        100/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify no logging if expected rate == observed rate
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        0/*readSuccess*/, 0/*readFail*/,
                        90/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Write RPS",
                true,
                getRPSCount(
                        true, true, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        89/*writeSuccess*/, 0/*writeFail*/));
        verifyLoggerActivity( // verify have logging if expected rate > observed rate
                "Observed Write RPS",
                false,
                getRPSCount(
                        true, false, 1/*readRate*/, 9/*writeRate*/,
                        1/*readSuccess*/, 0/*readFail*/,
                        89/*writeSuccess*/, 0/*writeFail*/));
    }

    /**
     * Runs one updateRPS() cycle on the given counter and asserts whether the
     * expected WARN-level message fragment was (or was not) logged.
     */
    private void verifyLoggerActivity(String fragmentOfExpectedLoggedMsg,
                                      boolean shouldBeLogged,
                                      RPSCount counter) {
        logCapturer.clearLog();
        counter.updateRPS();
        // updateRPS should never emit DEBUG-level messages in this configuration.
        logCapturer.assertThat(LogAssertion.newLogAssertion()
                .withLevel(Level.DEBUG).isNotLogged());
        LogAssertion assertionTmp = LogAssertion.newLogAssertion()
                .withLevel(Level.WARN).withRenderedMessage(fragmentOfExpectedLoggedMsg);
        LogAssertion assertion = shouldBeLogged ? assertionTmp.isLogged() : assertionTmp.isNotLogged();
        logCapturer.assertThat(assertion);
    }

    /**
     * Builds an RPSCount wired to a mocked configuration and monitor with the
     * given started flags, expected rates, and observed success/failure counts.
     */
    private RPSCount getRPSCount(boolean readsStarted,
                                 boolean writesStarted,
                                 double readRate,
                                 double writeRate,
                                 long readSuccess,
                                 long readFailure,
                                 long writeSuccess,
                                 long writeFailure) {
        IConfiguration config = mock(IConfiguration.class);
        when(config.getStatsUpdateFreqSeconds()).thenReturn(10);
        when(config.isReadEnabled()).thenReturn(true);
        when(config.isWriteEnabled()).thenReturn(true);
        NdBenchMonitor monitor = mock(NdBenchMonitor.class);
        when(monitor.getReadSuccess()).thenReturn(readSuccess);
        when(monitor.getReadFailure()).thenReturn(readFailure);
        when(monitor.getWriteSuccess()).thenReturn(writeSuccess);
        when(monitor.getWriteFailure()).thenReturn(writeFailure);
        // Use typed AtomicReferences (the originals were raw types, compiling
        // only with unchecked-conversion warnings).
        return new RPSCount(
                new AtomicBoolean(readsStarted),
                new AtomicBoolean(writesStarted),
                new AtomicReference<>(RateLimiter.create(readRate)),
                new AtomicReference<>(RateLimiter.create(writeRate)),
                config,
                monitor);
    }
}
| 9,136 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/NdbenchDriverTest.java | package com.netflix.ndbench.core;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.archaius.test.Archaius2TestConfig;
import com.netflix.governator.guice.test.ModulesForTesting;
import com.netflix.governator.guice.test.junit4.GovernatorJunit4ClassRunner;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.defaultimpl.NdBenchGuiceModule;
import com.netflix.ndbench.core.operations.WriteOperation;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import javax.inject.Inject;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.*;
@RunWith(GovernatorJunit4ClassRunner.class)
@ModulesForTesting({NdBenchGuiceModule.class, ArchaiusModule.class})
public class NdbenchDriverTest {

    @Rule
    @RuntimeLayer
    public Archaius2TestConfig settableConfig = new Archaius2TestConfig();

    @Inject
    IConfiguration config;
    @Inject
    NdBenchMonitor ndBenchMonitor;
    @Inject
    DataGenerator dataGenerator;

    /**
     * A single-key write through WriteOperation should publish the auto-tuned
     * write rate (500) into the settable configuration layer.
     */
    @Test
    public void testInvokingProcessMethodOnWriteOperationSetsNewRateLimit() throws Exception {
        NdBenchClient mockClientPlugin = mock(NdBenchClient.class);
        when(mockClientPlugin.writeSingle(anyString())).thenReturn("foo");
        // Fixed: the original passed Collections.singletonList(anyString()) here,
        // which mixes an argument matcher inside a real List with other matchers —
        // invalid Mockito usage that only matched by accident. anyList() is the
        // correct matcher for the key-list argument.
        when(mockClientPlugin
                .autoTuneWriteRateLimit(anyDouble(), anyList(), any(NdBenchMonitor.class)))
                .thenReturn(500D);
        NdBenchMonitor mockMonitor = mock(NdBenchMonitor.class);
        doNothing().when(mockMonitor).recordReadLatency(anyLong());
        doNothing().when(mockMonitor).incWriteSuccess();
        NdBenchDriver driver = new NdBenchDriver(config, ndBenchMonitor, dataGenerator, settableConfig);
        WriteOperation writeOperation = new WriteOperation(mockClientPlugin);
        writeOperation.process(driver, mockMonitor,
                Collections.singletonList("some-key"),
                new AtomicReference<>(RateLimiter.create(100)), true);
        int rateFromSettableConfig = settableConfig.getInteger(NdBenchConstants.WRITE_RATE_LIMIT_FULL_NAME);
        assertEquals(rateFromSettableConfig, 500D, .001);
        // Next check won't work unless we figure out how to configure Property Listener to kick in during the test run
        //double rateFromDriverRateLimiter = driver.getWriteLimiter().get().getRate();
        //assertEquals(rateFromDriverRateLimiter, 500D, .001);
    }

    /**
     * A bulk (1000-key) write should likewise publish the auto-tuned rate of 500.
     */
    @Test
    public void testInvokingProcessMethodOnBulkWriteOperationSetsNewRateLimit() throws Exception {
        NdBenchClient mockClientPlugin = mock(NdBenchClient.class);
        when(mockClientPlugin.writeSingle(anyString())).thenReturn("foo");
        // Same matcher fix as above: anyList() instead of a matcher inside a real list.
        when(mockClientPlugin
                .autoTuneWriteRateLimit(anyDouble(), anyList(), any(NdBenchMonitor.class)))
                .thenReturn(500D);
        NdBenchMonitor mockMonitor = mock(NdBenchMonitor.class);
        doNothing().when(mockMonitor).recordReadLatency(anyLong());
        doNothing().when(mockMonitor).incWriteSuccess();
        NdBenchDriver driver = new NdBenchDriver(config, ndBenchMonitor, dataGenerator, settableConfig);
        WriteOperation writeOperation = new WriteOperation(mockClientPlugin);
        List<String> keys = new ArrayList<>(1000);
        for (int i = 0; i < 1000; i++) {
            keys.add("keys" + i);
        }
        writeOperation.process(driver, mockMonitor, keys,
                new AtomicReference<>(RateLimiter.create(100)), true);
        int rateFromSettableConfig = settableConfig.getInteger(NdBenchConstants.WRITE_RATE_LIMIT_FULL_NAME);
        assertEquals(rateFromSettableConfig, 500D, .001);
    }
}
| 9,137 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/DataBackfillTest.java | package com.netflix.ndbench.core;
import java.util.List;
import javax.inject.Inject;
import org.apache.commons.lang3.tuple.Pair;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.governator.guice.test.ModulesForTesting;
import com.netflix.governator.guice.test.junit4.GovernatorJunit4ClassRunner;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.defaultimpl.NdBenchGuiceModule;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(GovernatorJunit4ClassRunner.class)
@ModulesForTesting({ NdBenchGuiceModule.class, ArchaiusModule.class})
public class DataBackfillTest
{
    // Injected by Governator from the test modules above.
    @Inject
    DataBackfill dataBackfill;
    @Inject
    IConfiguration config;

    // Ensure any backfill started by a test is stopped before the next test runs.
    @After
    public void afterMethod()
    {
        dataBackfill.stopBackfill();
    }

    // Synchronous backfill against a stubbed client should complete without error.
    @Test
    public void backfill() throws Exception
    {
        NdBenchClient mockClientPlugin = mock(NdBenchClient.class);
        when(mockClientPlugin.writeSingle(anyString())).thenReturn("foo");
        dataBackfill.backfill(mockClientPlugin);
    }

    // Asynchronous backfill should start without error (stopped in afterMethod).
    @Test
    public void backfillAsync() throws Exception
    {
        NdBenchClient mockClientPlugin = mock(NdBenchClient.class);
        when(mockClientPlugin.writeSingle(anyString())).thenReturn("foo");
        dataBackfill.backfillAsync(mockClientPlugin);
    }

    // Async backfill should be restartable after an explicit stop.
    @Test
    public void backfillAsyncRestart() throws Exception
    {
        NdBenchClient mockClientPlugin = mock(NdBenchClient.class);
        when(mockClientPlugin.writeSingle(anyString())).thenReturn("foo");
        dataBackfill.backfillAsync(mockClientPlugin);
        dataBackfill.stopBackfill();
        dataBackfill.backfillAsync(mockClientPlugin);
    }

    // Every generated key range must end at or before the overall key count.
    @Test
    public void getKeyRangesPerThread()
    {
        for (int i = 0; i < 100; i++)
        {
            List<Pair<Integer, Integer>> s = dataBackfill.getKeyRangesPerThread(10, 4, 100);
            s.forEach(st_end -> Assert.assertTrue(st_end.getRight() <= 100));
            s.forEach(System.out::println);
        }
    }
}
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/util/NdbUtilTest.java | package com.netflix.ndbench.core.util;
import java.util.Arrays;
import java.util.Collection;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.ndbench.core.util.NdbUtil.humanReadableByteCount;
/**
* @author vchella
*/
public class NdbUtilTest
{
static Collection<Object[]> generateData() {
return Arrays.asList(new Object[][] { { 0L, "0 bytes" },
{ 27L, "27 bytes" }, { 999L, "999 bytes" }, {1000L, "1000 bytes" },
{1023L, "1023 bytes"},{1024L, "1.0 KB"},{1728L, "1.7 KB"},{110592L, "108.0 KB"},
{7077888L, "6.8 MB"}, {452984832L, "432.0 MB"}, {28991029248L, "27.0 GB"},
{1855425871872L, "1.7 TB"}, {9223372036854775807L, "8.0 EB"}});
}
@Test
public void testByteCountToDisplaySizeBigInteger() {
generateData().forEach(objects -> Assert.assertEquals(objects[1],
humanReadableByteCount(((long)objects[0]))));
}
} | 9,139 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/util/ConstantStepWiseRateIncreaserTest.java | package com.netflix.ndbench.core.util;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertThat;
public class ConstantStepWiseRateIncreaserTest {
    /**
     * A 100ms ramp from 0 to 10 in 10 steps should yield one unit per 10ms,
     * then plateau at 10 forever after the ramp completes.
     */
    @Test
    public void testGetRateReturnsProperRateRelativeToTimeZero() throws Exception {
        ConstantStepWiseRateIncreaser increaser = new ConstantStepWiseRateIncreaser(100, 10, 0, 10);
        assertRateAt(increaser, 9, 0.0);    // just before the first step
        assertRateAt(increaser, 10, 1.0);   // first step boundary
        assertRateAt(increaser, 99, 9.0);   // just before the final step
        assertRateAt(increaser, 100, 10.0); // ramp complete
        assertRateAt(increaser, 101, 10.0); // stays capped
        assertRateAt(increaser, Long.MAX_VALUE - 1, 10.0); // capped arbitrarily far out
    }

    /** Asserts the rate at the given clock time, relative to a start time of zero. */
    private static void assertRateAt(ConstantStepWiseRateIncreaser increaser,
                                     long clockTime,
                                     double expectedRate) {
        assertThat(increaser.getRateForGivenClockTime(0, clockTime), is(expectedRate));
    }
}
| 9,140 |
0 | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/test/java/com/netflix/ndbench/core/util/ChecksumUtilTest.java | package com.netflix.ndbench.core.util;
import org.apache.commons.lang.RandomStringUtils;
import org.junit.Assert;
import org.junit.Test;
/**
 * Round-trip tests for {@link CheckSumUtil}: any encoded payload must pass
 * checksum validation, with or without the append flag.
 *
 * @author Sumanth Pasupuleti
 */
public class ChecksumUtilTest
{
    @Test
    public void testChecksumGenerationAndValidationWithAppendFalse()
    {
        assertEncodedPayloadValidates(false);
    }

    @Test
    public void testChecksumGenerationAndValidationWithAppendTrue()
    {
        assertEncodedPayloadValidates(true);
    }

    /** Encodes a random 128-char payload and asserts its checksum validates. */
    private static void assertEncodedPayloadValidates(boolean append)
    {
        String payload = RandomStringUtils.random(128);
        String encoded = CheckSumUtil.appendCheckSumAndEncodeBase64(payload, append);
        Assert.assertTrue(CheckSumUtil.isChecksumValid(encoded));
    }
}
| 9,141 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/NdBenchClientFactory.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import java.util.Map;
import java.util.Set;
/**
 * Registry of benchmark client plugins, keyed by driver name. The map is bound
 * via Guice multibinding and injected at construction time.
 *
 * @author vchella
 */
@Singleton
public class NdBenchClientFactory {
    // Immutable reference; contents are owned by the injector.
    private final Map<String, NdBenchAbstractClient<?>> clientMap;

    @Inject
    public NdBenchClientFactory(Map<String, NdBenchAbstractClient<?>> driverMap) {
        this.clientMap = driverMap;
    }

    /**
     * Looks up a client plugin by name.
     *
     * @param clientName registered driver name
     * @return the matching client, never null
     * @throws RuntimeException if no client is registered under {@code clientName};
     *         the message lists the available drivers to aid debugging
     */
    public NdBenchAbstractClient<?> getClient(String clientName) {
        NdBenchAbstractClient<?> client = clientMap.get(clientName);
        if (client == null) {
            throw new RuntimeException("Client not found: " + clientName
                    + ". Available clients: " + clientMap.keySet());
        }
        return client;
    }

    /** @return the names of all registered client drivers. */
    public Set<String> getClientDrivers() {
        return clientMap.keySet();
    }
}
| 9,142 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/NdBenchDriver.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core;
import com.google.common.util.concurrent.RateLimiter;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.generators.KeyGenerator;
import com.netflix.ndbench.core.generators.KeyGeneratorFactory;
import com.netflix.ndbench.core.operations.ReadOperation;
import com.netflix.ndbench.core.operations.WriteOperation;
import com.netflix.ndbench.core.util.LoadPattern;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Core load-generation driver. Owns the reader/writer worker thread pools, the
 * read/write {@link RateLimiter}s, the lifecycle of the pluggable benchmark client,
 * and a background timer that periodically logs observed RPS.
 *
 * <p>Thread-safety: all mutable state is held in atomics; start/stop methods may be
 * called from multiple REST threads concurrently.
 *
 * @author vchella
 */
@Singleton
public class NdBenchDriver {
    private static final Logger logger = LoggerFactory.getLogger(NdBenchDriver.class);

    /** Seconds to wait (per attempt) for a worker pool to terminate on shutdown. */
    public static final int TIMEOUT = 5;

    private final AtomicInteger readWorkers = new AtomicInteger(0);
    private final AtomicInteger writeWorkers = new AtomicInteger(0);
    private final AtomicReference<ExecutorService> tpReadRef = new AtomicReference<>(null);
    private final AtomicReference<ExecutorService> tpWriteRef = new AtomicReference<>(null);
    private final AtomicBoolean readsStarted = new AtomicBoolean(false);
    private final AtomicBoolean writesStarted = new AtomicBoolean(false);
    private final AtomicBoolean clientInited = new AtomicBoolean(false);
    private final AtomicReference<RateLimiter> readLimiter;
    private final AtomicReference<RateLimiter> writeLimiter;
    // Single-thread pool that periodically logs RPS; created lazily in checkAndInitTimer().
    private final AtomicReference<ExecutorService> timerRef = new AtomicReference<>(null);
    private final RPSCount rpsCount;
    private final AtomicReference<NdBenchAbstractClient<?>> clientRef =
            new AtomicReference<>(null);
    private final AtomicReference<KeyGenerator> keyGeneratorWriteRef = new AtomicReference<>(null);
    private final AtomicReference<KeyGenerator> keyGeneratorReadRef = new AtomicReference<>(null);
    private final IConfiguration config;
    private final NdBenchMonitor ndBenchMonitor;
    private final DataGenerator dataGenerator;
    private final SettableConfig settableConfig;

    @Inject
    public NdBenchDriver(IConfiguration config,
                         NdBenchMonitor ndBenchMonitor,
                         DataGenerator dataGenerator,
                         @RuntimeLayer SettableConfig settableConfig) {
        this.config = config;
        this.ndBenchMonitor = ndBenchMonitor;
        this.readLimiter = new AtomicReference<>();
        this.writeLimiter = new AtomicReference<>();
        this.dataGenerator = dataGenerator;
        this.settableConfig = settableConfig;
        this.rpsCount = new RPSCount(readsStarted, writesStarted, readLimiter, writeLimiter, config, ndBenchMonitor);
        // Best-effort cleanup of workers and the client plugin on JVM exit.
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            logger.info("*** shutting down NdBench server since JVM is shutting down");
            NdBenchDriver.this.stop();
            try {
                NdBenchDriver.this.shutdownClient();
                Thread.sleep(2000);
            } catch (Exception e) {
                //ignore
            }
        }));
    }

    /** Starts both write and read workloads with the given load pattern. */
    public void start(LoadPattern loadPattern, int windowSize, long windowDurationInSec, int bulkSize) {
        logger.info("Starting Load Test Driver...");
        startWrites(loadPattern, windowSize, windowDurationInSec, bulkSize);
        startReads(loadPattern, windowSize, windowDurationInSec, bulkSize);
    }

    /** Starts the read workload; no-op if reads are already running. */
    public void startReads(LoadPattern loadPattern, int windowSize, long windowDurationInSec, int bulkSize) {
        if (readsStarted.get()) {
            logger.info("Reads already started ... ignoring");
            return;
        }
        startReadsInternal(loadPattern, windowSize, windowDurationInSec, bulkSize);
    }

    private void startReadsInternal(LoadPattern loadPattern, int windowSize, long windowDurationInSec, int bulkSize) {
        logger.info("Starting NdBenchDriver reads...");
        NdBenchOperation operation;
        operation = new ReadOperation(clientRef.get());
        KeyGeneratorFactory keyGeneratorFactory = new KeyGeneratorFactory();
        KeyGenerator<String> keyGenerator = keyGeneratorFactory.getKeyGenerator(loadPattern,
                config.getNumKeys(), windowSize, windowDurationInSec, config.isPreloadKeys(), config.getZipfExponent());
        keyGeneratorReadRef.set(keyGenerator);
        startOperation(
                config.isReadEnabled(),
                config.getNumReaders(),
                readWorkers,
                tpReadRef,
                readLimiter,
                operation,
                keyGenerator,
                config.isAutoTuneEnabled(),
                bulkSize);
        readsStarted.set(true);
    }

    /** Starts the write workload; no-op if writes are already running. */
    public void startWrites(LoadPattern loadPattern, int windowSize, long windowDurationInSec, int bulkSize) {
        if (writesStarted.get()) {
            logger.info("Writes already started ... ignoring");
            return;
        }
        startWritesInternal(loadPattern, windowSize, windowDurationInSec, bulkSize);
    }

    private void startWritesInternal(LoadPattern loadPattern, int windowSize, long windowDurationInSec, int bulkSize) {
        logger.info("Starting NdBenchDriver writes...");
        NdBenchOperation operation;
        operation = new WriteOperation(clientRef.get());
        KeyGeneratorFactory keyGeneratorFactory = new KeyGeneratorFactory();
        KeyGenerator<String> keyGenerator = keyGeneratorFactory.getKeyGenerator(loadPattern,
                config.getNumKeys(), windowSize, windowDurationInSec, config.isPreloadKeys(), config.getZipfExponent());
        keyGeneratorWriteRef.set(keyGenerator);
        startOperation(config.isWriteEnabled(),
                config.getNumWriters(),
                writeWorkers,
                tpWriteRef,
                writeLimiter,
                operation,
                keyGenerator,
                config.isAutoTuneEnabled(),
                bulkSize);
        writesStarted.set(true);
    }

    /** @return true while a write worker pool exists. */
    public boolean getIsWriteRunning() {
        // Simplified to mirror getIsReadRunning() (was an if/return-true/return-false chain).
        return tpWriteRef.get() != null;
    }

    /** @return true while a read worker pool exists. */
    public boolean getIsReadRunning() {
        ExecutorService tp = tpReadRef.get();
        return tp != null;
    }

    /**
     * Spins up {@code numWorkersConfig} worker threads that pull keys from
     * {@code keyGenerator} in batches of {@code bulkSize} (subject to the rate
     * limiter) and feed them to {@code operation} until stopped or keys run out.
     */
    private void startOperation(boolean operationEnabled,
                                int numWorkersConfig,
                                AtomicInteger numWorkers,
                                AtomicReference<ExecutorService> tpRef,
                                final AtomicReference<RateLimiter> rateLimiter,
                                final NdBenchOperation operation,
                                final KeyGenerator<String> keyGenerator,
                                Boolean isAutoTuneEnabled,
                                int bulkSize) {
        if (!operationEnabled) {
            logger.info("Operation : {} not enabled, ignoring", operation.getClass().getSimpleName());
            return;
        }
        keyGenerator.init();
        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                .setNameFormat("ndbench-"+operation.getClass().getSimpleName()+"-pool-%d")
                .setDaemon(false).build();
        ExecutorService threadPool = Executors.newFixedThreadPool(numWorkersConfig, threadFactory);
        boolean success = tpRef.compareAndSet(null, threadPool);
        if (!success) {
            throw new RuntimeException("Unknown threadpool when performing tpRef CAS operation");
        }
        logger.info("\n\nWorker threads: " + numWorkersConfig + ", Num Keys: " + config.getNumKeys() + "\n\n");
        for (int i = 0; i < numWorkersConfig; i++) {
            threadPool.submit((Callable<Void>) () -> {
                while (!Thread.currentThread().isInterrupted()) {
                    boolean noMoreKey = false;
                    if (((operation.isReadType() && readsStarted.get()) ||
                            (operation.isWriteType() && writesStarted.get())) && rateLimiter.get().tryAcquire()) {
                        // HashSet dedupes keys within a batch; sized to avoid rehash.
                        final Set<String> keys = new HashSet<>(bulkSize * 2);
                        while (keys.size() < bulkSize) {
                            keys.add(keyGenerator.getNextKey());
                            if (!keyGenerator.hasNextKey()) {
                                noMoreKey = true;
                                break;
                            }
                        } // eo keygens
                        operation.process(
                                NdBenchDriver.this,
                                ndBenchMonitor,
                                new ArrayList<>(keys),
                                rateLimiter,
                                isAutoTuneEnabled);
                    } // eo if read or write
                    if (noMoreKey) {
                        logger.info("No more keys to process, hence stopping this thread.");
                        if (operation.isReadType()) {
                            stopReads();
                        } else if (operation.isWriteType()) {
                            stopWrites();
                        }
                        Thread.currentThread().interrupt();
                        break;
                    } // eo if noMoreKey
                } // eo while thread not interrupted
                logger.info("NdBenchWorker shutting down");
                return null;
            });
            numWorkers.incrementAndGet();
        }
    }

    /**
     * FUNCTIONALITY FOR STOPPING THE WORKERS
     */
    public void stop() {
        stopWrites();
        stopReads();
        // timerRef is a final field, so only its contents need the null check.
        ExecutorService timer = timerRef.get();
        if (timer != null) {
            timer.shutdownNow();
            timerRef.set(null);
        }
        ndBenchMonitor.resetStats();
    }

    public void stopReads() {
        readsStarted.set(false);
        keyGeneratorReadRef.set(null);
        stopOperation(tpReadRef);
    }

    public void stopWrites() {
        writesStarted.set(false);
        keyGeneratorWriteRef.set(null);
        stopOperation(tpWriteRef);
    }

    /** Shuts down the given pool and blocks (in TIMEOUT-second steps) until it dies. */
    public void stopOperation(AtomicReference<ExecutorService> tpRef) {
        ExecutorService tp = tpRef.get();
        if (tp == null) {
            logger.warn("Broken reference to threadPool -- unable to stop!");
            return;
        }
        tp.shutdownNow();
        tpRef.set(null);
        logger.info("Attempting to shutdown threadpool");
        while (!tp.isTerminated()) {
            try {
                logger.info("Waiting for worker pool to stop, sleeping for 5 to 10 seconds");
                // Wait a while for existing tasks to terminate
                if (!tp.awaitTermination(TIMEOUT, TimeUnit.SECONDS)) {
                    tp.shutdownNow(); // Cancel currently executing tasks
                    // Wait a while for tasks to respond to being cancelled
                    if (!tp.awaitTermination(TIMEOUT, TimeUnit.SECONDS))
                        logger.error("Error while shutting down executor service : ");
                }
                logger.info("Threadpool has terminated!");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();//preserve the message
                tp.shutdownNow();
                logger.info("Failed to terminate Threadpool! Ignoring.");
                break;
            }
        }
        logger.info("Threadpool has terminated!");
    }

    /** Contract implemented by read/write operations executed by the worker threads. */
    public interface NdBenchOperation {
        boolean process(NdBenchDriver driver,
                        NdBenchMonitor monitor,
                        List<String> keys,
                        AtomicReference<RateLimiter> rateLimiter,
                        boolean isAutoTuneEnabled);
        boolean isReadType();
        boolean isWriteType();
    }

    /**
     * Initializes the client plugin exactly once, then seeds the rate limiters from
     * config and starts the RPS logging timer. On init failure the inited flag is
     * rolled back so a later attempt can retry.
     */
    public void init(NdBenchAbstractClient<?> client) throws Exception {
        if (!clientInited.get()) {
            try {
                if (clientInited.compareAndSet(false, true)) {
                    client.init(this.dataGenerator); // Exceptions from init method will be caught and clientInited will be reset
                    clientRef.set(client);
                }
            } catch (Exception e) {
                clientInited.compareAndSet(true, false);
                throw new Exception("Exception initializing client", e);
            }
            // Logic for dealing with rate limits
            setWriteRateLimit(config.getWriteRateLimit());
            setReadRateLimit(config.getReadRateLimit());
            checkAndInitTimer();
        }
    }

    public void onWriteRateLimitChange() {
        checkAndInitRateLimit(writeLimiter, config.getWriteRateLimit(), "writeLimiter");
    }

    public void onReadRateLimitChange() {
        checkAndInitRateLimit(readLimiter, config.getReadRateLimit(), "readLimiter");
    }

    /** Persists an auto-tuned write limit into the runtime config, then applies it. */
    public void updateWriteRateLimit(double newLimit) {
        settableConfig.setProperty(NdBenchConstants.WRITE_RATE_LIMIT_FULL_NAME, (int) Math.ceil(newLimit));
        onWriteRateLimitChange();
    }

    private void setWriteRateLimit(int prop) {
        checkAndInitRateLimit(writeLimiter, prop, "writeLimiter");
    }

    private void setReadRateLimit(int prop) {
        checkAndInitRateLimit(readLimiter, prop, "readLimiter");
    }

    /** Creates the limiter on first use, or replaces it when the configured rate changed. */
    private void checkAndInitRateLimit(AtomicReference<RateLimiter> rateLimiter, int property, String prop) {
        RateLimiter oldLimiter = rateLimiter.get();
        if (oldLimiter == null) {
            logger.info("Setting rate Limit for: " + prop + " to: " + property);
            rateLimiter.set(RateLimiter.create(property));
            return;
        }
        int oldLimit = (int) oldLimiter.getRate();
        int newLimit = property;
        logger.info("oldlimit={} / newLimit={}", oldLimit, newLimit);
        if (oldLimit != newLimit) {
            logger.info("Updating rate Limit for: " + prop + " to: " + newLimit);
            rateLimiter.set(RateLimiter.create(newLimit));
        }
    }

    private void checkAndInitTimer() {
        /** CODE TO PERIODICALLY LOG RPS */
        ExecutorService timer = timerRef.get();
        if (timer == null) {
            ThreadFactory threadFactory = new ThreadFactoryBuilder()
                    .setNameFormat("ndbench-updaterps-pool-%d")
                    .setDaemon(false).build();
            timer = Executors.newFixedThreadPool(1, threadFactory);
            timer.submit(() -> {
                while (!Thread.currentThread().isInterrupted()) {
                    rpsCount.updateRPS();
                    // Multiply as long to avoid int overflow for very large intervals.
                    Thread.sleep(config.getStatsUpdateFreqSeconds() * 1000L);
                }
                return null;
            });
            timerRef.set(timer);
        }
    }

    /** Shuts down the client plugin and clears the inited flag. */
    public void shutdownClient() throws Exception {
        if (clientInited.get()) {
            clientRef.get().shutdown();
            if (clientInited.compareAndSet(true, false)) {
                clientRef.set(null);
            }
        }
    }

    public String readSingle(String key) throws Exception {
        try {
            return clientRef.get().readSingle(key);
        } catch (Exception e) {
            logger.error("FAILED readSingle ", e);
            throw e;
        }
    }

    public String writeSingle(String key) throws Exception {
        Object result = clientRef.get().writeSingle(key);
        return result == null ? "<null>" : result.toString();
    }

    public NdBenchAbstractClient<?> getClient() {
        return clientRef.get();
    }

    public KeyGenerator getWriteLoadPattern() {
        return keyGeneratorWriteRef.get();
    }

    public KeyGenerator getReadLoadPattern() {
        return keyGeneratorReadRef.get();
    }
}
| 9,143 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/RPSCount.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.config.IConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Computes per-window read/write RPS deltas from the monitor's running totals,
 * publishes them back to the {@link NdBenchMonitor}, and warns when the observed
 * rate falls below the configured rate limit (a sign the client may be the bottleneck).
 *
 * @author vchella
 */
public class RPSCount {
    private static final Logger logger = LoggerFactory.getLogger(RPSCount.class);

    // Totals observed at the end of the previous window; used to compute deltas.
    private final AtomicLong reads = new AtomicLong(0L);
    private final AtomicLong writes = new AtomicLong(0L);
    private final IConfiguration config;
    private final NdBenchMonitor ndBenchMonitor;
    private final AtomicReference<RateLimiter> readLimiter;
    private final AtomicReference<RateLimiter> writeLimiter;
    private final AtomicBoolean readsStarted;
    private final AtomicBoolean writesStarted;

    RPSCount(AtomicBoolean readsStarted,
             AtomicBoolean writesStarted,
             AtomicReference<RateLimiter> readLimiter,
             AtomicReference<RateLimiter> writeLimiter,
             IConfiguration config,
             NdBenchMonitor ndBenchMonitor) {
        this.readsStarted = readsStarted;
        this.writesStarted = writesStarted;
        this.readLimiter = readLimiter;
        this.writeLimiter = writeLimiter;
        this.config = config;
        this.ndBenchMonitor = ndBenchMonitor;
    }

    /** Called once per stats window by the driver's timer thread. */
    void updateRPS() {
        int secondsFreq = config.getStatsUpdateFreqSeconds();
        long totalReads = ndBenchMonitor.getReadSuccess() + ndBenchMonitor.getReadFailure();
        long totalWrites = ndBenchMonitor.getWriteSuccess() + ndBenchMonitor.getWriteFailure();
        long totalOps = totalReads + totalWrites;
        long totalSuccess = ndBenchMonitor.getReadSuccess() + ndBenchMonitor.getWriteSuccess();
        // Delta since the previous window, normalized to per-second rates.
        long readRps = (totalReads - reads.get()) / secondsFreq;
        long writeRps = (totalWrites - writes.get()) / secondsFreq;
        long sRatio = (totalOps > 0) ? (totalSuccess * 100L / (totalOps)) : 0;
        reads.set(totalReads);
        writes.set(totalWrites);
        ndBenchMonitor.setWriteRPS(writeRps);
        ndBenchMonitor.setReadRPS(readRps);
        logger.info("Read avg: " + (double) ndBenchMonitor.getReadLatAvg() / 1000.0 + "ms, Read RPS: " + readRps
                + ", Write avg: " + (double) ndBenchMonitor.getWriteLatAvg() / 1000.0 + "ms, Write RPS: " + writeRps
                + ", total RPS: " + (readRps + writeRps) + ", Success Ratio: " + sRatio + "%");
        long expectedReadRate = (long) this.readLimiter.get().getRate();
        long expectedWriteRate = (long) this.writeLimiter.get().getRate();
        String bottleneckMsg = "If this occurs consistently the benchmark client could be the bottleneck.";
        // Warn if we are not keeping up with the configured limit (message typo
        // "rate + ({})" fixed to "rate ({})").
        if (this.config.isReadEnabled() && readsStarted.get() && readRps < expectedReadRate) {
            logger.warn("Observed Read RPS ({}) less than expected read rate ({}).\n{}",
                    readRps, expectedReadRate, bottleneckMsg);
        }
        if (this.config.isWriteEnabled() && writesStarted.get() && writeRps < expectedWriteRate) {
            logger.warn("Observed Write RPS ({}) less than expected write rate ({}).\n{}",
                    writeRps, expectedWriteRate, bottleneckMsg);
        }
    }
}
| 9,144 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/DataBackfill.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.core.config.IConfiguration;
/**
 * Pre-populates (backfills) a slice of the key space by writing keys "T0".."Tn"
 * through the client plugin, using 4x-cores worker threads plus one status-poller
 * task that logs progress until all workers finish.
 *
 * @author vchella
 */
@Singleton
public class DataBackfill {
    private static final Logger logger = LoggerFactory.getLogger(DataBackfill.class);

    private final IConfiguration config;
    // Cooperative stop flag checked by every worker loop.
    private final AtomicBoolean stop = new AtomicBoolean(false);
    private final AtomicReference<ExecutorService> threadPool = new AtomicReference<>(null);
    // Keys found missing by conditional/verify backfills.
    private final AtomicInteger missCount = new AtomicInteger(0);
    // Total keys processed so far (package-visible for tests).
    final AtomicInteger count = new AtomicInteger(0);
    private final Random random = new Random();
    // Future of the status-poller task; completes once all workers have counted down.
    private final AtomicReference<Future<Void>> futureRef = new AtomicReference<>(null);

    @Inject
    public DataBackfill(IConfiguration config) {
        this.config = config;
    }

    /** Runs a plain write backfill and blocks until it completes. */
    public void backfill(final NdBenchAbstractClient<?> client) throws Exception {
        backfill(client, new NormalBackfill());
    }

    /** Writes only keys that are currently missing; blocks until done. */
    public void conditionalBackfill(final NdBenchAbstractClient<?> client) throws Exception {
        backfill(client, new ConditionalBackfill());
    }

    /** Writes every key and reads it back to verify; blocks until done. */
    public void verifyBackfill(final NdBenchAbstractClient<?> client) throws Exception {
        backfill(client, new VerifyBackfill());
    }

    private void backfill(final NdBenchAbstractClient<?> client, final BackfillOperation backfillOperation) throws Exception {
        long start = System.currentTimeMillis();
        backfillAsync(client, backfillOperation);
        logger.info("Backfiller waiting to finish");
        // BUG FIX: the original only fetched the Future (futureRef.get()) without
        // calling Future.get(), so this "blocking" method returned immediately.
        Future<Void> statusPoller = futureRef.get();
        if (statusPoller != null) {
            statusPoller.get();
        }
        logger.info("Backfiller latch done! in " + (System.currentTimeMillis() - start) + " ms");
    }

    /** Starts a plain write backfill in the background and returns immediately. */
    public void backfillAsync(final NdBenchAbstractClient<?> client) {
        backfillAsync(client, new NormalBackfill());
    }

    private void backfillAsync(final NdBenchAbstractClient<?> client, final BackfillOperation backfillOperation) {
        stop.set(false);
        //Default #Cores*4 so that we can keep the CPUs busy even while waiting on I/O
        final int numThreads = Runtime.getRuntime().availableProcessors() * 4;
        initThreadPool(numThreads);
        List<Pair<Integer, Integer>> keyRanges = getKeyRangesPerThread(numThreads,
                config.getBackfillKeySlots(),
                config.getNumKeys());
        final CountDownLatch latch = new CountDownLatch(numThreads);
        for (int i = 0; i < numThreads; i++) {
            final int startKey = keyRanges.get(i).getLeft();
            final int endKey = keyRanges.get(i).getRight();
            threadPool.get().submit(() -> {
                int k = startKey;
                while (k < endKey && !stop.get()) {
                    // Build the key before entering the try so the failure log
                    // reports the key actually being processed (k is bumped here,
                    // so the old "Key: T{}, k" log named the *next* key).
                    String key = "T" + k;
                    k++;
                    try {
                        count.incrementAndGet();
                        String result = backfillOperation.process(client, key);
                        logger.info("Backfill Key:" + key + " | Result: " + result);
                    } catch (Exception e) {
                        logger.error("Exception in processing backfill write. Key: {}", key, e);
                    }
                }
                latch.countDown();
                logger.info("Stopping datafill writer");
                return null;
            });
        }
        // Status-poller: logs progress every 5s until all workers count down.
        Future<Void> future = threadPool.get().submit(() -> {
            final AtomicBoolean stopCounting = new AtomicBoolean(false);
            while (!Thread.currentThread().isInterrupted() && !stopCounting.get()) {
                logger.info("Backfill so far: " + count.get() + ", miss count: " + missCount.get());
                try {
                    boolean done = latch.await(5000, TimeUnit.MILLISECONDS);
                    if (done) {
                        stopCounting.set(true);
                    }
                } catch (InterruptedException e) {
                    // return from here.
                    stopCounting.set(true);
                }
            }
            logger.info("Stopping datafill status poller");
            return null;
        });
        futureRef.set(future);
    }

    /** @return true while the backfill status-poller task is still running. */
    public boolean getIsBackfillRunning() {
        Future<Void> future = futureRef.get();
        // Never started, completed, or cancelled all count as "not running".
        return future != null && !future.isDone() && !future.isCancelled();
    }

    /** Strategy applied to each key during a backfill pass. */
    private interface BackfillOperation {
        String process(final NdBenchAbstractClient<?> client, final String key) throws Exception;
    }

    /** Unconditionally writes the key. */
    private class NormalBackfill implements BackfillOperation {
        @Override
        public String process(NdBenchAbstractClient<?> client, String key) throws Exception {
            Object result = client.writeSingle(key);
            return result == null ? "<null>" : result.toString();
        }
    }

    /** Writes the key only when a read finds it missing; counts misses. */
    private class ConditionalBackfill implements BackfillOperation {
        @Override
        public String process(NdBenchAbstractClient<?> client, String key) throws Exception {
            String result = client.readSingle(key);
            if (result == null) {
                missCount.incrementAndGet();
                Object writeResult = client.writeSingle(key);
                return writeResult == null ? "<null>" : writeResult.toString();
            }
            return "done";
        }
    }

    /** Writes the key, then reads it back; counts read-back misses. */
    private class VerifyBackfill implements BackfillOperation {
        @Override
        public String process(NdBenchAbstractClient<?> client, String key) throws Exception {
            Object result = client.writeSingle(key);
            String value = client.readSingle(key);
            if (value == null) {
                missCount.incrementAndGet();
                return "backfill miss: " + result;
            } else {
                return result == null ? "<null>" : result.toString();
            }
        }
    }

    /** Creates the worker pool (+1 thread for the status poller); fails if already running. */
    private void initThreadPool(int numThreads) {
        if (threadPool.get() != null) {
            throw new RuntimeException("Backfill already started");
        }
        ThreadFactory threadFactory = new ThreadFactoryBuilder()
                .setNameFormat("ndbench-backfill-pool-%d")
                .setDaemon(false).build();
        ExecutorService newPool = Executors.newFixedThreadPool(numThreads + 1, threadFactory);
        boolean success = threadPool.compareAndSet(null, newPool);
        if (!success) {
            // Lost the race to another starter; discard our pool.
            newPool.shutdownNow();
            throw new RuntimeException("Backfill already started");
        }
    }

    /** Signals workers to stop, cancels the poller, and tears down the pool. */
    public void stopBackfill() {
        stop.set(true);
        Future<Void> future = futureRef.get();
        if (future != null) {
            future.cancel(true);
        }
        shutdown();
    }

    public void shutdown() {
        if (threadPool.get() != null) {
            threadPool.get().shutdownNow();
            threadPool.set(null);
        }
    }

    /**
     * This method returns the key range to be back filled in [1 - IConfiguration.getNumKeys()] keyspace.
     * Algorithm to determine keyrange:
     * NumKeys/BackfillKeySlots --> This gives the key range slots to be processed.
     * Each worker randomly picks the slot from above. With low #BackfillKeySlots there is high probability
     * to cover keyspace without any misses.
     * @return one (start, end) pair per thread, all within the randomly chosen slot
     */
    List<Pair<Integer, Integer>> getKeyRangesPerThread(int numThreads, int keySlots, int numKeys)
    {
        List<Pair<Integer, Integer>> keyRangesPerThread = new LinkedList<>();
        int slotSize = numKeys / keySlots;
        int randomSlot = random.nextInt(keySlots);
        int startKey = randomSlot * slotSize;
        int endKey = startKey + slotSize;
        int numKeysToProcess = endKey - startKey;
        int numKeysPerThread = numKeysToProcess / numThreads;
        logger.info("Num keys (KEYSPACE): {}, Num threads: {}, Num slots: {}", numKeys, numThreads, keySlots);
        logger.info("MyNode: Num keys to be processed: {}, Num keys per thread: {}, My key slot: {}",
                numKeysToProcess, numKeysPerThread, randomSlot);
        for (int i = 0; i < numThreads; i++)
        {
            int startKeyPerThread = startKey + (i * numKeysPerThread);
            int endKeyPerThread = startKeyPerThread + numKeysPerThread;
            keyRangesPerThread.add(Pair.of(startKeyPerThread, endKeyPerThread));
        }
        return keyRangesPerThread;
    }
}
| 9,145 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/filters/CorsResponseFilter.java | package com.netflix.ndbench.core.filters;
import com.netflix.ndbench.core.config.IConfiguration;
import com.sun.jersey.spi.container.ContainerRequest;
import com.sun.jersey.spi.container.ContainerResponse;
import com.sun.jersey.spi.container.ContainerResponseFilter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.ws.rs.ext.Provider;
import java.util.Arrays;
import java.util.List;
@Provider
public class CorsResponseFilter implements ContainerResponseFilter {
    public static final Logger LOGGER = LoggerFactory.getLogger(CorsResponseFilter.class);

    @Inject IConfiguration config;

    /**
     * Adds CORS headers when the request's Origin is in the semicolon-separated
     * allow-list from configuration; otherwise leaves the response untouched.
     */
    @Override
    public ContainerResponse filter(ContainerRequest request, ContainerResponse response) {
        String origin = request.getRequestHeaders().getFirst("Origin");
        // Re-read the allow-list on every request so config changes take effect live.
        List<String> allowedOrigins = Arrays.asList(config.getAllowedOrigins().split(";"));
        if (allowedOrigins.contains(origin)) {
            // Echo the specific origin (not "*") and mark the response as origin-dependent
            // so caches keep per-origin copies.
            response.getHttpHeaders().add("Access-Control-Allow-Origin", origin);
            response.getHttpHeaders().add("Vary", "Origin");
        }
        return response;
    }
}
| 9,146 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/NdbUtil.java | /**
* Copyright (c) 2018 Netflix, Inc. All rights reserved.
*/
package com.netflix.ndbench.core.util;
/**
* Util class for NdBench commonly used util methods
* @author vchella
*/
public class NdbUtil
{
private static final String[] BINARY_UNITS = { "bytes", "KB", "MB", "GB", "TB", "PB", "EB" };
/**
* FileUtils.byteCountToDisplaySize rounds down the size, hence using this for more precision.
* @param bytes bytes
* @return human readable bytes
*/
public static String humanReadableByteCount(final long bytes)
{
final int base = 1024;
// When using the smallest unit no decimal point is needed, because it's the exact number.
if (bytes < base) {
return bytes + " " + BINARY_UNITS[0];
}
final int exponent = (int) (Math.log(bytes) / Math.log(base));
final String unit = BINARY_UNITS[exponent];
return String.format("%.1f %s", bytes / Math.pow(base, exponent), unit);
}
}
| 9,147 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/CheckSumUtil.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.util;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Base64;
import java.util.zip.CRC32;
import java.util.zip.Checksum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Sumanth Pasupuleti
*
* CheckSumUtil contains methods around generation and validation of CRC32 based checksum
*/
public class CheckSumUtil
{
private static final Logger logger = LoggerFactory.getLogger(CheckSumUtil.class);
/**
* Generates a checksum of the input string or an abridged version of the input string (depending upon append param)
* and returns a base64 encoded string of the input string (or an abridged version of it) + checksum.
* Returned string is usually longer than the input string. 33% overhead comes from base64 encoding, and the rest depends
* on append param.
*
* Future enhancement: This method can be further enhanced by generating checksum for every x-byte block of the input string. Validator can then
* validate checksum at block level and bail out of parsing further blocks when an invalid checksum is encountered.
* @param inputString string for which checksum has to be generated and appended to
* @param append If true, checksum is generated for the entire input string and checksum (8 bytes) is appended to the input string
* after which it is base64 encoded.
* If false, last 8 bytes of the input string are discarded to make it an abridged version of the input string
* and checksum (8 bytes) is appended to the input string after which it is base64 encoded.
* This is primarily useful to have a control on the length of the returned string relative to the length of the input string.
* @return Base64 encoded String of (input string + checksum)
*/
public static String appendCheckSumAndEncodeBase64(String inputString, boolean append)
{
if (!append)
{
// crc32 generates a checksum of type long (8 bytes), so we truncate the last 8 bytes of the original string
// and replace it with the checksum instead of just appending the checksum which is the case if append is false.
inputString = inputString.substring(0, inputString.length() - 8);
}
Checksum checksum = new CRC32();
byte[] inputStringInBytes = inputString.getBytes(StandardCharsets.UTF_8);
checksum.update(inputStringInBytes, 0, inputStringInBytes.length);
byte[] checksumInBytes = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).putLong(checksum.getValue()).array();
// append input string bytes and checksum bytes
byte[] output = new byte[inputStringInBytes.length + checksumInBytes.length];
System.arraycopy(inputStringInBytes, 0, output, 0, inputStringInBytes.length);
System.arraycopy(checksumInBytes, 0, output, inputStringInBytes.length, checksumInBytes.length);
// return Base64 encoded string of the resulting concatenated bytes.
return Base64.getEncoder().encodeToString(output);
}
/**
* Assumes input string is Base64 encoded, and assumes checksum is the last 8 bytes.
* Base64 decodes the input string, extracts original string bytes and checksum bytes, generates checksum from the
* extracted string bytes, and validates against the extracted checksum bytes.
* @param encodedInput
* @return true if the checksum is correct or if the encodedInput is null, false otherwise.
*/
public static boolean isChecksumValid(String encodedInput)
{
// ignore null input
if (null == encodedInput)
return true;
try
{
byte[] inputInBytes = Base64.getDecoder().decode(encodedInput);
// assumes last 8 bytes to be checksum and remaining bytes to be the original input string
byte[] extractedInputStringInBytes = Arrays.copyOfRange(inputInBytes, 0, inputInBytes.length - 8);
byte[] extractedChecksumInBytes = Arrays.copyOfRange(inputInBytes, inputInBytes.length - 8, inputInBytes.length);
Checksum checksum = new CRC32();
checksum.update(extractedInputStringInBytes, 0, extractedInputStringInBytes.length);
byte[] generatedChecksumInBytes = ByteBuffer.allocate(Long.SIZE / Byte.SIZE).putLong(checksum.getValue()).array();
return Arrays.equals(extractedChecksumInBytes, generatedChecksumInBytes);
}
catch (Exception ex)
{
logger.error("Exception during checksum validation for encoded input string: {}", encodedInput, ex);
return false;
}
}
}
| 9,148 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/RestUtil.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.util;
import com.netflix.ndbench.core.config.IConfiguration;
import org.apache.log4j.DefaultThrowableRenderer;
import javax.ws.rs.core.Response;
import java.util.Arrays;
import java.util.stream.Collectors;
/**
* @author vchella
*/
public class RestUtil {
public static Response sendResult(Response.Status status, Result result, IConfiguration config) {
return sendResponse(status, result, config);
}
public static Response sendResult(Result result, IConfiguration config) {
return sendResult(result.isSuccess?Response.Status.OK:Response.Status.INTERNAL_SERVER_ERROR,result, config);
}
public static Response sendErrorResponse(String errorMessage, Exception exception, IConfiguration config)
{
return sendResponse(Response.Status.INTERNAL_SERVER_ERROR, new ErrorResponse(errorMessage, exception), config);
}
public static Response sendErrorResponse(String errorMessage, IConfiguration config)
{
return sendResponse(Response.Status.INTERNAL_SERVER_ERROR, new ErrorResponse(errorMessage), config);
}
public static Response sendSuccessResponse(String returnMessage, IConfiguration config)
{
return sendResponse(Response.Status.OK, new SuccessResponse(returnMessage), config);
}
public static Response sendErrorResponse(IConfiguration config)
{
return sendResponse(Response.Status.INTERNAL_SERVER_ERROR, new ErrorResponse("Unknown error occurred."), config);
}
static <T> Response sendResponse(Response.Status status, T object, IConfiguration config)
{
Response.ResponseBuilder builder = Response.status(status).type(javax.ws.rs.core.MediaType.APPLICATION_JSON).entity(object);
return builder.build();
}
public static <T> Response sendJson(T object, IConfiguration config)
{
return sendResponse(Response.Status.OK, object, config);
}
public static class ErrorResponse extends Result
{
public String detailedMessage = "NA";
public ErrorResponse(String errorMessage)
{
super(false,errorMessage);
}
public ErrorResponse(String errorMessage, Exception e)
{
super(false, errorMessage);
makeMessage(e);
}
private void makeMessage(Exception e) {
if (e != null) {
this.message = this.message + " " + e.getMessage() + " !!! ";
if (e.getCause() != null) {
this.message += e.getCause().getMessage();
}
DefaultThrowableRenderer dtr = new DefaultThrowableRenderer();
detailedMessage = Arrays.stream(dtr.doRender(e)).collect(Collectors.joining("\n"));
}
}
}
public static class SuccessResponse extends Result
{
public SuccessResponse(String successMessage)
{
super(true,successMessage);
}
}
public static class Result
{
public boolean isSuccess;
public String message;
Result(boolean result, String resultMessage)
{
this.isSuccess = result;
this.message=resultMessage;
}
public Result(boolean result)
{
this.isSuccess = result;
this.message="NA";
}
}
}
| 9,149 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/ConstantStepWiseRateIncreaser.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.util;
import javax.annotation.concurrent.Immutable;
/**
 * Ramps a rate from a specified 'initRate' to a 'finalRate' in constant increments over the course of a
 * given time period as calls are made to {@link #getRateForGivenClockTime}.
 * <p>
 * No lookup table is maintained: each call derives the rate arithmetically from how many whole
 * increment intervals have elapsed between the caller-supplied base time and clock time.
 * Callers should record the epoch time at which they wish to begin the rate increase ramp
 * and pass this recorded "baseReferenceTime" as the first argument to
 * {@link #getRateForGivenClockTime}, with the current epoch time as the second argument.
 */
@Immutable
public class ConstantStepWiseRateIncreaser {
public static final int MAX_STEPS = 10 * 1000; // don't want to much overhead in searching data structure
private final int initRate;
private final int finalRate;
private final int incrementIntervalMillisecs;
private final double rateIncrementPerStep;
/**
* Returns a step-wise rate increaser which will ramp from 'initRate' to 'finalRate' over the course of
* 'incrementIntervalMillisecs'. The number of steps by which the rate increases will be determined by
* the value rampPeriodMillisecs / incrementIntervalMillisecs (which MUST evaluate to an integral value with
* no remainder). At each step the rate will increase constantly by (finalRate - initRate) / number-of-steps.
*/
public ConstantStepWiseRateIncreaser(int rampPeriodMillisecs,
int incrementIntervalMillisecs,
int initRate,
int finalRate) {
if (!(initRate >= 0)) {
throw new IllegalArgumentException("initRate must be >= 0");
}
if (!(finalRate > 0)) {
throw new IllegalArgumentException("finalRate must be > 0");
}
if (!(finalRate > initRate)) {
throw new IllegalArgumentException("finalRate must be > initRate");
}
if (!(rampPeriodMillisecs > 0)) {
throw new IllegalArgumentException("rampPeriodMillisecs must be > 0");
}
if (!(incrementIntervalMillisecs > 0)) {
throw new IllegalArgumentException("incrementIntervalMillisecs must be > 0");
}
if (rampPeriodMillisecs % incrementIntervalMillisecs != 0) {
throw new IllegalArgumentException(
"rampPeriodMillisecs should be evenly divisible by incrementIntervalMillisecs");
}
if (rampPeriodMillisecs / incrementIntervalMillisecs > MAX_STEPS) {
throw new IllegalArgumentException(
"rampPeriodMillisecs / incrementIntervalMillisecs should not exceed MAX_STEPS (" + MAX_STEPS + ")");
}
int numSteps = rampPeriodMillisecs / incrementIntervalMillisecs;
double spread = (finalRate - initRate) * 1.0;
this.initRate = initRate;
this.finalRate = finalRate;
this.incrementIntervalMillisecs = incrementIntervalMillisecs;
this.rateIncrementPerStep = spread / numSteps;
}
public double getRateForGivenClockTime(long baseReferenceTime, long clockTime) {
if (baseReferenceTime > clockTime) {
throw new IllegalArgumentException(
"specified baseReferenceTime ("
+ baseReferenceTime + ") is greater than clockTime (" + clockTime + ")");
}
long desiredClockTimeRelativizedToTimeZero = clockTime - baseReferenceTime;
return Math.min(
initRate + desiredClockTimeRelativizedToTimeZero / incrementIntervalMillisecs * rateIncrementPerStep,
this.finalRate);
}
}
| 9,150 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/RateLimitUtil.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.util;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
* @author vchella
*/
public class RateLimitUtil {
private static final Logger logger = LoggerFactory.getLogger(RateLimitUtil.class);
private final AtomicReference<InnerState> ref = new AtomicReference<>(null);
private RateLimitUtil(int rps) {
this.ref.set(new InnerState(rps));
}
public static RateLimitUtil create(int n) {
return new RateLimitUtil(n);
}
public int getRps() {
return ref.get().getRps();
}
public boolean acquire() {
if (ref.get().checkSameSecond()) {
long timeToSleepMs = ref.get().increment();
if (timeToSleepMs != -1) {
try {
Thread.sleep(timeToSleepMs);
return false;
} catch (InterruptedException e) {
// do nothing here
return false;
}
} else {
return true;
}
} else {
InnerState oldState = ref.get();
InnerState newState = new InnerState(oldState.limit);
ref.compareAndSet(oldState, newState);
return false;
}
}
private class InnerState {
private final AtomicInteger counter = new AtomicInteger();
private final AtomicLong second = new AtomicLong(0L);
private final AtomicLong origTime = new AtomicLong(0L);
private final int limit;
private InnerState(int limit) {
this.limit = limit;
counter.set(0);
origTime.set(System.currentTimeMillis());
second.set(origTime.get()/1000);
}
private boolean checkSameSecond() {
long time = System.currentTimeMillis();
return second.get() == time/1000;
}
private long increment() {
if (counter.get() < limit) {
counter.incrementAndGet();
return -1;
} else {
return System.currentTimeMillis() - origTime.get();
}
}
private int getRps() {
return limit;
}
}
public static class UnitTest {
@Test
public void testRate() throws Exception {
int nThreads = 5;
int expectedRps = 100;
final RateLimitUtil rateLimiter = RateLimitUtil.create(expectedRps);
final AtomicBoolean stop = new AtomicBoolean(false);
final AtomicLong counter = new AtomicLong(0L);
final CountDownLatch latch = new CountDownLatch(nThreads);
ExecutorService thPool = Executors.newFixedThreadPool(nThreads);
final CyclicBarrier barrier = new CyclicBarrier(nThreads+1);
final AtomicLong end = new AtomicLong(0L);
for (int i=0; i<nThreads; i++) {
thPool.submit(() -> {
barrier.await();
while (!stop.get()) {
if(rateLimiter.acquire()) {
counter.incrementAndGet();
}
}
latch.countDown();
return null;
});
}
long start = System.currentTimeMillis();
barrier.await();
Thread.sleep(10000);
stop.set(true);
latch.await();
end.set(System.currentTimeMillis());
thPool.shutdownNow();
long duration = end.get() - start;
long totalCount = counter.get();
double resultRps = ((double)(totalCount)/((double)duration/1000.0));
logger.info("Total Count : " + totalCount + ", duration: " + duration + ", getSuccess rps: " + resultRps);
double percentageDiff = Math.abs(expectedRps-resultRps)*100/resultRps;
logger.info("Percentage diff: " + percentageDiff);
Assert.assertTrue(percentageDiff < 12.0);
}
}
}
| 9,151 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/util/LoadPattern.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.util;
/**
* @author vchella
*/
public enum LoadPattern {
RANDOM("random"),
SLIDING_WINDOW("sliding_window"),
SLIDING_WINDOW_FLIP("sliding_window_flip"),
ZIPFIAN("zipfian");
private String text;
LoadPattern(String text) {
this.text = text;
}
public String getText() {
return this.text;
}
public static LoadPattern fromString(String text) {
if (text != null) {
for (LoadPattern b : LoadPattern.values()) {
if (text.equalsIgnoreCase(b.text)) {
return b;
}
}
}
return null;
}
}
| 9,152 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/config/TunableConfig.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.config;
/**
* @author vchella
*/
public class TunableConfig {
public int readRateLimit;
public int writeRateLimit;
public TunableConfig(IConfiguration configuration)
{
this.readRateLimit = configuration.getReadRateLimit();
this.writeRateLimit = configuration.getWriteRateLimit();
}
public int getReadRateLimit()
{
return readRateLimit;
}
public int getWriteRateLimit()
{
return writeRateLimit;
}
public TunableConfig setReadRateLimit(int readRateLimit) {
this.readRateLimit = readRateLimit;
return this;
}
public TunableConfig setWriteRateLimit(int writeRateLimit) {
this.writeRateLimit = writeRateLimit;
return this;
}
}
| 9,153 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/config/NdbenchConfigListener.java | /**
* Copyright (c) 2017 Netflix, Inc. All rights reserved.
*/
package com.netflix.ndbench.core.config;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.archaius.api.PropertyFactory;
import com.netflix.archaius.api.PropertyListener;
import com.netflix.ndbench.core.NdBenchDriver;
import static com.netflix.ndbench.api.plugin.common.NdBenchConstants.PROP_NAMESPACE;
/**
* @author vchella
*/
@Singleton
public class NdbenchConfigListener {
@Inject
public NdbenchConfigListener(PropertyFactory factory, NdBenchDriver ndBenchDriver)
{
factory.getProperty(PROP_NAMESPACE + "readRateLimit").asInteger(100).addListener(new PropertyListener<Integer>() {
@Override
public void onChange(Integer value) {
ndBenchDriver.onReadRateLimitChange();
}
@Override
public void onParseError(Throwable error) {
}
});
factory.getProperty(PROP_NAMESPACE + "writeRateLimit").asInteger(100).addListener(new PropertyListener<Integer>() {
@Override
public void onChange(Integer value) {
ndBenchDriver.onWriteRateLimitChange();
}
@Override
public void onParseError(Throwable error) {
}
});
}
}
| 9,154 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/config/IConfiguration.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.config;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
* @author vchella
*/
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE)
public interface IConfiguration {
void initialize();
// SAMPLE DATA CONFIG
@DefaultValue("1000")
int getNumKeys();
@DefaultValue("100")
int getNumValues();
@DefaultValue("128")
int getDataSize();
@DefaultValue("false")
boolean isPreloadKeys();
@DefaultValue("0.5")
double getZipfExponent();
// NUM WORKERS
default int getNumWriters() {
return Runtime.getRuntime().availableProcessors() * 4;
}
default int getNumReaders() {
return Runtime.getRuntime().availableProcessors() * 4;
}
//Backfill Settings
@DefaultValue("1")
//This configuration usually represents number of workers on your ndbench cluster working on backfill
int getBackfillKeySlots();
// TEST CASE CONFIG
@DefaultValue("true")
boolean isWriteEnabled();
@DefaultValue("true")
boolean isReadEnabled();
//Workers Config
@DefaultValue("5")
int getStatsUpdateFreqSeconds();
@DefaultValue("200")
int getStatsResetFreqSeconds();
//DataGenerator Configs
@DefaultValue("false")
boolean isUseVariableDataSize();
@DefaultValue("1000")
int getDataSizeLowerBound();
@DefaultValue("5000")
int getDataSizeUpperBound();
@DefaultValue("false")
boolean isGenerateChecksum();
@DefaultValue("false")
boolean isValidateChecksum();
//Tunable configs
@DefaultValue("100")
int getReadRateLimit();
@DefaultValue("100")
@PropertyName(name= NdBenchConstants.WRITE_RATE_LIMIT)
int getWriteRateLimit();
@DefaultValue("false")
boolean isAutoTuneEnabled();
@DefaultValue("60")
Integer getAutoTuneRampPeriodMillisecs();
@DefaultValue("1")
Integer getAutoTuneIncrementIntervalMillisecs();
@DefaultValue("1000")
Integer getAutoTuneFinalWriteRate();
/**
*
* Threshold write failure ratio beyond which no auto-tune increase will occur. By default if failure rate is
* grows larger than 1% auto tune triggered rate increases will cease.
*
*/
@DefaultValue("0.01")
Float getAutoTuneWriteFailureRatioThreshold();
/**
* Service config
*/
@DefaultValue("")
String getAllowedOrigins();
}
| 9,155 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/config/GuiceInjectorProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.config;
import com.google.common.collect.Lists;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Module;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.ndbench.core.defaultimpl.NdBenchClientModule;
import com.netflix.ndbench.core.defaultimpl.NdBenchGuiceModule;
import java.util.Arrays;
import java.util.List;
public class GuiceInjectorProvider {
List<Module> getModuleList(AbstractModule... modules) {
List<Module> moduleList = Lists.newArrayList();
// Add default list of modules
moduleList.add(new NdBenchGuiceModule());
moduleList.add(new NdBenchClientModule());
moduleList.add(new ArchaiusModule()); //Archaius-2
// Add any additional caller specified modules
moduleList.addAll(Arrays.asList(modules));
return moduleList;
}
/**
* Creates an injector using modules obtained from the following sources: (1) the hard coded list of modules
* specified in the {@link GuiceInjectorProvider #getModulesList()} method of this class, (2) the 'modules'
* list passed as the first and only argument to this method
*
* @param modules - any additional Guice binding modules which will supplement the list of those added by default
*/
public Injector getInjector(AbstractModule ... modules) {
List<Module> moduleList = getModuleList(modules);
Injector injector = Guice.createInjector(moduleList);
injector.getInstance(IConfiguration.class).initialize();
return injector;
}
}
| 9,156 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/resources/NDBenchClusterResource.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.resources;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.discovery.IClusterDiscovery;
import com.netflix.ndbench.core.util.RestUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.*;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
/**
* @author vchella
*/
@Path("/ndbench/cluster")
public class NDBenchClusterResource {
private static final Logger logger = LoggerFactory.getLogger(NDBenchClusterResource.class);
private final IClusterDiscovery clusterManager;
private final IConfiguration config;
@Context
HttpServletRequest request;
@Inject
public NDBenchClusterResource(IClusterDiscovery clusterManager, IConfiguration config) {
this.clusterManager = clusterManager;
this.config = config;
}
@Path("/list")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getApps() throws Exception {
logger.info("Getting cluster list");
try {
return sendJson(clusterManager.getApps());
} catch (Exception e) {
logger.error("Error getting Apps list from ClusterManager", e);
return sendErrorResponse("get cluster/list failed!");
}
}
@Path("/{appname}/list")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getApps(@PathParam("appname") String appname) throws Exception {
logger.info("Getting nodes list for app: "+appname+", default Port used: "+ request.getServerPort());
try {
return sendJson(clusterManager.getEndpoints(appname, request.getServerPort()));
} catch (Exception e) {
logger.error("Error getting Host list from ClusterManager for app: "+appname, e);
return sendErrorResponse("get cluster host list failed!");
}
}
private Response sendErrorResponse(String errorMessage) {
return RestUtil.sendErrorResponse(errorMessage, this.config);
}
private <T> Response sendJson(T object) {
return RestUtil.sendJson(object, this.config);
}
}
| 9,157 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/resources/NdBenchResource.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.resources;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.core.DataBackfill;
import com.netflix.ndbench.core.NdBenchClientFactory;
import com.netflix.ndbench.core.NdBenchDriver;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.generators.KeyGenerator;
import com.netflix.ndbench.core.util.LoadPattern;
import com.netflix.ndbench.core.util.RestUtil;
import com.sun.jersey.multipart.FormDataParam;
import groovy.lang.GroovyClassLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import static com.netflix.ndbench.core.util.RestUtil.*;
/**
* @author vchella, pencal
*/
@Path("/ndbench/driver")
public class NdBenchResource {
private static final Logger logger = LoggerFactory.getLogger(NdBenchResource.class);
private final NdBenchClientFactory clientFactory;
private final NdBenchDriver ndBenchDriver;
private final DataBackfill dataBackfill;
private final IConfiguration config;
private final NdBenchMonitor ndBenchMonitor;
@Inject
public NdBenchResource(NdBenchClientFactory cFactory, NdBenchDriver ndBenchDriver,
DataBackfill dataBackfill, IConfiguration config, NdBenchMonitor ndBenchMonitor) {
this.clientFactory = cFactory;
this.ndBenchDriver = ndBenchDriver;
this.dataBackfill = dataBackfill;
this.config = config;
this.ndBenchMonitor = ndBenchMonitor;
}
@Path("/initfromscript")
@POST
@Consumes(MediaType.MULTIPART_FORM_DATA)
@Produces(MediaType.APPLICATION_JSON)
public Response initfromscript(@FormDataParam("dynamicplugin") String dynamicPlugin) throws Exception {
try {
GroovyClassLoader gcl = new GroovyClassLoader();
Class classFromScript = gcl.parseClass(dynamicPlugin);
Object objectFromScript = classFromScript.newInstance();
NdBenchClient client = (NdBenchClient) objectFromScript;
ndBenchDriver.init(client);
return sendSuccessResponse("NdBench client - dynamic plugin initiated with script!");
} catch (Exception e) {
logger.error("Error initializing dynamic plugin from script", e);
return sendErrorResponse("script initialization failed for dynamic plugin!", e);
}
}
@Path("/startDataFill")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response startDataFill() throws Exception {
logger.info("Starting NdBench data fill");
try {
NdBenchAbstractClient<?> client = ndBenchDriver.getClient();
dataBackfill.backfill(client);
return sendSuccessResponse("data fill done!");
} catch (Exception e) {
logger.error("Error starting datafill", e);
return sendErrorResponse("dataFill failed!", e);
}
}
@Path("/startDataFillAsync")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response startDataFillAsync() throws Exception {
logger.info("Starting NdBench data fill - Async");
try {
NdBenchAbstractClient<?> client = ndBenchDriver.getClient();
dataBackfill.backfillAsync(client);
return sendSuccessResponse( "Async data fill started !");
} catch (Exception e) {
logger.error("Error starting datafill", e);
return sendErrorResponse("Async dataFill failed to start!", e);
}
}
@Path("/startConditionalDataFill")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response conditionalBackfill() throws Exception {
logger.info("Starting NdBench data fill");
try {
NdBenchAbstractClient<?> client = ndBenchDriver.getClient();
dataBackfill.conditionalBackfill(client);
return sendSuccessResponse("data fill done!");
} catch (Exception e) {
logger.error("Error starting datafill", e);
return sendErrorResponse("dataFill failed!", e);
}
}
@Path("/startVerifyDataFill")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response verifyBackfill() throws Exception {
logger.info("Starting NdBench data fill");
try {
NdBenchAbstractClient<?> client = ndBenchDriver.getClient();
dataBackfill.verifyBackfill(client);
return sendSuccessResponse("data fill done!");
} catch (Exception e) {
logger.error("Error starting datafill", e);
return sendErrorResponse("dataFill failed!", e);
}
}
@Path("/stopDataFill")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response stopDataFill() throws Exception {
logger.info("Stop NdBench data fill");
try {
dataBackfill.stopBackfill();
return sendSuccessResponse("data fill stop!" );
} catch (Exception e) {
logger.error("Error stop datafill", e);
return sendErrorResponse("dataFill failed!", e);
}
}
@Path("/shutdownDataFill")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response shutdownDataFill() throws Exception {
logger.info("Shutdown NdBench data fill");
try {
dataBackfill.shutdown();
return sendSuccessResponse("data fill stop!" );
} catch (Exception e) {
logger.error("Error shutdown datafill", e);
return sendErrorResponse("dataFill failed!", e);
}
}
@Path("/init/{client}")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response init(@PathParam("client") String clientName) throws Exception {
try {
NdBenchAbstractClient<?> client = clientFactory.getClient(clientName);
ndBenchDriver.init(client);
return sendSuccessResponse("NdBench client initiated!");
} catch (Exception e) {
logger.error("Error initializing the client - "+clientName, e);
return sendErrorResponse("Client initialization failed!", e);
}
}
@Path("/start")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response start(@DefaultValue("random") @QueryParam("loadPattern") String loadPattern,
@DefaultValue("-1") @QueryParam("windowSize") int windowSize,
@DefaultValue("-1") @QueryParam("durationInSec") long durationInSec,
@DefaultValue("1") @QueryParam("bulkSize") int bulkSize) throws Exception {
try {
LoadPattern loadPatternType = LoadPattern.fromString(loadPattern);
Result validationResult = validateLoadPatternParams(loadPatternType, windowSize, durationInSec);
if (validationResult.isSuccess) {
ndBenchDriver.start(loadPatternType, windowSize, durationInSec, bulkSize);
logger.info("Starting NdBench test");
return sendSuccessResponse("NDBench test started");
} else {
return sendResult(validationResult);
}
} catch (Exception e) {
logger.error("Error starting NdBench test", e);
return sendErrorResponse("NdBench start failed! ", e);
}
}
@Path("/startReads")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response startReads(@DefaultValue("random") @QueryParam("loadPattern") String loadPattern,
@DefaultValue("-1") @QueryParam("windowSize") int windowSize,
@DefaultValue("-1") @QueryParam("durationInSec") long durationInSec,
@DefaultValue("1") @QueryParam("bulkSize") int bulkSize) throws Exception {
try {
LoadPattern loadPatternType = LoadPattern.fromString(loadPattern);
Result validationResult = validateLoadPatternParams(loadPatternType, windowSize, durationInSec);
if (validationResult.isSuccess) {
ndBenchDriver.startReads(loadPatternType, windowSize, durationInSec, bulkSize);
logger.info("Starting NdBench reads");
return sendSuccessResponse("NDBench reads started");
} else {
return sendResult(validationResult);
}
} catch (Exception e) {
logger.error("Error starting NdBench read test", e);
return sendErrorResponse("NdBench startReads failed! ", e);
}
}
@Path("/stopReads")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response stopReads() throws Exception {
logger.info("stopping NdBenchread test");
try {
ndBenchDriver.stopReads();
return sendSuccessResponse("NdBench reads stopped!");
} catch (Exception e) {
logger.error("Error stopping NdBench reads", e);
return sendErrorResponse("NdBench stopreads failed! ", e);
}
}
@Path("/startWrites")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response startWrites(@DefaultValue("random") @QueryParam("loadPattern") String loadPattern,
@DefaultValue("-1") @QueryParam("windowSize") int windowSize,
@DefaultValue("-1") @QueryParam("durationInSec") long durationInSec,
@DefaultValue("1") @QueryParam("bulkSize") int bulkSize) throws Exception {
try {
LoadPattern loadPatternType = LoadPattern.fromString(loadPattern);
Result validationResult = validateLoadPatternParams(loadPatternType, windowSize, durationInSec);
if (validationResult.isSuccess) {
ndBenchDriver.startWrites(loadPatternType, windowSize, durationInSec, bulkSize);
logger.info("Starting NdBench writes");
return sendSuccessResponse("NDBench writes started");
} else {
return sendResult(validationResult);
}
} catch (Exception e) {
logger.error("Error starting NdBench write test", e);
return sendErrorResponse("NdBench startWrites failed! ", e);
}
}
@Path("/stopWrites")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response stopWrites() throws Exception {
logger.info("stopping NdBenchwrite test");
try {
ndBenchDriver.stopWrites();
return sendSuccessResponse("NdBench writes stopped!");
} catch (Exception e) {
logger.error("Error stopping NdBench writes", e);
return sendErrorResponse("NdBench stopwrites failed! ", e);
}
}
@Path("/stop")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response stop() throws Exception {
logger.info("Stopping NdBench tests");
try {
ndBenchDriver.stop();
return sendSuccessResponse("NdBench test stopped!");
} catch (Exception e) {
logger.error("Error stopping NdBench test", e);
return sendErrorResponse("NdBench stop failed! ", e);
}
}
@Path("/readSingle/{key}")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response readSingle(@PathParam("key") String key) throws Exception {
try {
String value = ndBenchDriver.readSingle(key);
return sendSuccessResponse(value);
} catch (Exception e) {
return sendErrorResponse("NdBench readSingle failed! ", e);
}
}
@Path("/writeSingle/{key}")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response writeSingle(@PathParam("key") String key) throws Exception {
try {
String result = ndBenchDriver.writeSingle(key);
return sendSuccessResponse(result);
} catch (Exception e) {
logger.error("ERROR: " + e.getMessage());
return sendErrorResponse("NdBench writeSingle failed! ", e);
}
}
@Path("/stats")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response NdBenchStats() throws Exception {
try {
return sendJson(ndBenchMonitor);
} catch (Exception e) {
logger.error("Error getting NdBench stats", e);
return sendErrorResponse("NdBench status failed! ", e);
}
}
@Path("/getReadStatus")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getReadStatus() throws Exception {
try {
if (ndBenchDriver.getIsReadRunning())
return sendSuccessResponse("Read process running");
else return sendSuccessResponse( "No Read process is running");
} catch (Exception e) {
logger.error("Error getting NdBench getReadStatus", e);
return sendErrorResponse("NdBench getReadStatus failed! ", e);
}
}
@Path("/getWriteStatus")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getWriteStatus() throws Exception {
try {
if (ndBenchDriver.getIsWriteRunning())
return sendSuccessResponse("Writes process running");
else return sendSuccessResponse("No Write process is running");
} catch (Exception e) {
logger.error("Error getting NdBench getWriteStatus", e);
return sendErrorResponse("NdBench getWriteStatus failed! ", e);
}
}
@Path("/shutdownclient")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response shutdownClient() throws Exception {
try {
ndBenchDriver.stop();
ndBenchDriver.shutdownClient();
ndBenchMonitor.resetStats();
return sendSuccessResponse("NdBench client uninitialized");
} catch (Exception e) {
logger.error("Error shutting down NdBench client", e);
return sendErrorResponse("NdBench shutdownClient failed! ", e);
}
}
@Path("/getdrivers")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getDrivers() throws Exception {
try {
return sendJson(clientFactory.getClientDrivers());
} catch (Exception e) {
logger.error("Error in getting Client drivers", e);
return sendErrorResponse("NdBench getDrivers failed! ", e);
}
}
@Path("/getserverstatus")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response getServerStatus() throws Exception {
try {
Map<String, Object> serverStatusJson = new HashMap<>();
serverStatusJson.put("ClientDrivers",clientFactory.getClientDrivers());
serverStatusJson.put("LoadPatterns", Arrays.asList(LoadPattern.values()));
String currentRunningDriver="NA",connectionInfo="NA", currentWriteLoadPattern="NA", currentReadLoadPattern="NA";
NdBenchAbstractClient<?> NdBenchClient= ndBenchDriver.getClient();
if(NdBenchClient!=null)
{
if(NdBenchClient.getClass().getAnnotation(NdBenchClientPlugin.class)!=null)
{
currentRunningDriver=NdBenchClient.getClass().getAnnotation(NdBenchClientPlugin.class).value();
}
else
{
currentRunningDriver=NdBenchClient.getClass().getSimpleName();
}
connectionInfo=NdBenchClient.getConnectionInfo();
}
KeyGenerator writeLoadPattern=ndBenchDriver.getWriteLoadPattern();
if(null!=writeLoadPattern)
{
currentWriteLoadPattern= writeLoadPattern.getClass().getSimpleName();
}
KeyGenerator readLoadPattern=ndBenchDriver.getReadLoadPattern();
if(null!=readLoadPattern)
{
currentReadLoadPattern= readLoadPattern.getClass().getSimpleName();
}
serverStatusJson.put("RunningDriver",currentRunningDriver);
serverStatusJson.put("RunningWriteLoadPattern",currentWriteLoadPattern);
serverStatusJson.put("RunningReadLoadPattern",currentReadLoadPattern);
serverStatusJson.put("ConnectionInfo",connectionInfo);
serverStatusJson.put("IsReadsRunning", ndBenchDriver.getIsReadRunning());
serverStatusJson.put("IsWritesRunning", ndBenchDriver.getIsWriteRunning());
serverStatusJson.put("Stats",ndBenchMonitor);
serverStatusJson.put("DriverConfig",config);
serverStatusJson.put("IsBackfillRunning",dataBackfill.getIsBackfillRunning());
return sendJson(serverStatusJson);
} catch (Exception e) {
logger.error("Error in getting getServerStatus", e);
return sendErrorResponse("NdBench getServerStatus failed! ", e);
}
}
@Path("/runworkflow")
@GET
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response runWorkflow() throws Exception {
try {
NdBenchAbstractClient<?> client = ndBenchDriver.getClient();
return sendSuccessResponse(client.runWorkFlow());
} catch (Exception e) {
logger.error("Error in running workflow", e);
return sendErrorResponse("NdBench runworkflow failed! ", e);
}
}
private Result validateLoadPatternParams(LoadPattern loadPattern, long windowSize, long durationInSec)
{
String returnMsg = "Input validation Failure:";
if(loadPattern==null)
{
returnMsg+="loadpattern parameter is not available";
logger.error(returnMsg);
return new ErrorResponse(returnMsg);
}
if(loadPattern.equals(LoadPattern.SLIDING_WINDOW) && (windowSize < 1 || durationInSec < 1)) {
returnMsg += "WindowSize and DurationInSeconds can not be less than 1, provided: windowSize: "+windowSize+", durationInSec: "+durationInSec;
logger.error(returnMsg);
return new ErrorResponse(returnMsg);
}
return new SuccessResponse("");
}
private Response sendSuccessResponse(String returnMessage) {
return RestUtil.sendSuccessResponse(returnMessage, this.config);
}
private Response sendErrorResponse(String errorMessage, Exception e) {
return RestUtil.sendErrorResponse(errorMessage, e, this.config);
}
private Response sendResult(RestUtil.Result result) {
return RestUtil.sendResult(result, this.config);
}
private <T> Response sendJson(T object) {
return RestUtil.sendJson(object, this.config);
}
} | 9,158 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/resources/NDBenchConfigResource.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.resources;
import com.netflix.archaius.api.config.SettableConfig;
import com.netflix.archaius.api.inject.RuntimeLayer;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.config.TunableConfig;
import com.netflix.ndbench.core.util.RestUtil;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.ws.rs.*;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import java.util.Map;
/**
 * REST resource for inspecting and mutating NdBench configuration at runtime.
 * Reads come from the effective {@link IConfiguration}; writes go into the
 * Archaius runtime (settable) layer under the NdBench property namespace.
 *
 * @author vchella
 */
@Path("/ndbench/config")
public class NDBenchConfigResource {
    private static final Logger logger = LoggerFactory.getLogger(NDBenchConfigResource.class);

    private final IConfiguration config;
    private final SettableConfig settableConfig;

    @Inject
    public NDBenchConfigResource(IConfiguration config,
                                 @RuntimeLayer SettableConfig settableConfig
    ) {
        this.config = config;
        this.settableConfig = settableConfig;
    }

    /** Returns the full effective configuration as JSON. */
    @Path("/list")
    @GET
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response getConfigOptions() throws Exception {
        logger.info("Getting Configuration list");
        try {
            return sendJson(config);
        } catch (Exception e) {
            logger.error("Error getting Configuration", e);
            return sendErrorResponse("get config/list failed!");
        }
    }

    /** Applies the posted name/value pairs to the runtime configuration layer. */
    @Path("/set")
    @POST
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response setConfigOptions(Map<String, String> propsToSet) throws Exception {
        logger.info("Setting Configuration list");
        try {
            applySettableProperties(propsToSet);
            return sendSuccessResponse("Properties have been applied");
        } catch (Exception e) {
            logger.error("Error setting Configuration", e);
            return sendErrorResponse("get config/set failed!");
        }
    }

    /** CORS preflight handler for {@code /set}. */
    @Path("/set")
    @OPTIONS
    public Response setConfigOptionsPreflight() throws Exception {
        return sendSuccessResponse("OK");
    }

    /** Returns the tunable subset of the configuration as JSON. */
    @Path("/tunable/list")
    @GET
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response getTunableOptions() throws Exception {
        logger.info("Getting Tunable Configuration list");
        try {
            TunableConfig tunableConfig = new TunableConfig(config);
            return sendJson(tunableConfig);
        } catch (Exception e) {
            logger.error("Error getting Configuration", e);
            return sendErrorResponse("get config/list failed!");
        }
    }

    /** Applies the posted tunable name/value pairs to the runtime layer. */
    @Path("/tunable/set")
    @POST
    @Consumes(MediaType.APPLICATION_JSON)
    @Produces(MediaType.APPLICATION_JSON)
    public Response setTunableOptions(Map<String, String> propsToSet) throws Exception {
        logger.info("Setting Tunable Configuration list");
        try {
            applySettableProperties(propsToSet);
            return sendSuccessResponse("Tunable Properties have been applied");
        } catch (Exception e) {
            logger.error("Error setting Tunable Configuration", e);
            return sendErrorResponse("get config/tunable/set failed!");
        }
    }

    /** CORS preflight handler for {@code /tunable/set}. */
    @Path("/tunable/set")
    @OPTIONS
    public Response setTunableOptionsPreflight() throws Exception {
        return sendSuccessResponse("OK");
    }

    /**
     * Copies every non-empty key/value pair into the runtime (settable)
     * configuration layer, prefixing keys with the NdBench property namespace.
     * Shared by {@code /set} and {@code /tunable/set}, which previously
     * duplicated this loop verbatim.
     */
    private void applySettableProperties(Map<String, String> propsToSet) {
        for (Map.Entry<String, String> entry : propsToSet.entrySet()) {
            if (entry.getKey() != null && !entry.getKey().isEmpty()
                    && entry.getValue() != null && !entry.getValue().isEmpty()) {
                settableConfig.setProperty(NdBenchConstants.PROP_NAMESPACE + entry.getKey(), entry.getValue());
            }
        }
    }

    private Response sendSuccessResponse(String returnMessage) {
        return RestUtil.sendSuccessResponse(returnMessage, this.config);
    }

    private Response sendErrorResponse(String errorMessage) {
        return RestUtil.sendErrorResponse(errorMessage, this.config);
    }

    private <T> Response sendJson(T object) {
        return RestUtil.sendJson(object, this.config);
    }
}
| 9,159 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/LocalClusterDiscovery.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.discovery;
import java.net.Inet4Address;
import java.util.Arrays;
import java.util.List;
/**
 * Cluster discovery for a single-node, local deployment: the only "app" and
 * the only endpoint are the loopback address of this machine.
 *
 * @author vchella
 */
public class LocalClusterDiscovery implements IClusterDiscovery {

    /** Loopback address of this host (e.g. "127.0.0.1"). */
    private String loopbackHost() {
        return Inet4Address.getLoopbackAddress().getHostAddress();
    }

    @Override
    public List<String> getApps() {
        return Arrays.asList(loopbackHost());
    }

    @Override
    public List<String> getEndpoints(String appName, int defaultPort) {
        String endpoint = loopbackHost() + ":" + defaultPort;
        return Arrays.asList(endpoint);
    }
}
| 9,160 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/IClusterDiscovery.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.discovery;
import java.util.List;
/**
 * Discovery abstraction that supplies the set of NdBench "apps" (clusters)
 * and the endpoints belonging to each app. Implementations in this package
 * back it with the local loopback address, a classpath config file, Cloud
 * Foundry VCAP metadata, or an AWS auto-scaling group.
 *
 * @author vchella
 */
public interface IClusterDiscovery {
/** Names of all apps/clusters visible to this discovery source. */
List<String> getApps();
/**
 * Endpoints for the given app. Implementations whose backing source has no
 * port information append {@code defaultPort} to each host; others ignore it.
 */
List<String> getEndpoints(String appName, int defaultPort);
}
| 9,161 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/CfClusterDiscovery.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.discovery;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Cluster discovery for Cloud Foundry deployments: derives both the app name
 * and the endpoint from the first route URI in the {@code VCAP_APPLICATION}
 * environment variable that CF injects into every application container.
 *
 * @author Pulkit Chandra
 */
public class CfClusterDiscovery implements IClusterDiscovery {

    private static final Logger logger = LoggerFactory.getLogger(CfClusterDiscovery.class.getName());

    @Override
    public List<String> getApps() {
        return Arrays.asList(getVmRouteName());
    }

    /**
     * Returns the first route URI from VCAP_APPLICATION.
     *
     * @throws IllegalStateException if VCAP_APPLICATION is missing or contains
     *         no usable "uris" entry (previously these cases surfaced as bare
     *         NullPointerExceptions)
     */
    private String getVmRouteName() {
        String vcap_application = System.getenv("VCAP_APPLICATION");
        if (vcap_application == null || vcap_application.isEmpty()) {
            throw new IllegalStateException("VCAP_APPLICATION environment variable is not set");
        }
        ObjectMapper mapper = new ObjectMapper();
        Map<String, List<String>> vcap_map = new HashMap<>();
        try {
            vcap_map = mapper.readValue(vcap_application.getBytes(), HashMap.class);
        } catch (IOException e) {
            // Pass the exception as the throwable argument so the stack trace
            // is logged (previously it was string-concatenated away).
            logger.error("Exception while reading vcap_application to Map", e);
        }
        List<String> uris = vcap_map.get("uris");
        if (uris == null || uris.isEmpty()) {
            throw new IllegalStateException("VCAP_APPLICATION contains no 'uris' entry");
        }
        return uris.get(0);
    }

    @Override
    public List<String> getEndpoints(String appName, int defaultPort) {
        // CF routes carry their own addressing, so defaultPort is ignored.
        return Arrays.asList(getVmRouteName());
    }
}
| 9,162 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/AWSUtil.java | /**
* Copyright (c) 2018 Netflix, Inc. All rights reserved.
*/
package com.netflix.ndbench.core.discovery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
/**
 * Static helpers for querying the EC2 instance-metadata service
 * (169.254.169.254) for the hostname and instance id of the machine NdBench
 * is running on.
 *
 * @author vchella
 */
public class AWSUtil {
    // Fixed: the logger was registered under LocalClusterDiscovery's name (copy-paste bug).
    private static final Logger logger = LoggerFactory.getLogger(AWSUtil.class.getName());

    private AWSUtil() {
        // Utility class; not instantiable.
    }

    /**
     * Returns this instance's public hostname from EC2 metadata, falling back
     * to the local (VPC-internal) hostname when the public one is unavailable.
     */
    public static String getLocalhostName() {
        String urlPublic = "http://169.254.169.254/latest/meta-data/public-hostname";
        String urlLocal = "http://169.254.169.254/latest/meta-data/local-hostname";
        try {
            return parseAwsMetadataByURL(urlPublic);
        }
        catch (Exception e) {
            logger.error("Unable to get the public hostname name. Trying local...", e);
            return parseAwsMetadataByURL(urlLocal);
        }
    }

    /**
     * Returns this instance's EC2 instance id, or {@code null} when the
     * metadata service cannot be reached.
     */
    public static String getLocalInstanceId() {
        String instanceId = "http://169.254.169.254/latest/meta-data/instance-id";
        try {
            return parseAwsMetadataByURL(instanceId);
        }
        catch (Exception e) {
            // Fixed: the message previously talked about the hostname (copy-paste).
            logger.error("Unable to get the instance id from EC2 metadata", e);
        }
        return null;
    }

    /**
     * Performs a GET on the given metadata URL and returns the trimmed body.
     * Uses try-with-resources: the original finally block called in.close()
     * and threw NullPointerException whenever the connection failed before
     * the reader was assigned.
     *
     * @throws RuntimeException wrapping any I/O failure
     */
    private static String parseAwsMetadataByURL(String urlPublic) {
        try {
            HttpURLConnection con = (HttpURLConnection) new URL(urlPublic).openConnection();
            con.setRequestMethod("GET");
            try (BufferedReader in = new BufferedReader(new InputStreamReader(con.getInputStream()))) {
                String inputLine;
                StringBuilder response = new StringBuilder();
                while ((inputLine = in.readLine()) != null) {
                    response.append(inputLine);
                }
                return response.toString().trim();
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| 9,163 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/AwsAsgDiscovery.java | /**
* Copyright (c) 2018 Netflix, Inc. All rights reserved.
*/
package com.netflix.ndbench.core.discovery;
import com.amazonaws.services.autoscaling.AmazonAutoScaling;
import com.amazonaws.services.autoscaling.AmazonAutoScalingClientBuilder;
import com.amazonaws.services.autoscaling.model.*;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
import com.amazonaws.services.ec2.model.DescribeInstancesResult;
import com.amazonaws.services.ec2.model.Instance;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import com.netflix.ndbench.core.config.IConfiguration;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.stream.Collectors;
/**
 * AwsAsgDiscovery assumes you have enough permissions to run
 * autoscaling:DescribeAutoScalingInstances and ec2:DescribeInstances requests
 * on AWS.
 *
 * This class also assumes that NdBench is deployed in an ASG.
 *
 * <b>Important:</b> Be sure to fill in your AWS access credentials in
 * ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows
 * users) before you try to run this sample.
 *
 * @author vchella
 */
public class AwsAsgDiscovery implements IClusterDiscovery {
    // Fixed: the logger was registered under LocalClusterDiscovery's name (copy-paste bug).
    private static final Logger logger = LoggerFactory.getLogger(AwsAsgDiscovery.class.getName());

    // Injected driver configuration; protected+final keeps it readable from
    // subclasses (this class exposes a protected factory hook) while
    // preventing reassignment.
    protected final IConfiguration config;

    @Inject
    public AwsAsgDiscovery(IConfiguration configuration)
    {
        this.config = configuration;
    }

    /** The single "app" is the ASG this instance belongs to. */
    @Override
    public List<String> getApps() {
        return Arrays.asList(getCurrentAsgName());
    }

    /** Public DNS names of the healthy ASG members, each suffixed with the port. */
    @Override
    public List<String> getEndpoints(String appName, int defaultPort) {
        return getRacMembership().stream().map(s -> s + ":" + defaultPort).collect(Collectors.toList());
    }

    /**
     * Returns the distinct public DNS names of all instances in this node's
     * ASG that are not terminating/terminated, or an empty list if the AWS
     * lookup fails for any reason.
     */
    public List<String> getRacMembership()
    {
        /*
         * Create your credentials file at ~/.aws/credentials (C:\Users\USER_NAME\.aws\credentials for Windows users)
         * and save the following lines after replacing the underlined values with your own.
         *
         * [default]
         * aws_access_key_id = YOUR_ACCESS_KEY_ID
         * aws_secret_access_key = YOUR_SECRET_ACCESS_KEY
         */
        AmazonAutoScaling client = null;
        AmazonEC2 ec2Client = null;
        try
        {
            client = getAutoScalingClient();
            ec2Client = AmazonEC2ClientBuilder.standard().build();
            String myAsgName = getCurrentAsgName();

            DescribeAutoScalingGroupsRequest asgReq =
                    new DescribeAutoScalingGroupsRequest().withAutoScalingGroupNames(myAsgName);
            DescribeAutoScalingGroupsResult res = client.describeAutoScalingGroups(asgReq);

            // Keep only members that are active (or becoming active).
            List<String> instanceIds = Lists.newArrayList();
            for (AutoScalingGroup asg : res.getAutoScalingGroups())
            {
                for (com.amazonaws.services.autoscaling.model.Instance ins : asg.getInstances()) {
                    String state = ins.getLifecycleState();
                    if (!(state.equalsIgnoreCase("Terminating")
                            || state.equalsIgnoreCase("shutting-down")
                            || state.equalsIgnoreCase("Terminated"))) {
                        instanceIds.add(ins.getInstanceId());
                    }
                }
            }
            logger.info(String.format("Querying Amazon returned following instance in the ASG: %s --> %s",
                    myAsgName, StringUtils.join(instanceIds, ",")));

            DescribeInstancesRequest insReq = new DescribeInstancesRequest().withInstanceIds(instanceIds);
            DescribeInstancesResult insRes = ec2Client.describeInstances(insReq);
            return insRes.getReservations().stream()
                    .flatMap(r -> r.getInstances().stream())
                    .map(Instance::getPublicDnsName).distinct().collect(Collectors.toList());
        }
        catch (Exception e)
        {
            logger.error("Exception in getting private IPs from current ASG", e);
            return Collections.emptyList();
        }
        finally
        {
            if (client != null)
                client.shutdown();
            if (ec2Client != null)
                ec2Client.shutdown();
        }
    }

    /**
     * Looks up the ASG name for this EC2 instance via the Auto Scaling API,
     * falling back to "NdBench_Aws_cluster" when none is reported.
     */
    private String getCurrentAsgName()
    {
        AmazonAutoScaling asClient = getAutoScalingClient();
        try {
            DescribeAutoScalingInstancesRequest asgInsReq = new DescribeAutoScalingInstancesRequest()
                    .withInstanceIds(AWSUtil.getLocalInstanceId());
            DescribeAutoScalingInstancesResult asgInsRes = asClient.describeAutoScalingInstances(asgInsReq);
            String myAsgName = asgInsRes.getAutoScalingInstances().get(0).getAutoScalingGroupName();
            return myAsgName != null && myAsgName.length() > 0 ? myAsgName : "NdBench_Aws_cluster";
        } finally {
            // Fixed: this client was previously created and never shut down.
            asClient.shutdown();
        }
    }

    /** Factory for the Auto Scaling client; protected so subclasses/tests can stub it. */
    protected AmazonAutoScaling getAutoScalingClient() {
        return AmazonAutoScalingClientBuilder.standard().build();
    }
}
| 9,164 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/ConfigFileDiscovery.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.discovery;
import com.google.common.collect.Maps;
import com.google.gson.JsonArray;
import com.google.gson.JsonElement;
import com.google.gson.JsonParser;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.ByteArrayOutputStream;
import java.io.InputStream;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
/**
 * Cluster discovery backed by a JSON config file on the classpath
 * ({@code NdBenchConstants.CONFIG_CLUSTER_DISCOVERY_NAME}). The file maps an
 * app name to an array of endpoint strings; the whole map is parsed once at
 * construction time and served from memory afterwards.
 *
 * @author vchella
 */
@Singleton
public class ConfigFileDiscovery implements IClusterDiscovery {
    private final Map<String, List<String>> clusterMap = Maps.newConcurrentMap();
    private static final Logger logger = LoggerFactory.getLogger(ConfigFileDiscovery.class.getName());

    public ConfigFileDiscovery()
    {
        String strClusterData = readDiscoveryResource();
        if (strClusterData == null) {
            // Missing/unreadable resource: leave the cluster map empty instead
            // of failing construction (the error has already been logged).
            return;
        }
        JsonParser parser = new JsonParser();
        JsonElement jsonElement = parser.parse(strClusterData);
        if (jsonElement != null)
        {
            for (Map.Entry<String, JsonElement> entry : jsonElement.getAsJsonObject().entrySet())
            {
                if (!entry.getKey().isEmpty()) {
                    // Skip empty app names and empty endpoint strings.
                    List<String> lstEndpoints = new LinkedList<>();
                    JsonArray jsonArray = entry.getValue().getAsJsonArray();
                    for (JsonElement ele : jsonArray) {
                        if (!ele.getAsString().isEmpty())
                        {
                            lstEndpoints.add(ele.getAsString());
                        }
                    }
                    clusterMap.put(entry.getKey(), lstEndpoints);
                }
            }
        }
    }

    @Override
    public List<String> getApps() {
        return new LinkedList<>(clusterMap.keySet());
    }

    @Override
    public List<String> getEndpoints(String appName, int defaultPort) {
        // NOTE(review): returns null for an unknown app name (original
        // behavior, preserved); callers should handle that case.
        return clusterMap.get(appName);
    }

    /**
     * Reads the discovery JSON from the classpath and returns it as a UTF-8
     * string, or {@code null} when the resource is missing or unreadable.
     * The stream is closed via try-with-resources; the original opened it in
     * the constructor and never closed it, and would NPE if it was absent.
     */
    private String readDiscoveryResource() {
        try (InputStream inputStream =
                     this.getClass().getClassLoader().getResourceAsStream(NdBenchConstants.CONFIG_CLUSTER_DISCOVERY_NAME)) {
            if (inputStream == null) {
                logger.error(String.format("Resource %s not found on classpath for cluster discovery",
                        NdBenchConstants.CONFIG_CLUSTER_DISCOVERY_NAME));
                return null;
            }
            ByteArrayOutputStream result = new ByteArrayOutputStream();
            byte[] buffer = new byte[1024];
            int length;
            while ((length = inputStream.read(buffer)) != -1) {
                result.write(buffer, 0, length);
            }
            return result.toString("UTF-8");
        } catch (Exception e) {
            logger.error(String.format("Exception while loading %s file for cluster discovery",
                    NdBenchConstants.CONFIG_CLUSTER_DISCOVERY_NAME), e);
            return null;
        }
    }
}
| 9,165 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/discovery/AWSLocalClusterDiscovery.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.core.discovery;
import java.util.Arrays;
import java.util.List;
/**
* This class does Cluster discovery at AWS VPC cloud env. <BR>
* First try to resolve the public-hostname if present otherwise it gets the local-hostname IP address.
*
* @author diegopacheco
* @since 10/20/2016
*
*/
public class AWSLocalClusterDiscovery implements IClusterDiscovery {

    /** Single-element list holding this instance's resolved hostname. */
    @Override
    public List<String> getApps() {
        String localHost = AWSUtil.getLocalhostName();
        return Arrays.asList(localHost);
    }

    /** Single endpoint formed from the local hostname and the supplied default port. */
    @Override
    public List<String> getEndpoints(String appName, int defaultPort) {
        String endpoint = AWSUtil.getLocalhostName() + ":" + defaultPort;
        return Arrays.asList(endpoint);
    }
}
| 9,166 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/operations/ReadOperation.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.operations;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.NdBenchDriver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* @author vchella
*/
public class ReadOperation implements NdBenchDriver.NdBenchOperation {
private static final Logger logger = LoggerFactory.getLogger(ReadOperation.class);
private final NdBenchAbstractClient<?> client;
public ReadOperation(NdBenchAbstractClient<?> pClient) {
client = pClient;
}
@Override
public boolean process(NdBenchDriver driver,
NdBenchMonitor monitor,
List<String> keys,
AtomicReference<RateLimiter> ignoredForNow,
boolean isAutoTuneEnabled) {
try {
if (keys.size() > 1) {
//Bulk requests
List<String> values = new ArrayList<>(keys.size());
Long startTime = System.nanoTime();
values.addAll(client.readBulk(keys));
monitor.recordReadLatency((System.nanoTime() - startTime) / 1000);
for (String value : values) {
processCacheStats(value, monitor);
}
} else {
//Single requests
Long startTime = System.nanoTime();
String value = client.readSingle(keys.get(0));
monitor.recordReadLatency((System.nanoTime() - startTime) / 1000);
processCacheStats(value, monitor);
}
monitor.incReadSuccess();
return true;
} catch (Exception e) {
if (driver.getIsReadRunning()) {
monitor.incReadFailure();
logger.error("Failed to process NdBench read operation", e);
} else {
logger.warn("Caught exception while stopping reads: " + e.getMessage());
}
return false;
}
}
private void processCacheStats(String value, NdBenchMonitor monitor)
{
if (value != null) {
monitor.incCacheHit();
} else {
monitor.incCacheMiss();
}
}
@Override
public boolean isReadType() {
return true;
}
@Override
public boolean isWriteType() {
return false;
}
} | 9,167 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/operations/WriteOperation.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.operations;
import com.google.common.util.concurrent.RateLimiter;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.NdBenchDriver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
/**
* Operation to write given the bulk size
*
* @author vchella, pencal
*/
public class WriteOperation<W> implements NdBenchDriver.NdBenchOperation {
private static final Logger logger = LoggerFactory.getLogger(WriteOperation.class);
private final NdBenchAbstractClient<W> client;
public WriteOperation(NdBenchAbstractClient<W> pClient) {
this.client = pClient;
}
@Override
public boolean process(NdBenchDriver driver,
NdBenchMonitor stats,
List<String> keys,
AtomicReference<RateLimiter> rateLimiter,
boolean isAutoTuneEnabled) {
try {
Long startTime = System.nanoTime();
List<W> result;
if (keys.size() > 1) {
// bulk
result = client.writeBulk(keys);
} else {
// single
result = new ArrayList<>(1);
result.add(client.writeSingle(keys.get(0)));
}
stats.recordWriteLatency((System.nanoTime() - startTime)/1000);
if (isAutoTuneEnabled) {
Double newRateLimit;
double currentRate = rateLimiter.get().getRate();
if ((newRateLimit = client.autoTuneWriteRateLimit(currentRate, result, stats)) > 0
&& newRateLimit != currentRate) {
driver.updateWriteRateLimit(newRateLimit);
}
}
stats.incWriteSuccess();
return true;
} catch (Exception e) {
if (driver.getIsWriteRunning()) {
stats.incWriteFailure();
logger.error("Failed to process NdBench write operation", e);
} else {
logger.warn("Caught exception while stopping writes: " + e.getMessage());
}
return false;
}
}
@Override
public boolean isReadType() {
return false;
}
@Override
public boolean isWriteType() {
return true;
}
} | 9,168 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/defaultimpl/NdBenchClientModule.java | /**
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.core.defaultimpl;
import com.google.inject.AbstractModule;
import com.google.inject.TypeLiteral;
import com.google.inject.multibindings.MapBinder;
import com.netflix.ndbench.api.plugin.NdBenchAbstractClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule;
import org.reflections.Reflections;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Set;
/**
* Uses reflection to discover all NdBench client plugins which (a) reside within the package namespace
* "com.netflix.ndbench", and (b) are annotated with {@link com.netflix.ndbench.core.defaultimpl.NdBenchClientModule}.
* The implementing class of each thusly discovered client plugin and the plugin's name (extracted as the
* parameter to each annotation) are used as entries in a map that enables the plugin's class to be looked up by name.
* <p>
* This class uses similar reflection-based discovery to find all Guice modules required by client plugins.
* Any plugin client which needs Guice bindings only needs to annotate its Guice module with
* {@link com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule}, and that module will be
* auto-installed by this class.
*/
public class NdBenchClientModule extends AbstractModule {

    private static final Logger logger = LoggerFactory.getLogger(NdBenchClientModule.class);

    /** Lazily-created multibinding of plugin name -> client implementation class. */
    private MapBinder<String, NdBenchAbstractClient<?>> maps;

    /**
     * Resolves the plugin's registration name: the {@link NdBenchClientPlugin} annotation
     * value when present, otherwise the implementation's fully-qualified class name.
     */
    private String getAnnotationValue(Class<?> ndBenchClientImpl) {
        String name = ndBenchClientImpl.getName();
        try {
            NdBenchClientPlugin annot = ndBenchClientImpl.getAnnotation(NdBenchClientPlugin.class);
            name = annot.value();
            logger.info("Installing NdBenchClientPlugin: " + ndBenchClientImpl.getName() + " with Annotation: " + name);
        } catch (Exception e) {
            logger.warn("No Annotation found for class :" + name + ", so loading default class name");
        }
        return name;
    }

    /** Registers one discovered client implementation in the name -> client map binding. */
    @SuppressWarnings("unchecked")
    private void installNdBenchClientPlugin(Class<?> ndBenchClientImpl) {
        if (maps == null) {
            TypeLiteral<String> stringTypeLiteral = new TypeLiteral<String>() {
            };
            TypeLiteral<NdBenchAbstractClient<?>> ndbClientTypeLiteral = new TypeLiteral<NdBenchAbstractClient<?>>() {
            };
            maps = MapBinder.newMapBinder(binder(), stringTypeLiteral, ndbClientTypeLiteral);
        }
        String name = getAnnotationValue(ndBenchClientImpl);
        maps.addBinding(name).to((Class<? extends NdBenchAbstractClient<?>>) ndBenchClientImpl);
    }

    @Override
    protected void configure() {
        // Get all implementations of the NdBenchClient interface and install them as plugins.
        Reflections reflections = new Reflections("com.netflix.ndbench.plugin");
        final Set<Class<?>> classes = reflections.getTypesAnnotatedWith(NdBenchClientPlugin.class);
        for (Class<?> ndb : classes) {
            installNdBenchClientPlugin(ndb);
        }
        installGuiceBindingsRequiredByClientPlugins();
    }

    /** Discovers and installs every Guice module annotated with @NdBenchClientPluginGuiceModule. */
    private void installGuiceBindingsRequiredByClientPlugins() {
        Reflections reflections = new Reflections("com.netflix.ndbench.plugin");
        final Set<Class<?>> classes = reflections.getTypesAnnotatedWith(NdBenchClientPluginGuiceModule.class);
        for (Class<?> ndb : classes) {
            install(instantiateGuiceModule(ndb));
        }
    }

    /**
     * Instantiates a plugin Guice module via its no-arg constructor.
     *
     * @throws RuntimeException wrapping the reflective failure. The original dropped the
     *         cause, making instantiation errors impossible to diagnose; it is now chained.
     */
    private AbstractModule instantiateGuiceModule(Class<?> moduleClass) {
        logger.info("adding ndbench client plugin guice module: {}", moduleClass.getCanonicalName());
        try {
            return (AbstractModule) moduleClass.newInstance();
        } catch (InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(
                    "Failed to invoke no argument constructor of Guice binding module class " +
                            moduleClass.getCanonicalName(), e);
        }
    }
}
| 9,169 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/defaultimpl/NdBenchGuiceModule.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.defaultimpl;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.config.NdbenchConfigListener;
import com.netflix.ndbench.core.discovery.*;
import com.netflix.ndbench.core.generators.DefaultDataGenerator;
import com.netflix.ndbench.core.monitoring.FakeMonitor;
import com.netflix.ndbench.core.monitoring.NdBenchDefaultMonitor;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This module defines the default bindings, this can be customized internally if one has specific services for Service discovery, metrics etc.,
* @author vchella
*/
public class NdBenchGuiceModule extends AbstractModule {
private static final Logger logger = LoggerFactory.getLogger(NdBenchGuiceModule.class);
@Override
protected void configure() {
bind(NdBenchMonitor.class).to(NdBenchDefaultMonitor.class);
String discoveryEnv = System.getenv(NdBenchConstants.DISCOVERY_ENV);
logger.info("DISCOVERY_ENV is set to: " + discoveryEnv);
if (discoveryEnv != null && discoveryEnv.equals(NdBenchConstants.DISCOVERY_ENV_CF)) {
bind(IClusterDiscovery.class).to(CfClusterDiscovery.class);
} else if (discoveryEnv != null && discoveryEnv.equals(NdBenchConstants.DISCOVERY_ENV_AWS)) {
bind(IClusterDiscovery.class).to(AWSLocalClusterDiscovery.class);
} else if (discoveryEnv != null && discoveryEnv.equals(NdBenchConstants.DISCOVERY_ENV_AWS_ASG)) {
bind(IClusterDiscovery.class).to(AwsAsgDiscovery.class);
} else if (discoveryEnv != null && discoveryEnv.equals(NdBenchConstants.DISCOVERY_ENV_AWS_CONFIG_FILE)) {
bind(IClusterDiscovery.class).to(ConfigFileDiscovery.class);
}
else {
bind(IClusterDiscovery.class).to(LocalClusterDiscovery.class);
}
bind(DataGenerator.class).to(DefaultDataGenerator.class);
bind(NdbenchConfigListener.class).asEagerSingleton();
}
@Provides
IConfiguration getIConfiguration(ConfigProxyFactory proxyFactory) {
return proxyFactory.newProxy(IConfiguration.class);
}
}
| 9,170 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/KeyGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
/**
* @author vchella
*/
/**
 * Contract for benchmark key generators: produces the sequence of keys a workload will
 * operate on, optionally from a key list materialized up front in memory.
 *
 * @param <T> the key type produced by this generator
 */
public interface KeyGenerator<T> {
    /** Prepares the generator, e.g. pre-loads the key list when {@link #isPreLoadKeys()} is true. */
    void init();
    /** Returns the next key to operate on. */
    T getNextKey();
    /** Whether another key is available; bounded generators return false once their run ends. */
    boolean hasNextKey();
    /** True when keys are materialized up front rather than derived on demand. */
    boolean isPreLoadKeys();
    /** Total size of the key space this generator draws from. */
    int getNumKeys();
}
| 9,171 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/RandomStringKeyGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import java.util.Random;
/**
* @author vchella
*/
/**
 * Key generator that draws keys uniformly at random from the key space
 * "T0".."T{numKeys-1}".
 */
public class RandomStringKeyGenerator extends StringKeyGenerator {

    private final Random random = new Random();

    public RandomStringKeyGenerator(boolean preLoadKeys, int numKeys) {
        super(numKeys, preLoadKeys);
    }

    /** Uniformly random key; served from the pre-loaded list when available. */
    @Override
    public String getNextKey() {
        int idx = random.nextInt(numKeys);
        return isPreLoadKeys() ? keys.get(idx) : "T" + idx;
    }
}
| 9,172 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/DefaultDataGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.core.config.IConfiguration;
import com.netflix.ndbench.core.util.CheckSumUtil;
import org.joda.time.DateTime;
/**
* @author vchella
*/
@Singleton
public class DefaultDataGenerator implements DataGenerator {

    private static Logger logger = LoggerFactory.getLogger(DefaultDataGenerator.class);

    protected final IConfiguration config;

    /** Pre-generated pool of random values; one random entry is replaced every 10 ms. */
    private final List<String> values = new ArrayList<>();
    private final Random vRandom = new Random();
    private final Random vvRandom = new Random(DateTime.now().getMillis()); // variable value random

    /**
     * Generates {@code config.getNumValues()} random strings up front and schedules a
     * background task that continually refreshes random entries of the pool.
     */
    @Inject
    public DefaultDataGenerator(IConfiguration config) {
        this.config = config;
        initialize();
        // Schedule a task to upsert/modify random entries from the pre-generated values.
        // NOTE(review): the executor is never shut down and its thread is non-daemon, and
        // the ArrayList is mutated concurrently with reads — confirm this is intended for
        // a generator whose lifetime equals the JVM's.
        ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
        logger.info("Scheduling a thread to modify random values from generated values data set");
        executor.scheduleAtFixedRate(this::upsertRandomString, 10, 10, TimeUnit.MILLISECONDS);
    }

    /** Returns a random entry from the pre-generated value pool. */
    @Override
    public String getRandomValue() {
        int randomValueIndex = vRandom.nextInt(config.getNumValues());
        return values.get(randomValueIndex);
    }

    /** Unbounded random int (may be negative). */
    @Override
    public Integer getRandomInteger() {
        return vRandom.nextInt();
    }

    /** Random int in [0, numValues). */
    @Override
    public Integer getRandomIntegerValue() {
        return vRandom.nextInt(config.getNumValues());
    }

    /** Freshly generated random string of the configured (possibly variable) size. */
    @Override
    public String getRandomString() {
        return generateRandomString(getValueSize());
    }

    /** Fills the value pool with numValues random strings, logging progress every 1000. */
    private void initialize() {
        Instant start = Instant.now();
        for (int i = 0; i < config.getNumValues(); i++) {
            if (i % 1000 == 0) {
                logger.info("Still initializing sample data for values. So far: " + i + " /" + config.getNumValues());
            }
            values.add(generateRandomString(getValueSize()));
        }
        Instant end = Instant.now();
        logger.info("Duration to initialize the dataset of random data (ISO-8601 format): " + Duration.between(start, end));
    }

    /**
     * Picks the value size: fixed, or uniform within [lowerBound, upperBound) when
     * variable data size is enabled.
     */
    private int getValueSize() {
        if (config.isUseVariableDataSize()) {
            int span = Math.abs(config.getDataSizeUpperBound() - config.getDataSizeLowerBound());
            if (span == 0) {
                // Random.nextInt(0) throws IllegalArgumentException; equal bounds
                // simply mean a fixed size.
                return config.getDataSizeLowerBound();
            }
            return vvRandom.nextInt(span) + config.getDataSizeLowerBound();
        }
        return config.getDataSize();
    }

    /** Replaces one random pool entry with a fresh random string (background task). */
    private void upsertRandomString() {
        values.set(vRandom.nextInt(config.getNumValues()), generateRandomString(getValueSize()));
    }

    /**
     * Builds a random hex string of exactly {@code length} characters, optionally wrapped
     * with a checksum and base64-encoded when checksum generation is configured.
     */
    private String generateRandomString(int length) {
        StringBuilder builder = new StringBuilder();
        while (builder.length() < length) {
            builder.append(Long.toHexString(vRandom.nextLong()));
        }
        String randomString = builder.toString().substring(0, length);
        return config.isGenerateChecksum() ? CheckSumUtil.appendCheckSumAndEncodeBase64(randomString, false) : randomString;
    }
}
| 9,173 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/SlidingWindowStringKeyGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Random;
/**
* @author vchella
*/
/**
 * Key generator whose active window of keys slides linearly across the key space over the
 * course of the test duration; keys are drawn uniformly from the current window.
 */
public class SlidingWindowStringKeyGenerator extends StringKeyGenerator {

    private static final Logger logger = LoggerFactory.getLogger(SlidingWindowStringKeyGenerator.class);

    private final int windowSize;
    private final long testDurationInSeconds;
    private final Random rng = new Random();

    private long startTime;
    private long endTime;

    public SlidingWindowStringKeyGenerator(int windowSize, long testDurationInSeconds, boolean preLoadKeys, int numKeys) {
        super(numKeys, preLoadKeys);
        logger.info("Initialized SlidingWindowKeyGenerator with WindowSize: " + windowSize
                + ", Test Duration (Secs): " + testDurationInSeconds + ", NumKeys: " + numKeys);
        this.windowSize = windowSize;
        this.testDurationInSeconds = testDurationInSeconds;
    }

    /** Records the wall-clock start and end of the run. */
    @Override
    public void init() {
        super.init();
        startTime = System.currentTimeMillis();
        endTime = startTime + (testDurationInSeconds * 1000);
    }

    /** Uniformly random key inside the window anchored at the run's current progress. */
    @Override
    public String getNextKey() {
        int windowStart = getCurrentRecord();
        int windowEnd = windowStart + this.windowSize;
        int nextKey = between(windowStart, windowEnd);
        logger.debug("NumKeys: " + numKeys + " | CurrentKeySet: [" + windowStart + " - " + windowEnd
                + "] | getNextKey(): " + nextKey);
        return "T" + nextKey;
    }

    /** The run ends once the configured test duration has elapsed. */
    @Override
    public boolean hasNextKey() {
        long currentTime = System.currentTimeMillis();
        if (endTime < currentTime) {
            logger.info("No more keys to process since endtime :" + endTime + " < currentTime: " + currentTime);
            return false;
        }
        return true;
    }

    /** Uniform random int in [minNum, maxNum). */
    private int between(int minNum, int maxNum) {
        return rng.nextInt(maxNum - minNum) + minNum;
    }

    /**
     * Maps elapsed wall-clock time linearly onto the key space, yielding the first key of
     * the current window (0 at start, numKeys - windowSize at the end of the run).
     */
    private int getCurrentRecord() {
        long elapsedMs = System.currentTimeMillis() - startTime;
        double fractionComplete = (elapsedMs / 1000d) / testDurationInSeconds;
        double rawRecord = fractionComplete * (numKeys - windowSize);
        return (int) Math.round(rawRecord);
    }
}
| 9,174 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/KeyGeneratorFactory.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import com.netflix.ndbench.core.util.LoadPattern;
import org.slf4j.LoggerFactory;
/**
* @author vchella
*/
public class KeyGeneratorFactory {
private static final org.slf4j.Logger Logger = LoggerFactory.getLogger(KeyGeneratorFactory.class);
public KeyGenerator<String> getKeyGenerator(LoadPattern loadPattern, int numKeys, int windowSize, long durationInSec, boolean preLoadKeys, double zipfExponent) {
Logger.info("Loading "+loadPattern.toString()+" KeyGenerator");
if (loadPattern.equals(LoadPattern.SLIDING_WINDOW)) {
return new SlidingWindowStringKeyGenerator(windowSize, durationInSec, preLoadKeys, numKeys);
}
else if (loadPattern.equals(LoadPattern.SLIDING_WINDOW_FLIP)) {
return new SlidingWindowFlipStringKeyGenerator(windowSize, durationInSec, preLoadKeys, numKeys);
} else if (loadPattern.equals(LoadPattern.ZIPFIAN)) {
return new ZipfianStringKeyGenerator(preLoadKeys, numKeys, zipfExponent);
} else {
return new RandomStringKeyGenerator(preLoadKeys, numKeys);
}
}
}
| 9,175 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/StringKeyGenerator.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
/**
 * Base class for string key generators over the key space "T0".."T{numKeys-1}",
 * optionally pre-loading every key into memory during {@link #init()}.
 */
public abstract class StringKeyGenerator implements KeyGenerator<String> {

    private static final Logger logger = LoggerFactory.getLogger(StringKeyGenerator.class);

    protected final List<String> keys;
    protected final int numKeys;
    private final boolean preloadKeys;

    protected StringKeyGenerator(int numKeys, boolean preloadKeys) {
        this.numKeys = numKeys;
        this.preloadKeys = preloadKeys;
        // Pre-size the list only when it will actually be filled.
        this.keys = preloadKeys ? new ArrayList<>(numKeys) : new ArrayList<>();
    }

    /** Materializes the key list when pre-loading is enabled; otherwise a no-op. */
    @Override
    public void init() {
        if (!isPreLoadKeys()) {
            return;
        }
        logger.info("Preloading " + numKeys + " keys");
        for (int i = 0; i < getNumKeys(); i++) {
            if (i % 10000 == 0) {
                logger.info("Still initializing sample data for Keys. So far: " + i + " /" + numKeys);
            }
            keys.add("T" + i);
        }
        logger.info("Preloaded " + numKeys + " keys");
    }

    @Override
    public boolean isPreLoadKeys() {
        return preloadKeys;
    }

    @Override
    public int getNumKeys() {
        return numKeys;
    }

    /** Unbounded by default; subclasses with a bounded run override this. */
    @Override
    public boolean hasNextKey() {
        return true;
    }
}
| 9,176 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/SlidingWindowFlipStringKeyGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Random;
/**
* @author vchella
*/
/**
 * Key generator that flips between fixed windows of the key space: every
 * windowDurationInMs the active window advances by windowSize keys, wrapping around once
 * the whole space has been covered. Keys are drawn uniformly from the active window.
 */
public class SlidingWindowFlipStringKeyGenerator extends StringKeyGenerator {

    private static final Logger logger = LoggerFactory.getLogger(SlidingWindowFlipStringKeyGenerator.class);

    private final int windowSize;
    private final long windowDurationInMs;
    private final Random rng = new Random();

    private long startTime;

    public SlidingWindowFlipStringKeyGenerator(int windowSize, long windowDurationInMs, boolean preLoadKeys, int numKeys) {
        super(numKeys, preLoadKeys);
        this.windowSize = windowSize;
        this.windowDurationInMs = windowDurationInMs;
    }

    /** Anchors the window schedule at the current wall-clock time. */
    @Override
    public void init() {
        super.init();
        startTime = System.currentTimeMillis();
    }

    /**
     * Determines the currently active window [window*windowSize, window*windowSize+windowSize)
     * and returns a uniformly random key from it.
     */
    @Override
    public String getNextKey() {
        int currentWindow = getCurrentWindowIndex();
        int windowStart = currentWindow * this.windowSize;
        int windowEnd = windowStart + this.windowSize;
        int nextKey = between(windowStart, windowEnd);
        logger.debug("Current Window: " + currentWindow + "" + "| CurrentKeySet: [" + windowStart + " - "
                + windowEnd + "] | getNextKey(): " + nextKey);
        return "T" + nextKey;
    }

    @Override
    public boolean hasNextKey() {
        return true;
    }

    /** Uniform random int in [minNum, maxNum). */
    private int between(int minNum, int maxNum) {
        return rng.nextInt(maxNum - minNum) + minNum;
    }

    /** 0-based index of the active window, wrapping modulo the number of windows in the key space. */
    private int getCurrentWindowIndex() {
        long elapsed = System.currentTimeMillis() - startTime;
        long window = elapsed / windowDurationInMs;
        return (int) window % (numKeys / windowSize);
    }
}
| 9,177 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/generators/ZipfianStringKeyGenerator.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.generators;
import org.apache.commons.math3.distribution.ZipfDistribution;
/**
* @author jolynch
*/
/**
 * Key generator whose key popularity follows a Zipf distribution over the key space
 * "T0".."T{numKeys-1}".
 */
public class ZipfianStringKeyGenerator extends StringKeyGenerator {

    /** Zipf distribution over ranks 1..numKeys (commons-math samples are 1-based). */
    private final ZipfDistribution zipf;

    public ZipfianStringKeyGenerator(boolean preLoadKeys, int numKeys, double exponent) {
        super(numKeys, preLoadKeys);
        this.zipf = new ZipfDistribution(numKeys, exponent);
    }

    /**
     * Returns the next key, drawn with Zipf-distributed popularity.
     * <p>
     * Bug fix: {@code ZipfDistribution.sample()} returns values in [1, numKeys], so the
     * original {@code keys.get(sample)} threw IndexOutOfBoundsException whenever rank
     * numKeys was drawn, and "T0" was never generated in the non-preloaded case. Shifting
     * to a 0-based index keeps keys in the same "T0".."T{numKeys-1}" space used by the
     * other generators.
     */
    @Override
    public String getNextKey() {
        int keyIndex = zipf.sample() - 1;
        if (isPreLoadKeys()) {
            return keys.get(keyIndex);
        }
        return "T" + keyIndex;
    }
}
| 9,178 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/monitoring/NdBenchDefaultMonitor.java | /**
* Copyright (c) 2018 Netflix, Inc. All rights reserved.
*/
package com.netflix.ndbench.core.monitoring;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.config.IConfiguration;
import static com.codahale.metrics.MetricRegistry.name;
/**
 * Default {@link NdBenchMonitor} implementation backed by Dropwizard (Codahale) Metrics.
 * Success/failure totals are Counters, request rates and cache hit/miss are Meters,
 * and read/write latencies are Histograms. All metrics are also published over JMX
 * under the "netflix.ndbench.metrics" domain.
 *
 * @author vchella
 */
@Singleton
public class NdBenchDefaultMonitor implements NdBenchMonitor
{
private final IConfiguration config;
private final MetricRegistry metrics;
// Latency distributions; units are whatever callers pass to recordReadLatency/
// recordWriteLatency (presumably micros or millis -- TODO confirm with callers).
private final Histogram readHistogram;
private final Histogram writeHistogram;
// Monotonic operation-outcome totals.
private final Counter readSuccess;
private final Counter readFailure;
private final Counter writeSuccess;
private final Counter writeFailure;
// Rate meters; cacheHits/cacheMiss also feed getCacheHitRatioInt().
private final Meter cacheHits;
private final Meter cacheMiss;
private final Meter readRPS;
private final Meter writeRPS;
@Inject
public NdBenchDefaultMonitor(IConfiguration config)
{
this.config = config;
this.metrics = new MetricRegistry();
readHistogram = metrics.histogram(name("NdBenchDefaultMonitor", "readlatency"));
writeHistogram = metrics.histogram(name("NdBenchDefaultMonitor", "writelatency"));
readSuccess = metrics.counter(name("NdBenchDefaultMonitor", "readSuccess"));
readFailure = metrics.counter(name("NdBenchDefaultMonitor", "readFailure"));
writeSuccess = metrics.counter(name("NdBenchDefaultMonitor", "writeSuccess"));
writeFailure = metrics.counter(name("NdBenchDefaultMonitor", "writeFailure"));
cacheHits = metrics.meter(name("NdBenchDefaultMonitor", "cacheHits"));
cacheMiss = metrics.meter(name("NdBenchDefaultMonitor", "cacheMiss"));
readRPS = metrics.meter(name("NdBenchDefaultMonitor", "readRPS"));
writeRPS = metrics.meter(name("NdBenchDefaultMonitor", "writeRPS"));
// Start the JMX reporter so metrics are browsable via JMX; it is never stopped
// and lives for the lifetime of the process.
final JmxReporter reporter = JmxReporter.forRegistry(metrics).inDomain("netflix.ndbench.metrics").build();
reporter.start();
}
@Override
public void initialize()
{
}
// --- outcome counters; every read/write outcome also marks the matching rate meter ---
@Override
public void incReadSuccess() {
readSuccess.inc();
readRPS.mark();
}
@Override
public long getReadSuccess() {
return readSuccess.getCount();
}
@Override
public void incReadFailure() {
readFailure.inc();
readRPS.mark();
}
@Override
public long getReadFailure() {
return readFailure.getCount();
}
@Override
public void incWriteSuccess() {
writeSuccess.inc();
writeRPS.mark();
}
@Override
public long getWriteSuccess() {
return writeSuccess.getCount();
}
@Override
public void incWriteFailure() {
writeFailure.inc();
writeRPS.mark();
}
@Override
public long getWriteFailure() {
return writeFailure.getCount();
}
@Override
public void incCacheHit() {
cacheHits.mark();
}
@Override
public long getCacheHits() {
return cacheHits.getCount();
}
@Override
public void incCacheMiss() {
cacheMiss.mark();
}
@Override
public long getCacheMiss() {
return cacheMiss.getCount();
}
@Override
public void recordReadLatency(long duration) {
readHistogram.update(duration);
}
// --- latency snapshot getters; doubles are truncated to long by longValueOfDouble ---
@Override
public long getReadLatAvg() {
return longValueOfDouble(readHistogram.getSnapshot().getMean());
}
@Override
public long getReadLatP50() {
return longValueOfDouble(readHistogram.getSnapshot().getMedian());
}
@Override
public long getReadLatP95() {
return longValueOfDouble(readHistogram.getSnapshot().get95thPercentile());
}
@Override
public long getReadLatP99() {
return longValueOfDouble(readHistogram.getSnapshot().get99thPercentile());
}
@Override
public long getReadLatP995() {
return longValueOfDouble(readHistogram.getSnapshot().getValue(.995));
}
@Override
public long getReadLatP999() {
return longValueOfDouble(readHistogram.getSnapshot().get999thPercentile());
}
@Override
public long getWriteLatAvg() {
return longValueOfDouble(writeHistogram.getSnapshot().getMean());
}
@Override
public long getWriteLatP50() {
return longValueOfDouble(writeHistogram.getSnapshot().getMedian());
}
@Override
public long getWriteLatP95() {
return longValueOfDouble(writeHistogram.getSnapshot().get95thPercentile());
}
@Override
public long getWriteLatP99() {
return longValueOfDouble(writeHistogram.getSnapshot().get99thPercentile());
}
@Override
public long getWriteLatP995() {
return longValueOfDouble(writeHistogram.getSnapshot().getValue(0.995));
}
@Override
public long getWriteLatP999() {
return longValueOfDouble(writeHistogram.getSnapshot().get999thPercentile());
}
// RPS getters report the meters' one-minute exponentially-weighted rate.
@Override
public long getWriteRPS() {
return longValueOfDouble(writeRPS.getOneMinuteRate());
}
@Override
public long getReadRPS() {
return longValueOfDouble(readRPS.getOneMinuteRate());
}
@Override
public void setWriteRPS(long writeRPS) {
// setting RPS does not apply here since, we are tracking RPS via writeSuccess and writeFailure calls
}
@Override
public void setReadRPS(long readRPS) {
// setting RPS does not apply here since, we are tracking RPS via readSuccess and readFailure calls
}
@Override
public void recordWriteLatency(long duration) {
writeHistogram.update(duration);
}
@Override
public int getCacheHitRatioInt() {
return (int) getCacheHitRatio();
}
@Override
public void resetStats() {
// Intentionally empty: this implementation never resets its Codahale metrics.
}
// Hit percentage in [0, 100]; returns 0 before any cache activity has been recorded.
private float getCacheHitRatio() {
long hits = cacheHits.getCount();
long miss = cacheMiss.getCount();
if (hits + miss == 0) {
return 0;
}
return (float) (hits * 100L) / (float) (hits + miss);
}
// Truncates toward zero (Double.longValue semantics).
private long longValueOfDouble(double d) {
return Double.valueOf(d).longValue();
}
}
| 9,179 |
0 | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core | Create_ds/ndbench/ndbench-core/src/main/java/com/netflix/ndbench/core/monitoring/FakeMonitor.java | /*
* Copyright 2016 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.core.monitoring;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.SlidingTimeWindowReservoir;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.NdBenchMonitor;
import com.netflix.ndbench.core.config.IConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
/**
 * In-memory {@link NdBenchMonitor} backed by atomics and Codahale sliding-time-window
 * histograms. The latency histograms are recreated on a fixed schedule so old samples
 * age out of the reported percentiles.
 *
 * @author vchella
 */
@Singleton
public class FakeMonitor implements NdBenchMonitor {
    private static final Logger logger = LoggerFactory.getLogger(FakeMonitor.class);

    // Non-null once the periodic reset task has been scheduled; guards double-init.
    private final AtomicReference<ScheduledExecutorService> timerRef = new AtomicReference<>(null);

    // Swapped wholesale on every stats reset, hence AtomicReference rather than final fields.
    private final AtomicReference<Histogram> readHistogram = new AtomicReference<>();
    private final AtomicReference<Histogram> writeHistogram = new AtomicReference<>();

    private final AtomicLong readSuccess = new AtomicLong(0L);
    private final AtomicLong readFailure = new AtomicLong(0L);
    private final AtomicLong writeSuccess = new AtomicLong(0L);
    private final AtomicLong writeFailure = new AtomicLong(0L);
    private final AtomicLong cacheHits = new AtomicLong(0L);
    private final AtomicLong cacheMiss = new AtomicLong(0L);
    private final AtomicLong readRPS = new AtomicLong(0L);
    private final AtomicLong writeRPS = new AtomicLong(0L);

    private final IConfiguration config;

    @Inject
    public FakeMonitor(IConfiguration config)
    {
        this.config = config;
        checkAndInitTimer();
    }

    @Override
    public void initialize() {
    }

    /**
     * Creates the histograms and schedules their periodic replacement.
     *
     * Bug fix: the histograms are now populated eagerly, before the timer is armed.
     * Previously they stayed null until the first scheduled run fired (one second
     * after construction), so recordReadLatency/recordWriteLatency or any latency
     * getter invoked during that window threw a NullPointerException.
     */
    private void checkAndInitTimer() {
        ScheduledExecutorService timer = timerRef.get();
        if (timer == null) {
            setReadWriteHistograms();
            timer = Executors.newScheduledThreadPool(1);
            logger.info(String.format("Initializing NdBenchMonitor with timing counter reset frequency %d seconds",
                    config.getStatsResetFreqSeconds()));
            // First reset happens one full period after start; the initial histograms
            // were just installed above.
            timer.scheduleAtFixedRate(this::setReadWriteHistograms, config.getStatsResetFreqSeconds(),
                    config.getStatsResetFreqSeconds(), TimeUnit.SECONDS);
            timerRef.set(timer);
        }
    }

    /** Replaces both latency histograms with fresh sliding-window instances. */
    private void setReadWriteHistograms() {
        readHistogram.set(createHistogramFromConfig());
        writeHistogram.set(createHistogramFromConfig());
    }

    @Override
    public void incReadSuccess() {
        readSuccess.incrementAndGet();
    }

    @Override
    public long getReadSuccess() {
        return readSuccess.get();
    }

    @Override
    public void incReadFailure() {
        readFailure.incrementAndGet();
    }

    @Override
    public long getReadFailure() {
        return readFailure.get();
    }

    @Override
    public void incWriteSuccess() {
        writeSuccess.incrementAndGet();
    }

    @Override
    public long getWriteSuccess() {
        return writeSuccess.get();
    }

    @Override
    public void incWriteFailure() {
        writeFailure.incrementAndGet();
    }

    @Override
    public long getWriteFailure() {
        return writeFailure.get();
    }

    @Override
    public void incCacheHit() {
        cacheHits.incrementAndGet();
    }

    @Override
    public long getCacheHits() {
        return cacheHits.get();
    }

    @Override
    public void incCacheMiss() {
        cacheMiss.incrementAndGet();
    }

    @Override
    public long getCacheMiss() {
        return cacheMiss.get();
    }

    @Override
    public void recordReadLatency(long duration) {
        readHistogram.get().update(duration);
    }

    // --- latency snapshot getters; doubles are truncated to long ---

    @Override
    public long getReadLatAvg() {
        return longValueOfDouble(readHistogram.get().getSnapshot().getMean());
    }

    @Override
    public long getReadLatP50() {
        return longValueOfDouble(readHistogram.get().getSnapshot().getMedian());
    }

    @Override
    public long getReadLatP95() {
        return longValueOfDouble(readHistogram.get().getSnapshot().get95thPercentile());
    }

    @Override
    public long getReadLatP99() {
        return longValueOfDouble(readHistogram.get().getSnapshot().get99thPercentile());
    }

    @Override
    public long getReadLatP995() {
        return longValueOfDouble(readHistogram.get().getSnapshot().getValue(.995));
    }

    @Override
    public long getReadLatP999() {
        return longValueOfDouble(readHistogram.get().getSnapshot().get999thPercentile());
    }

    @Override
    public long getWriteLatAvg() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().getMean());
    }

    @Override
    public long getWriteLatP50() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().getMedian());
    }

    @Override
    public long getWriteLatP95() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().get95thPercentile());
    }

    @Override
    public long getWriteLatP99() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().get99thPercentile());
    }

    @Override
    public long getWriteLatP995() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().getValue(0.995));
    }

    @Override
    public long getWriteLatP999() {
        return longValueOfDouble(writeHistogram.get().getSnapshot().get999thPercentile());
    }

    @Override
    public long getWriteRPS() {
        return writeRPS.get();
    }

    @Override
    public long getReadRPS() {
        return readRPS.get();
    }

    @Override
    public void setWriteRPS(long writeRPS) {
        this.writeRPS.set(writeRPS);
    }

    @Override
    public void setReadRPS(long readRPS) {
        this.readRPS.set(readRPS);
    }

    @Override
    public void recordWriteLatency(long duration) {
        writeHistogram.get().update(duration);
    }

    @Override
    public int getCacheHitRatioInt() {
        return (int) getCacheHitRatio();
    }

    /** Zeroes all counters and swaps in fresh latency histograms. */
    @Override
    public void resetStats() {
        readSuccess.set(0L);
        readFailure.set(0L);
        writeSuccess.set(0L);
        writeFailure.set(0L);
        cacheHits.set(0L);
        cacheMiss.set(0L);
        readRPS.set(0L);
        writeRPS.set(0L);
        setReadWriteHistograms();
    }

    /** Cache hit percentage in [0, 100]; 0 before any cache activity is recorded. */
    private float getCacheHitRatio() {
        long hits = cacheHits.get();
        long miss = cacheMiss.get();
        if (hits + miss == 0) {
            return 0;
        }
        return (float) (hits * 100L) / (float) (hits + miss);
    }

    // Truncates toward zero; plain cast avoids the boxing of Double.valueOf(d).longValue().
    private long longValueOfDouble(double d) {
        return (long) d;
    }

    private Histogram createHistogramFromConfig() {
        return new Histogram(new SlidingTimeWindowReservoir(config.getStatsResetFreqSeconds(), TimeUnit.SECONDS));
    }
}
| 9,180 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/operations/CockroachDBRetryableTransaction.java | package com.netflix.ndbench.plugin.cockroachdb.operations;
import java.sql.Connection;
import java.sql.SQLException;
/**
 * A unit of work to execute against CockroachDB inside a retry loop; the caller
 * re-invokes {@link #run(Connection)} after rolling back to a savepoint when the
 * database reports a retryable serialization failure (SQLSTATE 40001).
 */
@FunctionalInterface
public interface CockroachDBRetryableTransaction {
    /**
     * Executes the transaction body using the supplied connection.
     *
     * @param conn open connection, expected to have auto-commit disabled
     * @throws SQLException on any database error, including retryable serialization failures
     */
    void run(Connection conn)
    throws SQLException;
}
| 9,181 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/operations/CockroachDBSimplePlugin.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.cockroachdb.operations;
import java.sql.Connection;
import java.sql.ResultSet;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.plugin.cockroachdb.configs.CockroachDBConfiguration;
/**
 * Simple key/value CockroachDB plugin: one table whose rows have a string key,
 * a fixed integer column and N random string value columns (N = colsPerRow).
 *
 * @author Sumanth Pasupuleti
 */
@Singleton
@NdBenchClientPlugin("CockroachDBSimplePlugin")
public class CockroachDBSimplePlugin extends CockroachDBPluginBase
{
    // %s placeholders (column list, table name) are filled in once by prepareStatements().
    private static String readQuery = "SELECT key, column1, %s FROM %s where key = ";
    private static String writeQuery = "UPSERT INTO %s (key, column1, %s) VALUES ";

    @Inject
    public CockroachDBSimplePlugin(CockroachDBConfiguration cockroachDBConfiguration) {
        super(cockroachDBConfiguration);
    }

    /**
     * Reads the row with the given key.
     *
     * @return ResultOK when exactly one row matched, CacheMiss (null) when none did
     * @throws Exception when more than one row matched or on any JDBC failure
     */
    @Override
    public String readSingle(String key) throws Exception
    {
        // try-with-resources closes the connection (and, via the driver, its statement
        // and result set) on every exit path; the previous hand-rolled close() calls
        // were duplicated across branches and never closed the Statement/ResultSet.
        try (Connection connection = ds.getConnection())
        {
            ResultSet rs = connection.createStatement().executeQuery(readQuery + "'" + key + "'");
            int rowCount = 0;
            while (rs.next())
            {
                rowCount++;
            }
            if (rowCount == 0)
            {
                return CacheMiss;
            }
            if (rowCount > 1)
            {
                throw new Exception("Expecting only 1 row with a given key: " + key);
            }
            return ResultOK;
        }
    }

    /**
     * Upserts a row with the given key, column1 = 1 and colsPerRow random values.
     *
     * @return ResultOK on success
     */
    @Override
    public String writeSingle(String key) throws Exception
    {
        String values = getNDelimitedStrings(config.getColsPerRow());
        try (Connection connection = ds.getConnection())
        {
            connection
                    .createStatement()
                    .executeUpdate(writeQuery + "('" + key + "', 1 ," + values + ")");
            return ResultOK;
        }
    }

    /** Creates the benchmark table (idempotent) with colsPerRow STRING value columns. */
    public void createTables() throws Exception
    {
        String values = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "value" + i + " STRING").collect(Collectors.joining(", "));
        try (Connection connection = ds.getConnection())
        {
            connection
                    .createStatement()
                    .execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (key STRING PRIMARY KEY, column1 INT, %s)", config.getDBName(), config.getTableName(), values));
        }
    }

    /** Substitutes the column list and table name into the query templates; must run once before reads/writes. */
    public void prepareStatements()
    {
        String values = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "value" + i).collect(Collectors.joining(", "));
        readQuery = String.format(readQuery, values, config.getTableName());
        writeQuery = String.format(writeQuery, config.getTableName(), values);
    }
}
| 9,182 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/operations/CockroachDBSecondaryIndexPlugin.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.cockroachdb.operations;
import java.sql.Connection;
import java.sql.ResultSet;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.plugin.cockroachdb.configs.CockroachDBConfiguration;
/**
 * CockroachDB plugin that measures writes against a table carrying a secondary
 * index on every value column (all indexes are maintained on each upsert).
 *
 * @author Sumanth Pasupuleti
 */
@Singleton
@NdBenchClientPlugin("CockroachDBSecondaryIndexPlugin")
public class CockroachDBSecondaryIndexPlugin extends CockroachDBPluginBase
{
    // %s placeholders (column list, table name) are filled in once by prepareStatements().
    private static String readFromMainQuery = "SELECT key, %s FROM %s where key = ";
    private static String writeToMainQuery = "UPSERT INTO %s (key, %s) VALUES ";

    @Inject
    public CockroachDBSecondaryIndexPlugin(CockroachDBConfiguration cockroachDBConfiguration) {
        super(cockroachDBConfiguration);
    }

    /**
     * Reads the row with the given key from the main table.
     *
     * @return ResultOK when exactly one row matched, CacheMiss (null) when none did
     * @throws Exception when more than one row matched or on any JDBC failure
     */
    @Override
    public String readSingle(String key) throws Exception
    {
        // try-with-resources replaces the hand-rolled close() calls that were
        // duplicated across branches and never closed the Statement/ResultSet.
        try (Connection connection = ds.getConnection())
        {
            ResultSet rs = connection.createStatement().executeQuery(readFromMainQuery + "'" + key + "'");
            int rowCount = 0;
            while (rs.next())
            {
                rowCount++;
            }
            if (rowCount == 0)
            {
                return CacheMiss;
            }
            if (rowCount > 1)
            {
                throw new Exception("Expecting only 1 row with a given key: " + key);
            }
            return ResultOK;
        }
    }

    /** Upserts a row with colsPerRow random column values, each covered by a secondary index. */
    @Override
    public String writeSingle(String key) throws Exception
    {
        String columns = getNDelimitedStrings(config.getColsPerRow());
        try (Connection connection = ds.getConnection())
        {
            connection
                    .createStatement()
                    .executeUpdate(writeToMainQuery + "('" + key + "', " + columns + ")");
            return ResultOK;
        }
    }

    /** Creates the main table plus one secondary index per value column (idempotent). */
    public void createTables() throws Exception
    {
        try (Connection connection = ds.getConnection())
        {
            String columns = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "column" + i + " STRING").collect(Collectors.joining(", "));
            connection
                    .createStatement()
                    .execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (key STRING PRIMARY KEY, %s)", config.getDBName(), config.getTableName(), columns));
            // create secondary indices
            for (int i = 0; i < config.getColsPerRow(); i++)
            {
                connection
                        .createStatement()
                        .execute(String.format("CREATE INDEX IF NOT EXISTS %s_column%d_index on %s (column%d)", config.getTableName(), i, config.getTableName(), i));
            }
        }
    }

    /** Substitutes the column list and table name into the query templates; must run once before reads/writes. */
    public void prepareStatements()
    {
        String columns = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "column" + i).collect(Collectors.joining(", "));
        readFromMainQuery = String.format(readFromMainQuery, columns, config.getTableName());
        writeToMainQuery = String.format(writeToMainQuery, config.getTableName(), columns);
    }
}
| 9,183 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/operations/CockroachDBPluginBase.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.cockroachdb.operations;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.plugin.cockroachdb.configs.CockroachDBConfiguration;
/**
* @author Sumanth Pasupuleti
*/
public abstract class CockroachDBPluginBase implements NdBenchClient
{
protected static final String ResultOK = "Ok";
protected static final String ResultFailed = "Failed";
protected static final String CacheMiss = null;
protected static final String ResultAmbiguous = "Failed";
private static final Logger logger = LoggerFactory.getLogger(CockroachDBPluginBase.class);
protected DataGenerator dataGenerator;
protected final CockroachDBConfiguration config;
protected static HikariDataSource ds;
protected CockroachDBPluginBase(CockroachDBConfiguration cockroachDBConfiguration) {
this.config = cockroachDBConfiguration;
}
@Override
public void init(DataGenerator dataGenerator) throws Exception
{
this.dataGenerator = dataGenerator;
logger.info("Initializing the CockroachDB client");
Properties props = new Properties();
props.setProperty("dataSourceClassName", "org.postgresql.ds.PGSimpleDataSource");
props.setProperty("dataSource.serverName", config.getLoadBalancer());
props.setProperty("dataSource.user", config.getUser());
props.setProperty("dataSource.password", config.getPassword());
props.setProperty("dataSource.databaseName", config.getDBName());
props.setProperty("dataSource.portNumber", config.getPort());
props.setProperty("maximumPoolSize", config.getPoolSize());
props.setProperty("leakDetectionThreshold", "2000");
try
{
ds = new HikariDataSource(new HikariConfig(props));
}
catch (Exception e)
{
throw new RuntimeException("Exception during connection initialization", e);
}
logger.info("Connected to cockroach db, initilizing/ creating the table");
createTables();
logger.info("Created tables");
prepareStatements();
}
/**
* Shutdown the client
*/
@Override
public void shutdown()
{
ds.close();
}
/**
* Get connection info
*/
@Override
public String getConnectionInfo() throws Exception
{
Connection connection = ds.getConnection();
String info = String.format("Connected to database: %s using driver: %s as user :%s",
connection.getMetaData().getDatabaseProductName(),
connection.getMetaData().getDriverName(),
connection.getMetaData().getUserName());
connection.close();
return info;
}
@Override
public String runWorkFlow()
{
return null;
}
public abstract void createTables() throws Exception;
public abstract void prepareStatements();
/**
* Assumes delimiter to be comma since that covers all the usecase for now.
* Will parameterize if use cases differ on delimiter.
* @param n
* @return
*/
public String getNDelimitedStrings(int n)
{
return IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "'" + dataGenerator.getRandomValue() + "'").collect(Collectors.joining(","));
}
}
| 9,184 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/operations/CockroachDBTransactionPlugin.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.ndbench.plugin.cockroachdb.operations;
import java.sql.*;
import java.util.ArrayList;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang.StringUtils;
import com.google.inject.Inject;
import com.google.inject.Singleton;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.plugin.cockroachdb.configs.CockroachDBConfiguration;
/**
 * CockroachDB plugin that exercises multi-statement transactions: each write
 * upserts one row in the main table plus one row in each of colsPerRow child
 * tables inside a single transaction, using CockroachDB's savepoint-based
 * client-side retry protocol (SQLSTATE 40001 =&gt; rollback to savepoint and retry).
 *
 * @author Sumanth Pasupuleti
 */
@Singleton
@NdBenchClientPlugin("CockroachDBTransactionPlugin")
public class CockroachDBTransactionPlugin extends CockroachDBPluginBase
{
    // Main-table templates are finalized once in prepareStatements(); the child
    // template keeps its %d and is formatted per child table at write time.
    private static String readFromMainQuery = "SELECT key, %s FROM %s where key = ";
    private static String writeToMainQuery = "UPSERT INTO %s (key, %s) VALUES ";
    private static String writeToChildQuery = "UPSERT INTO child%d (key, column1, value) VALUES ";

    @Inject
    public CockroachDBTransactionPlugin(CockroachDBConfiguration cockroachDBConfiguration) {
        super(cockroachDBConfiguration);
    }

    /**
     * Reads the row with the given key from the main table.
     *
     * @return ResultOK when exactly one row matched, CacheMiss (null) when none did
     * @throws Exception when more than one row matched or on any JDBC failure
     */
    @Override
    public String readSingle(String key) throws Exception
    {
        // try-with-resources replaces the hand-rolled close() calls that were
        // duplicated across branches and never closed the Statement/ResultSet.
        try (Connection connection = ds.getConnection())
        {
            ResultSet rs = connection.createStatement().executeQuery(readFromMainQuery + "'" + key + "'");
            int rowCount = 0;
            while (rs.next())
            {
                rowCount++;
            }
            if (rowCount == 0)
            {
                return CacheMiss;
            }
            if (rowCount > 1)
            {
                throw new Exception("Expecting only 1 row with a given key: " + key);
            }
            return ResultOK;
        }
    }

    /**
     * Writes the main row plus one row per child table in a single transaction,
     * retrying on CockroachDB serialization failures.
     *
     * @return ResultOK once the transaction commits
     * @throws Exception on any non-retryable failure
     */
    @Override
    public String writeSingle(String key) throws Exception
    {
        // Random keys for the child-table rows; also stored as the main row's column values.
        String[] childKeys = new String[config.getColsPerRow()];
        for (int i = 0; i < config.getColsPerRow(); i++)
        {
            childKeys[i] = "'" + dataGenerator.getRandomValue() + "'";
        }
        // try-with-resources returns the connection to the pool on every exit path.
        try (Connection connection = ds.getConnection())
        {
            connection.setAutoCommit(false);
            // The lambda uses its conn parameter directly; the old closure-pointer
            // workaround is unnecessary now that the connection is effectively final.
            CockroachDBRetryableTransaction transaction = conn -> {
                Statement statement = conn.createStatement();
                // One batch: the main-table row plus one row per child table.
                statement.addBatch(writeToMainQuery + "('" + key + "', " + StringUtils.join(childKeys, ',') + ")");
                for (int i = 0; i < config.getColsPerRow(); i++)
                {
                    statement.addBatch(String.format(writeToChildQuery, i) + "(" + childKeys[i] + ", 1, '" + dataGenerator.getRandomValue() + "')");
                }
                statement.executeBatch();
            };
            // CockroachDB client-side retry protocol: named savepoint "cockroach_restart".
            Savepoint sp = connection.setSavepoint("cockroach_restart");
            while (true)
            {
                try
                {
                    transaction.run(connection);
                    connection.releaseSavepoint(sp);
                    break;
                }
                catch (SQLException e)
                {
                    // 40001 = SERIALIZATION_FAILURE: roll back to the savepoint and retry.
                    // (Constant-first equals also guards against a null SQLSTATE, which
                    // previously caused an NPE instead of surfacing the real error.)
                    if ("40001".equals(e.getSQLState()))
                    {
                        connection.rollback(sp);
                    }
                    else
                    {
                        // A failure after releaseSavepoint would be ambiguous, before it a
                        // plain failure; both are surfaced to the caller as the exception.
                        throw e;
                    }
                }
            }
            connection.commit();
            connection.setAutoCommit(true);
            return ResultOK;
        }
    }

    /**
     * Creates the main table and colsPerRow child tables (all idempotent).
     * Fixed: the original never closed the connection when a statement threw.
     */
    public void createTables() throws Exception
    {
        try (Connection connection = ds.getConnection())
        {
            String columns = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "column" + i + " STRING").collect(Collectors.joining(", "));
            connection
                    .createStatement()
                    .execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (key STRING PRIMARY KEY, %s)", config.getDBName(), config.getTableName(), columns));
            // create child tables
            for (int i = 0; i < config.getColsPerRow(); i++)
            {
                connection
                        .createStatement()
                        .execute(String.format("CREATE TABLE IF NOT EXISTS %s.child%d (key STRING PRIMARY KEY, column1 INT, value STRING)", config.getDBName(), i));
            }
        }
    }

    /** Substitutes the column list and table name into the main-table query templates. */
    public void prepareStatements()
    {
        String columns = IntStream.range(0, config.getColsPerRow()).mapToObj(i -> "column" + i).collect(Collectors.joining(", "));
        readFromMainQuery = String.format(readFromMainQuery, columns, config.getTableName());
        writeToMainQuery = String.format(writeToMainQuery, config.getTableName(), columns);
    }
}
| 9,185 |
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/configs/CockroachDBConfiguration.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.cockroachdb.configs;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.archaius.api.annotations.PropertyName;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
 * Configurations for CockroachDB benchmarks. Values are resolved by Archaius under
 * the "&lt;ndbench namespace&gt;cockroachdb" property prefix; annotations supply defaults.
 *
 * @author Sumanth Pasupuleti
 */
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "cockroachdb")
public interface CockroachDBConfiguration {
// Database (schema) the benchmark tables live in.
@DefaultValue("perftest")
String getDBName();
// Main benchmark table name.
@DefaultValue("test")
String getTableName();
// Hostname of the load balancer fronting the CockroachDB cluster.
@DefaultValue("test-loadbalancer")
String getLoadBalancer();
// Database user to connect as.
@DefaultValue("maxroach")
String getUser();
// SQL port; 26257 is CockroachDB's default. Returned as String because it is fed into connection Properties.
@DefaultValue("26257")
String getPort();
// Password for the user; empty by default (insecure-mode clusters).
@DefaultValue("")
String getPassword();
// Number of value columns per row created and written by the plugins.
@DefaultValue("5")
Integer getColsPerRow();
// HikariCP maximum pool size (as a String for the Properties-based pool config).
@DefaultValue("100")
String getPoolSize();
}
0 | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb | Create_ds/ndbench/ndbench-cockroachdb-plugins/src/main/java/com/netflix/ndbench/plugin/cockroachdb/configs/CockroachDBModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.cockroachdb.configs;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule;
/**
* @author Sumanth Pasupuleti
*/
@NdBenchClientPluginGuiceModule
public class CockroachDBModule extends AbstractModule {
    @Override
    protected void configure() {
        // No explicit bindings; configuration is supplied via the @Provides method below.
    }
    /** Builds an Archaius-backed dynamic proxy for the CockroachDB benchmark configuration. */
    @Provides
    CockroachDBConfiguration getCockroachDBConfiguration(ConfigProxyFactory factory) {
        return factory.newProxy(CockroachDBConfiguration.class);
    }
}
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/IJanusGraphBuilder.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph;
import org.janusgraph.core.JanusGraphFactory;
/**
* @author pencal
*/
public interface IJanusGraphBuilder {
    /** @return a pre-configured JanusGraph factory builder; callers invoke {@code open()} on it to obtain a graph. */
    JanusGraphFactory.Builder getGraphBuilder();
}
| 9,188 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/JanusGraphBasePlugin.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.PropertyKey;
import org.janusgraph.core.schema.JanusGraphManagement;
/**
* Common logic for all JanusGraph plugins, regardless of which storage backend being used.
*
* @author pencal
*/
public abstract class JanusGraphBasePlugin {
    static final String COMPOSITE_INDEX_NAME = "idx_customId";
    static final String VERTEX_LABEL_LEVEL_1 = "level1";
    static final String OK = "ok";
    // A null response is the plugin protocol's signal for a cache miss.
    static final String CACHE_MISS = null;
    static final String PROP_CUSTOM_ID_KEY = "prop_customId";
    static final String PROP_METADATA_KEY = "metadata";
    final String storageBackend;
    final String storageHost;
    final String storagePort;
    protected JanusGraphBasePlugin(String backend, String host, String port) {
        this.storageBackend = backend;
        this.storageHost = host;
        this.storagePort = port;
    }
    /**
     * Creates the benchmark schema (custom-id property key, composite index and vertex label)
     * if it does not exist yet. Idempotent: a second call finds the index and makes no changes.
     */
    protected void createSchema(JanusGraph graph) {
        JanusGraphManagement mgmt = graph.openManagement();
        if (!mgmt.containsGraphIndex(COMPOSITE_INDEX_NAME)) {
            final PropertyKey customId = mgmt.makePropertyKey(PROP_CUSTOM_ID_KEY).dataType(String.class).make();
            JanusGraphManagement.IndexBuilder customIdIndexBuilder = mgmt.buildIndex(COMPOSITE_INDEX_NAME, Vertex.class).addKey(customId);
            customIdIndexBuilder.buildCompositeIndex();
            mgmt.makeVertexLabel(VERTEX_LABEL_LEVEL_1).make();
            mgmt.commit();
        } else {
            // Fix: the original left the management transaction open on this path; a
            // JanusGraphManagement instance must always be committed or rolled back.
            mgmt.rollback();
        }
    }
    /** @return a human-readable summary of the backend, host, port and graph open/closed state. */
    protected String getConnectionInfo(JanusGraph graph) {
        String status = graph.isOpen() ? "opened" : "closed";
        return String.format("Backend: %s, Host: %s, Port: %s, Graph Status: %s",
                storageBackend,
                storageHost,
                storagePort,
                status);
    }
}
| 9,189 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/NdBenchJanusGraphModule.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph;
import com.google.inject.AbstractModule;
import com.google.inject.Provides;
import com.netflix.archaius.ConfigProxyFactory;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPluginGuiceModule;
import com.netflix.ndbench.plugin.janusgraph.configs.IJanusGraphConfig;
import com.netflix.ndbench.plugin.janusgraph.configs.cql.ICQLConfig;
/**
* @author pencal
*/
@NdBenchClientPluginGuiceModule
public class NdBenchJanusGraphModule extends AbstractModule {
    @Override
    protected void configure() {
        // No explicit bindings; configs are supplied via the @Provides methods below.
    }
    /** Builds an Archaius-backed proxy for the common JanusGraph benchmark configuration. */
    @Provides
    IJanusGraphConfig getJanusGraphConfig(ConfigProxyFactory factory) {
        return factory.newProxy(IJanusGraphConfig.class);
    }
    /** Builds an Archaius-backed proxy for the CQL-backend-specific configuration. */
    @Provides
    ICQLConfig getCQLConfig(ConfigProxyFactory factory) {
        return factory.newProxy(ICQLConfig.class);
    }
}
| 9,190 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/JanusGraphPluginCQL.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph;
import com.netflix.ndbench.api.plugin.DataGenerator;
import com.netflix.ndbench.api.plugin.NdBenchClient;
import com.netflix.ndbench.api.plugin.annotations.NdBenchClientPlugin;
import com.netflix.ndbench.plugin.janusgraph.configs.IJanusGraphConfig;
import com.netflix.ndbench.plugin.janusgraph.cql.JanusGraphBuilderCQLProvider;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.janusgraph.core.JanusGraph;
import org.janusgraph.core.JanusGraphFactory;
import org.janusgraph.core.JanusGraphTransaction;
import org.janusgraph.core.JanusGraphVertex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.Inet4Address;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/***
* JanusGraph benchmarking plugin to measure throughput of write and read by
* single ID using JanusGraph Core API
*
* @author pencal
*/
@Singleton
@NdBenchClientPlugin("janusgraph-cql")
public class JanusGraphPluginCQL extends JanusGraphBasePlugin implements NdBenchClient {
    private static final Logger logger = LoggerFactory.getLogger(JanusGraphPluginCQL.class);
    // Fix: constant was a mutable static; it is a fixed backend identifier.
    private static final String BACKEND = "cql";
    private final JanusGraphFactory.Builder graphBuilder;
    private DataGenerator dataGenerator;
    private GraphTraversalSource traversalSource;
    private JanusGraph graph;
    // When true, operations go through the JanusGraph Core API; otherwise TinkerPop traversals.
    private final boolean useJanusgraphTransaction;
    @Inject
    public JanusGraphPluginCQL(IJanusGraphConfig config, JanusGraphBuilderCQLProvider builderProvider) {
        super(BACKEND, Optional.ofNullable(config.getStorageHostname()).orElse(Inet4Address.getLoopbackAddress().getHostAddress()),
                config.getStoragePort());
        this.graphBuilder = builderProvider.getGraphBuilder();
        this.useJanusgraphTransaction = config.useJanusgraphTransaction();
    }
    /** Opens the graph, creates the benchmark schema and stores the data generator. */
    @Override
    public void init(DataGenerator dataGenerator) throws Exception {
        this.graph = graphBuilder.open();
        this.traversalSource = graph.traversal();
        this.dataGenerator = dataGenerator;
        createSchema(graph);
        logger.info("Initing JanusGraph Plugin CQL");
    }
    /**
     * Reads a single vertex by its custom id.
     * @return {@code OK} on a hit, {@code CACHE_MISS} (null) when no vertex matches
     * @throws Exception if the underlying query fails
     */
    @Override
    public String readSingle(String key) throws Exception {
        JanusGraphTransaction tx = useJanusgraphTransaction ? graph.newTransaction() : null;
        try {
            return readSingleInternal(key, tx);
        } finally {
            if (tx != null)
                tx.close();
        }
    }
    private String readSingleInternal(String key, JanusGraphTransaction transaction) throws Exception {
        String response = OK;
        if (useJanusgraphTransaction) {
            if (transaction == null) {
                throw new IllegalArgumentException("JanusGraph transaction in read operation is null");
            }
            // Fix: query().vertices() returns an Iterable<JanusGraphVertex>; the original cast
            // it to a single JanusGraphVertex, which throws ClassCastException at runtime.
            Iterable<JanusGraphVertex> vertices = transaction.query().has(PROP_CUSTOM_ID_KEY, key).vertices();
            if (vertices == null) {
                throw new Exception("Internal error when reading data with key" + key + " using JanusGraph Core API");
            }
            if (!vertices.iterator().hasNext())
                response = CACHE_MISS;
        } else {
            List<Vertex> results = traversalSource.V().has(PROP_CUSTOM_ID_KEY, key).toList();
            if (results == null)
                throw new Exception("Internal error when reading data with key" + key + " using TinkerPop API");
            else if (results.size() == 0)
                response = CACHE_MISS;
        }
        return response;
    }
    /**
     * Writes a single vertex keyed by the custom id with random metadata, committing immediately.
     * @return {@code OK}
     */
    @Override
    public String writeSingle(String key) throws Exception {
        if (useJanusgraphTransaction) {
            graph.addVertex(T.label, VERTEX_LABEL_LEVEL_1, PROP_CUSTOM_ID_KEY, key, PROP_METADATA_KEY,
                    dataGenerator.getRandomValue()); //Automatically opens a new transaction
            graph.tx().commit();
        } else {
            traversalSource.getGraph().addVertex(T.label, VERTEX_LABEL_LEVEL_1, PROP_CUSTOM_ID_KEY, key,
                    PROP_METADATA_KEY, dataGenerator.getRandomValue());
            traversalSource.getGraph().tx().commit();
        }
        return OK;
    }
    /**
     * Perform a bulk read operation; all keys are read within one transaction when
     * the JanusGraph Core API is in use.
     * @return a list of response codes, one per key
     * @throws Exception if any individual read fails
     */
    public List<String> readBulk(final List<String> keys) throws Exception {
        List<String> responses = new ArrayList<>(keys.size());
        JanusGraphTransaction transaction = useJanusgraphTransaction ? graph.newTransaction() : null;
        try {
            for (String key : keys) {
                responses.add(readSingleInternal(key, transaction));
            }
        } finally {
            if (transaction != null)
                transaction.close();
        }
        return responses;
    }
    /**
     * Perform a bulk write operation (each key committed individually via {@link #writeSingle}).
     * @return a list of response codes, one per key
     * @throws Exception if any individual write fails
     */
    public List<String> writeBulk(final List<String> keys) throws Exception {
        List<String> responses = new ArrayList<>(keys.size());
        for (String key : keys) {
            responses.add(writeSingle(key));
        }
        return responses;
    }
    @Override
    public void shutdown() throws Exception {
        graph.close();
        logger.info("JanusGraph DB shutdown");
    }
    @Override
    public String getConnectionInfo() throws Exception {
        return super.getConnectionInfo(graph);
    }
    @Override
    public String runWorkFlow() throws Exception {
        return null;
    }
}
| 9,191 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/cql/JanusGraphBuilderCQLProvider.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph.cql;
import com.netflix.ndbench.plugin.janusgraph.IJanusGraphBuilder;
import com.netflix.ndbench.plugin.janusgraph.configs.IJanusGraphConfig;
import com.netflix.ndbench.plugin.janusgraph.configs.cql.ICQLConfig;
import org.janusgraph.core.JanusGraphFactory;
import javax.inject.Inject;
import javax.inject.Singleton;
import java.net.Inet4Address;
import java.util.Optional;
/***
* Provides a JanusGraph builder backed by CQL
*
* @author pencal
*/
@Singleton
public class JanusGraphBuilderCQLProvider implements IJanusGraphBuilder {
    protected JanusGraphFactory.Builder graphBuilder;
    // NOTE(review): this no-arg constructor leaves graphBuilder null; presumably required by
    // a framework (Guice/serialization) — confirm, otherwise getGraphBuilder() can return null.
    public JanusGraphBuilderCQLProvider() {
    }
    // Configures a CQL-backed builder: keyspace/cluster from ICQLConfig, host/port from the
    // common config (loopback if no host is set), and disables JanusGraph caching/query
    // shortcuts (db-cache, batch, smart-limit, force-index, fast-property) so that the
    // benchmark measures raw storage-backend behavior.
    @Inject
    public JanusGraphBuilderCQLProvider(IJanusGraphConfig storageConfig, ICQLConfig config) {
        graphBuilder = JanusGraphFactory.build().set("storage.cql.keyspace", config.getKeyspace())
                .set("storage.backend", "cql").set("storage.cql.cluster-name", config.getClusterName())
                .set("storage.hostname", Optional.ofNullable(storageConfig.getStorageHostname()).orElse(Inet4Address.getLoopbackAddress().getHostAddress()))
                .set("storage.port", storageConfig.getStoragePort()).set("storage.lock.wait-time", 300)
                .set("cache.db-cache", false).set("query.batch", false).set("query.smart-limit", false)
                .set("query.force-index", false).set("query.fast-property", false);
    }
    public JanusGraphFactory.Builder getGraphBuilder() {
        return graphBuilder;
    }
}
| 9,192 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/configs/IJanusGraphConfig.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph.configs;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
* Common configs for JanusGraph benchmark
*
* @author pencal
*/
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "janusgraph")
public interface IJanusGraphConfig {
    // One can benchmark either the Tinkerpop API or the JanusGraph Core API if
    // needed
    /** @return true to use JanusGraph Core API transactions; false (default) for TinkerPop traversals. */
    @DefaultValue("false")
    boolean useJanusgraphTransaction();
    /** @return storage backend hostname; no default — callers fall back to loopback when null. */
    String getStorageHostname();
    /** @return storage backend port (default {@code 9042}, the standard CQL native port). */
    @DefaultValue("9042")
    String getStoragePort();
}
| 9,193 |
0 | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/configs | Create_ds/ndbench/ndbench-janusgraph-plugins/src/main/java/com/netflix/ndbench/plugin/janusgraph/configs/cql/ICQLConfig.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.netflix.ndbench.plugin.janusgraph.configs.cql;
import com.netflix.archaius.api.annotations.Configuration;
import com.netflix.archaius.api.annotations.DefaultValue;
import com.netflix.ndbench.api.plugin.common.NdBenchConstants;
/**
* Specific configs for JanusGraph's CQL backend
*
* @author pencal
*/
// Fix: the prefix previously concatenated a leading "." (PROP_NAMESPACE + ".janusgraph..."),
// unlike the sibling IJanusGraphConfig which concatenates without one — producing an
// inconsistent (double-separator) property namespace for the CQL settings.
@Configuration(prefix = NdBenchConstants.PROP_NAMESPACE + "janusgraph.storage.cql")
public interface ICQLConfig {
    /** @return Cassandra keyspace JanusGraph stores its data in (default {@code ndbench_cql}). */
    @DefaultValue("ndbench_cql")
    String getKeyspace();
    /** @return Cassandra cluster name (default {@code na}). */
    @DefaultValue("na")
    String getClusterName();
}
| 9,194 |
0 | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius/persisted2/FirstScopePropertyValueResolverTest.java | package com.netflix.archaius.persisted2;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Set;
import org.apache.commons.lang3.StringUtils;
import org.junit.Assert;
import org.junit.Test;
public class FirstScopePropertyValueResolverTest {
    private final ScopePriorityPropertyValueResolver resolver = new ScopePriorityPropertyValueResolver();
    @Test
    public void testSingle() {
        // A lone candidate always wins.
        Assert.assertEquals("1", resolver.resolve("propName",
                Arrays.asList(create("1", "s1", "a"))));
    }
    @Test
    public void testIdentical() {
        // With identical scopes, the first candidate takes priority.
        Assert.assertEquals("1", resolver.resolve("propName",
                Arrays.asList(create("1", "s1", "a"), create("2", "s1", "a"))));
    }
    @Test
    public void testFirst() {
        // The candidate with the non-empty scope value wins.
        Assert.assertEquals("1", resolver.resolve("propName",
                Arrays.asList(create("1", "s1", "a"), create("2", "s1", ""))));
    }
    @Test
    public void testSecond() {
        Assert.assertEquals("2", resolver.resolve("propName",
                Arrays.asList(create("1", "s1", ""), create("2", "s1", "a"))));
    }
    @Test
    public void test2Scopes() {
        // A match on a higher-priority scope (s2) beats one on a lower-priority scope (s3).
        Assert.assertEquals("2", resolver.resolve("propName", Arrays.asList(
                create("1", "s1", "", "s2", "", "s3", "b"),
                create("2", "s1", "", "s2", "a,b", "s3", ""))));
    }
    /** Builds a ScopedValue from alternating scope-name / comma-separated-values pairs. */
    ScopedValue create(String value, String... keyValuePairs) {
        LinkedHashMap<String, Set<String>> scopes = new LinkedHashMap<String, Set<String>>();
        for (int idx = 0; idx < keyValuePairs.length; idx += 2) {
            Set<String> scopeValues = new HashSet<String>(
                    Arrays.asList(StringUtils.split(keyValuePairs[idx + 1], ",")));
            scopes.put(keyValuePairs[idx], scopeValues);
        }
        return new ScopedValue(value, scopes);
    }
}
| 9,195 |
0 | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius/persisted2/JsonPersistedV2ReaderTest.java | package com.netflix.archaius.persisted2;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.archaius.config.polling.PollingResponse;
import org.junit.Assert;
import org.junit.Test;
import java.io.ByteArrayInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
public class JsonPersistedV2ReaderTest {
    /**
     * Shared fixture used by both tests (previously duplicated verbatim in each test).
     * Only the two region-scoped properties should survive resolution, as the region
     * field overrides the unscoped duplicates for key1/key2.
     */
    private static TestPropertyList sampleProperties() {
        List<TestProperty> propertyList = new ArrayList<>();
        propertyList.add(new TestProperty("key1", "value3", "id3", "app1", ""));
        // The next two properties are the only two that are actually resolved, as the other two are overridden due to
        // the presence of the region field.
        propertyList.add(new TestProperty("key1", "value1", "id1", "app1", "region1"));
        propertyList.add(new TestProperty("key2", "value2", "id2", "app1", "region1"));
        propertyList.add(new TestProperty("key2", "value4", "id4", "app1", ""));
        return new TestPropertyList(propertyList);
    }
    @Test
    public void idFieldReturnedWhenPresent() throws Exception {
        TestPropertyList properties = sampleProperties();
        JsonPersistedV2Reader reader =
                JsonPersistedV2Reader.builder(
                        () -> new ByteArrayInputStream(
                                new ObjectMapper().writeValueAsBytes(properties)))
                        .withPath("propertiesList")
                        .withReadIdField(true)
                        .build();
        PollingResponse response = reader.call();
        Map<String, String> props = response.getToAdd();
        Assert.assertEquals(2, props.size());
        Assert.assertEquals("value1", props.get("key1"));
        Assert.assertEquals("value2", props.get("key2"));
        // With withReadIdField(true) the property ids are also surfaced.
        Map<String, String> propIds = response.getNameToIdsMap();
        Assert.assertEquals(2, propIds.size());
        Assert.assertEquals("id1", propIds.get("key1"));
        Assert.assertEquals("id2", propIds.get("key2"));
    }
    @Test
    public void idFieldAbsent() throws Exception {
        TestPropertyList properties = sampleProperties();
        JsonPersistedV2Reader reader =
                JsonPersistedV2Reader.builder(
                        () -> new ByteArrayInputStream(
                                new ObjectMapper().writeValueAsBytes(properties)))
                        .withPath("propertiesList")
                        .build();
        PollingResponse response = reader.call();
        Map<String, String> props = response.getToAdd();
        Assert.assertEquals(2, props.size());
        Assert.assertEquals("value1", props.get("key1"));
        Assert.assertEquals("value2", props.get("key2"));
        // Without withReadIdField(true) no id mapping is produced.
        Assert.assertTrue(response.getNameToIdsMap().isEmpty());
    }
    /** JSON-serializable wrapper matching the reader's "propertiesList" path. */
    public static class TestPropertyList {
        public List<TestProperty> propertiesList;
        public TestPropertyList(List<TestProperty> propertiesList) {
            this.propertiesList = propertiesList;
        }
    }
    /** One persisted property record as serialized by the v2 properties service. */
    public static class TestProperty {
        public String key;
        public String value;
        public String propertyId;
        public String appId;
        public String region;
        public TestProperty(String key, String value, String propertyId, String appId, String region) {
            this.key = key;
            this.value = value;
            this.propertyId = propertyId;
            this.appId = appId;
            this.region = region;
        }
    }
}
| 9,196 |
0 | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius | Create_ds/archaius/archaius2-persisted2/src/test/java/com/netflix/archaius/persisted2/PlatformServiceTest.java | package com.netflix.archaius.persisted2;
import org.junit.Ignore;
import org.junit.Test;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Injector;
import com.google.inject.Scopes;
import com.netflix.archaius.api.Config;
import com.netflix.archaius.api.exceptions.ConfigException;
import com.netflix.archaius.api.inject.RemoteLayer;
import com.netflix.archaius.guice.ArchaiusModule;
import com.netflix.archaius.visitor.PrintStreamVisitor;
public class PlatformServiceTest {
    // TODO: Provide an embedded version of this service. For now these tests are run
    // manually against internal Netflix systems
    @Test
    @Ignore
    public void test() throws Exception {
        // Client config pointing at the internal test platformservice endpoint; query scopes
        // filter which properties the server returns, scopes describe this instance, and
        // prioritized scopes control client-side resolution order.
        final Persisted2ClientConfig config = new DefaultPersisted2ClientConfig()
            .withServiceUrl("http://platformservice.us-east-1.dyntest.netflix.net:7001/platformservice/REST/v2/properties/jsonFilterprops")
            .withQueryScope("env", "test")
            .withQueryScope("region", "us-east-1")
            .withQueryScope("appId", "NCCP")
            .withScope("env", "test")
            .withScope("region", "us-east-1")
            .withScope("appId", "NCCP")
            .withPrioritizedScopes("env", "region", "asg", "stack", "serverId")
//            .withSkipPropsWithExtraScopes(true)
            ;
        // Wire the remote layer to a Persisted2-backed Config and dump the merged result.
        Injector injector = Guice.createInjector(
                new ArchaiusModule(),
                new AbstractModule() {
                    @Override
                    protected void configure() {
                        bind(Persisted2ClientConfig.class).toInstance(config);
                        bind(Config.class).annotatedWith(RemoteLayer.class).toProvider(Persisted2ConfigProvider.class).in(Scopes.SINGLETON);
                    }
                });
        Config c = injector.getInstance(Config.class);
        c.accept(new PrintStreamVisitor());
    }
}
| 9,197 |
0 | Create_ds/archaius/archaius2-persisted2/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-persisted2/src/main/java/com/netflix/archaius/persisted2/ScopePredicates.java | package com.netflix.archaius.persisted2;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import com.netflix.archaius.api.Config;
/**
* Utility class for creating common ScopePredicates
*
* @author elandau
*
*/
public abstract class ScopePredicates {
    /** @return a predicate that matches any set of scope attributes. */
    public static ScopePredicate alwaysTrue() {
        return new ScopePredicate() {
            @Override
            public boolean evaluate(Map<String, Set<String>> attrs) {
                return true;
            }
        };
    }
    /**
     * Builds a predicate whose scope values are read (lazily, once per key) from the
     * given Config. Values are compared lower-cased.
     */
    public static ScopePredicate fromConfig(final Config config) {
        final HashMap<String, String> lookup = new HashMap<String, String>();
        return new AbstractScopePredicate() {
            @Override
            public String getScope(String key) {
                String value = lookup.get(key);
                if (value == null) {
                    // Fix: the original cached value.toLowerCase() but returned the
                    // original-case value on the first lookup, so the same key could
                    // yield different results on first vs. subsequent calls.
                    value = config.getString(key, "").toLowerCase();
                    lookup.put(key, value);
                }
                return value;
            }
        };
    }
    /**
     * Builds a predicate whose scope values come from a fixed map. Values are
     * lower-cased once up front; missing keys resolve to the empty string.
     */
    public static ScopePredicate fromMap(final Map<String, String> values) {
        final Map<String, String> lowerCaseValues = new HashMap<String, String>();
        for (Entry<String, String> entry : values.entrySet()) {
            lowerCaseValues.put(entry.getKey(), entry.getValue().toLowerCase());
        }
        return new AbstractScopePredicate() {
            @Override
            public String getScope(String key) {
                String value = lowerCaseValues.get(key);
                return value == null ? "" : value;
            }
        };
    }
}
| 9,198 |
0 | Create_ds/archaius/archaius2-persisted2/src/main/java/com/netflix/archaius | Create_ds/archaius/archaius2-persisted2/src/main/java/com/netflix/archaius/persisted2/Persisted2ClientConfig.java | package com.netflix.archaius.persisted2;
import java.util.List;
import java.util.Map;
import java.util.Set;
import com.netflix.archaius.api.annotations.Configuration;
@Configuration(prefix="archaius.persisted")
public interface Persisted2ClientConfig {
    /**
     * @return True if the client is enabled. This is checked only once at startup
     */
    boolean isEnabled();
    /**
     * @return Polling rate for getting updates
     */
    int getRefreshRate();
    /**
     * @return Priority ordered list of scopes to be evaluated on the client
     */
    List<String> getPrioritizedScopes();
    /**
     * @return List of scopes to which this instance belongs
     */
    Map<String, String> getScopes();
    /**
     * @return List of query scopes to 'and' and possible values to 'or'
     */
    Map<String, Set<String>> getQueryScopes();
    /**
     * When set to true the server will match only properties for which the list of
     * scopes matches exactly the query scope. Otherwise the server will match
     * properties for which the query scopes is an subset.
     * @return true to require an exact scope match on the server side
     */
    boolean getSkipPropsWithExtraScopes();
    /**
     * URL of persisted2 format service
     * @return the service endpoint URL the client polls for properties
     */
    String getServiceUrl();
}
| 9,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.