index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetTypeInfo.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.resultset.GremlinResultSetMetadata;
import software.aws.neptune.jdbc.ResultSet;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.sql.DatabaseMetaData.typeNullable;
import static java.sql.DatabaseMetaData.typeSearchable;
import static software.aws.neptune.jdbc.ResultSetMetaData.getTypePrecision;
public abstract class ResultSetGetTypeInfo extends ResultSet {
    /**
     * Base result set implementation backing {@code DatabaseMetaData.getTypeInfo()}.
     * Columns, in JDBC-mandated order:
     *
     * TYPE_NAME String => Type name
     * DATA_TYPE int => SQL data type from java.sql.Types
     * PRECISION int => maximum precision
     * LITERAL_PREFIX String => prefix used to quote a literal (may be null)
     * LITERAL_SUFFIX String => suffix used to quote a literal (may be null)
     * CREATE_PARAMS String => parameters used in creating the type (may be null)
     * NULLABLE short => can you use NULL for this type
     * CASE_SENSITIVE boolean => is it case sensitive
     * SEARCHABLE short => can you use "WHERE" based on this type
     * UNSIGNED_ATTRIBUTE boolean => is it unsigned
     * FIXED_PREC_SCALE boolean => can it be a money value
     * AUTO_INCREMENT boolean => can it be used for an auto-increment value
     * LOCAL_TYPE_NAME String => localized version of type name (may be null)
     * MINIMUM_SCALE short => minimum scale supported
     * MAXIMUM_SCALE short => maximum scale supported
     * SQL_DATA_TYPE int => unused
     * SQL_DATETIME_SUB int => unused
     * NUM_PREC_RADIX int => usually 2 or 10
     */
    // Column-name constants. Always use these (never the raw string literals) so a
    // typo in a key cannot silently create a row entry that getConvertedValue() misses.
    private static final String TYPE_NAME = "TYPE_NAME";
    private static final String DATA_TYPE = "DATA_TYPE";
    private static final String PRECISION = "PRECISION";
    private static final String LITERAL_PREFIX = "LITERAL_PREFIX";
    private static final String LITERAL_SUFFIX = "LITERAL_SUFFIX";
    private static final String CREATE_PARAMS = "CREATE_PARAMS";
    private static final String NULLABLE = "NULLABLE";
    private static final String CASE_SENSITIVE = "CASE_SENSITIVE";
    private static final String SEARCHABLE = "SEARCHABLE";
    private static final String UNSIGNED_ATTRIBUTE = "UNSIGNED_ATTRIBUTE";
    private static final String FIXED_PREC_SCALE = "FIXED_PREC_SCALE";
    private static final String AUTO_INCREMENT = "AUTO_INCREMENT";
    private static final String LOCAL_TYPE_NAME = "LOCAL_TYPE_NAME";
    private static final String MINIMUM_SCALE = "MINIMUM_SCALE";
    private static final String MAXIMUM_SCALE = "MAXIMUM_SCALE";
    private static final String SQL_DATA_TYPE = "SQL_DATA_TYPE";
    private static final String SQL_DATETIME_SUB = "SQL_DATETIME_SUB";
    private static final String NUM_PREC_RADIX = "NUM_PREC_RADIX";
    private static final Logger LOGGER = LoggerFactory.getLogger(ResultSetGetTypeInfo.class);
    // 1-based column index -> column name, in the order mandated by the JDBC spec.
    private static final List<String> ORDERED_COLUMNS = new ArrayList<>();
    // Java class reported for each column by the result set metadata.
    private static final Map<String, Class<?>> COLUMN_TYPE_MAP = new HashMap<>();
    // Whether the most recently read value was SQL NULL (backs wasNull()).
    private boolean wasNull = false;
    // One map per type-info row, keyed by the column-name constants above.
    private final List<Map<String, Object>> typeInformation;

    static {
        ORDERED_COLUMNS.add(TYPE_NAME);
        ORDERED_COLUMNS.add(DATA_TYPE);
        ORDERED_COLUMNS.add(PRECISION);
        ORDERED_COLUMNS.add(LITERAL_PREFIX);
        ORDERED_COLUMNS.add(LITERAL_SUFFIX);
        ORDERED_COLUMNS.add(CREATE_PARAMS);
        ORDERED_COLUMNS.add(NULLABLE);
        ORDERED_COLUMNS.add(CASE_SENSITIVE);
        ORDERED_COLUMNS.add(SEARCHABLE);
        ORDERED_COLUMNS.add(UNSIGNED_ATTRIBUTE);
        ORDERED_COLUMNS.add(FIXED_PREC_SCALE);
        ORDERED_COLUMNS.add(AUTO_INCREMENT);
        ORDERED_COLUMNS.add(LOCAL_TYPE_NAME);
        ORDERED_COLUMNS.add(MINIMUM_SCALE);
        ORDERED_COLUMNS.add(MAXIMUM_SCALE);
        ORDERED_COLUMNS.add(SQL_DATA_TYPE);
        ORDERED_COLUMNS.add(SQL_DATETIME_SUB);
        ORDERED_COLUMNS.add(NUM_PREC_RADIX);
        COLUMN_TYPE_MAP.put(TYPE_NAME, String.class);
        // NOTE(review): row values for DATA_TYPE are Integers (see putInfo), yet the
        // metadata class is Types.class. Kept for backward compatibility, but this looks
        // inconsistent — confirm how GremlinResultSetMetadata consumes this entry.
        COLUMN_TYPE_MAP.put(DATA_TYPE, Types.class);
        COLUMN_TYPE_MAP.put(PRECISION, Integer.class);
        COLUMN_TYPE_MAP.put(LITERAL_PREFIX, String.class);
        COLUMN_TYPE_MAP.put(LITERAL_SUFFIX, String.class);
        COLUMN_TYPE_MAP.put(CREATE_PARAMS, String.class);
        COLUMN_TYPE_MAP.put(NULLABLE, Short.class);
        COLUMN_TYPE_MAP.put(CASE_SENSITIVE, Boolean.class);
        COLUMN_TYPE_MAP.put(SEARCHABLE, Short.class);
        COLUMN_TYPE_MAP.put(UNSIGNED_ATTRIBUTE, Boolean.class);
        COLUMN_TYPE_MAP.put(FIXED_PREC_SCALE, Boolean.class);
        COLUMN_TYPE_MAP.put(AUTO_INCREMENT, Boolean.class);
        COLUMN_TYPE_MAP.put(LOCAL_TYPE_NAME, String.class);
        COLUMN_TYPE_MAP.put(MINIMUM_SCALE, Short.class);
        COLUMN_TYPE_MAP.put(MAXIMUM_SCALE, Short.class);
        COLUMN_TYPE_MAP.put(SQL_DATA_TYPE, Integer.class);
        COLUMN_TYPE_MAP.put(SQL_DATETIME_SUB, Integer.class);
        COLUMN_TYPE_MAP.put(NUM_PREC_RADIX, Integer.class);
    }

    /**
     * Fills in the columns whose values are identical for every reported type
     * (driver-wide constants such as nullability and searchability).
     *
     * @param typeInfo Rows to populate; each map is mutated in place.
     */
    protected static void populateConstants(final List<Map<String, Object>> typeInfo) {
        for (final Map<String, Object> info : typeInfo) {
            // Use the shared column-name constants rather than repeated literals so the
            // keys are guaranteed to match ORDERED_COLUMNS / COLUMN_TYPE_MAP.
            info.put(CREATE_PARAMS, null);
            info.put(NULLABLE, typeNullable);
            info.put(SEARCHABLE, typeSearchable);
            info.put(FIXED_PREC_SCALE, false);
            info.put(AUTO_INCREMENT, false);
            info.put(LOCAL_TYPE_NAME, null);
            info.put(SQL_DATA_TYPE, null);
            info.put(SQL_DATETIME_SUB, null);
        }
    }

    /**
     * Appends one type-info row describing a single SQL type.
     *
     * @param typeInfo            Row list to append to.
     * @param typeName            Database-specific type name.
     * @param dataType            SQL data type from {@link java.sql.Types}.
     * @param isText              Whether the type is textual (drives quoting and case sensitivity).
     * @param isNumeric           Whether the type is numeric (drives scale and radix columns).
     * @param isUnsignedAttribute Whether the type is unsigned.
     */
    protected static void putInfo(final List<Map<String, Object>> typeInfo, final String typeName, final int dataType,
                                  final boolean isText, final boolean isNumeric, final boolean isUnsignedAttribute) {
        final Map<String, Object> info = new HashMap<>();
        info.put(TYPE_NAME, typeName);
        info.put(DATA_TYPE, dataType);
        info.put(PRECISION, getTypePrecision(dataType));
        info.put(UNSIGNED_ATTRIBUTE, isUnsignedAttribute);
        info.put(CASE_SENSITIVE, isText);
        // Text types are quoted with single quotes; other types have no literal affixes.
        info.put(LITERAL_PREFIX, isText ? "'" : null);
        info.put(LITERAL_SUFFIX, isText ? "'" : null);
        info.put(MINIMUM_SCALE, isNumeric ? 0 : null);
        info.put(MAXIMUM_SCALE, isNumeric ? 0 : null);
        info.put(NUM_PREC_RADIX, isNumeric ? 10 : null);
        typeInfo.add(info);
    }

    /**
     * Convenience overload of
     * {@link #putInfo(List, String, int, boolean, boolean, boolean)} for signed types.
     *
     * @param typeInfo  Row list to append to.
     * @param typeName  Database-specific type name.
     * @param dataType  SQL data type from {@link java.sql.Types}.
     * @param isText    Whether the type is textual.
     * @param isNumeric Whether the type is numeric.
     */
    protected static void putInfo(final List<Map<String, Object>> typeInfo, final String typeName, final int dataType,
                                  final boolean isText, final boolean isNumeric) {
        putInfo(typeInfo, typeName, dataType, isText, isNumeric, false);
    }

    /**
     * ResultSetGetTypeInfo constructor, initializes super class.
     *
     * @param statement Statement Object.
     * @param rows      List of type information.
     */
    public ResultSetGetTypeInfo(final Statement statement, final List<Map<String, Object>> rows) {
        super(statement, ORDERED_COLUMNS, rows.size());
        this.typeInformation = rows;
    }

    /**
     * Returns the raw value of the given 1-based column for the current row and
     * records whether it was SQL NULL.
     *
     * @param columnIndex 1-based column index.
     * @return The column value (may be null).
     * @throws SQLException if the cursor or column index is out of range, or the
     *                      column key is missing from the current row.
     */
    @Override
    public Object getConvertedValue(final int columnIndex) throws SQLException {
        verifyOpen();
        final int index = getRowIndex();
        if ((index < 0) || (index >= this.typeInformation.size())) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_INDEX, getRowIndex() + 1,
                            this.typeInformation.size());
        }
        if ((columnIndex <= 0) || (columnIndex > ORDERED_COLUMNS.size())) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX, columnIndex,
                            ORDERED_COLUMNS.size());
        }
        final String key = ORDERED_COLUMNS.get(columnIndex - 1);
        if (this.typeInformation.get(index).containsKey(key)) {
            final Object data = this.typeInformation.get(index).get(key);
            this.wasNull = (data == null);
            return data;
        } else {
            final String errorMessage = "Could not get TypeInfo column: " + key;
            LOGGER.error(errorMessage);
            throw new SQLException(errorMessage);
        }
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        // Report the per-column Java classes in the same order as ORDERED_COLUMNS.
        final List<Class<?>> rowTypes = new ArrayList<>();
        for (final String column : ORDERED_COLUMNS) {
            rowTypes.add(COLUMN_TYPE_MAP.get(column));
        }
        return new GremlinResultSetMetadata(ORDERED_COLUMNS, rowTypes);
    }

    @Override
    public boolean wasNull() throws SQLException {
        return this.wasNull;
    }

    @Override
    protected void doClose() throws SQLException {
        // No resources to release; rows are held in memory.
    }
}
| 7,500 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherConnectionProperties.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import lombok.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.Connection;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.net.URI;
import java.net.URISyntaxException;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
/**
* OpenCypher connection properties class.
*/
public class OpenCypherConnectionProperties extends ConnectionProperties {
    public static final String ENDPOINT_KEY = "endpoint";
    public static final String USE_ENCRYPTION_KEY = "useEncryption";
    public static final String CONNECTION_POOL_SIZE_KEY = "connectionPoolSize";
    // TODO: Revisit. We should probably support these.
    public static final String AWS_CREDENTIALS_PROVIDER_CLASS_KEY = "awsCredentialsProviderClass";
    public static final String CUSTOM_CREDENTIALS_FILE_PATH_KEY = "customCredentialsFilePath";
    public static final int DEFAULT_CONNECTION_POOL_SIZE = 1000;
    public static final boolean DEFAULT_USE_ENCRYPTION = true;
    public static final Map<String, Object> DEFAULT_PROPERTIES_MAP = new HashMap<>();
    private static final Map<String, ConnectionProperties.PropertyConverter<?>> PROPERTY_CONVERTER_MAP =
            new HashMap<>();
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenCypherConnectionProperties.class);

    static {
        // Converters validate/normalize raw property strings on assignment.
        PROPERTY_CONVERTER_MAP.put(AWS_CREDENTIALS_PROVIDER_CLASS_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(CUSTOM_CREDENTIALS_FILE_PATH_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(ENDPOINT_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SERVICE_REGION_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(USE_ENCRYPTION_KEY, ConnectionProperties::toBoolean);
        PROPERTY_CONVERTER_MAP.put(CONNECTION_POOL_SIZE_KEY, ConnectionProperties::toUnsigned);
    }

    static {
        DEFAULT_PROPERTIES_MAP.put(ENDPOINT_KEY, "");
        DEFAULT_PROPERTIES_MAP.put(USE_ENCRYPTION_KEY, DEFAULT_USE_ENCRYPTION);
        DEFAULT_PROPERTIES_MAP.put(CONNECTION_POOL_SIZE_KEY, DEFAULT_CONNECTION_POOL_SIZE);
    }

    /**
     * OpenCypherConnectionProperties constructor.
     */
    public OpenCypherConnectionProperties() throws SQLException {
        super(new Properties(), DEFAULT_PROPERTIES_MAP, PROPERTY_CONVERTER_MAP);
    }

    /**
     * OpenCypherConnectionProperties constructor.
     *
     * @param properties Properties to examine and extract key details from.
     */
    public OpenCypherConnectionProperties(final Properties properties) throws SQLException {
        super(properties, DEFAULT_PROPERTIES_MAP, PROPERTY_CONVERTER_MAP);
    }

    /**
     * Converts a raw string value to an {@link AuthScheme}.
     *
     * @param key   Property key (used in the error message).
     * @param value Raw property value.
     * @return The parsed auth scheme, or the default when the value is blank.
     * @throws SQLException if the value does not name a known auth scheme.
     */
    protected static AuthScheme toAuthScheme(@NonNull final String key, @NonNull final String value)
            throws SQLException {
        if (isWhitespace(value)) {
            return DEFAULT_AUTH_SCHEME;
        }
        // Parse once; fromString returns null for unrecognized values.
        final AuthScheme authScheme = AuthScheme.fromString(value);
        if (authScheme == null) {
            throw invalidConnectionPropertyError(key, value);
        }
        return authScheme;
    }

    protected boolean isEncryptionEnabled() {
        return getUseEncryption();
    }

    // Parses the configured endpoint as a URI, wrapping syntax errors as SQLException.
    private URI getUri() throws SQLException {
        try {
            return new URI(getEndpoint());
        } catch (final URISyntaxException e) {
            throw new SQLException(e);
        }
    }

    @Override
    public String getHostname() throws SQLException {
        return getUri().getHost();
    }

    @Override
    public int getPort() throws SQLException {
        return getUri().getPort();
    }

    @Override
    public void sshTunnelOverride(final int port) throws SQLException {
        // Rewrite the endpoint to point at the local SSH tunnel port.
        setEndpoint(String.format("%s://%s:%d", getUri().getScheme(), getHostname(), port));
    }

    /**
     * Gets the connection endpoint.
     *
     * @return The connection endpoint.
     */
    public String getEndpoint() {
        return getProperty(ENDPOINT_KEY);
    }

    /**
     * Sets the connection endpoint.
     *
     * @param endpoint The connection endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setEndpoint(@NonNull final String endpoint) throws SQLException {
        setProperty(ENDPOINT_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ENDPOINT_KEY).convert(ENDPOINT_KEY, endpoint));
    }

    /**
     * Gets the AWS credentials provider class.
     *
     * @return The AWS credentials provider class, or null if unset.
     */
    public String getAwsCredentialsProviderClass() {
        if (!containsKey(AWS_CREDENTIALS_PROVIDER_CLASS_KEY)) {
            return null;
        }
        return getProperty(AWS_CREDENTIALS_PROVIDER_CLASS_KEY);
    }

    /**
     * Sets the AWS credentials provider class.
     *
     * @param awsCredentialsProviderClass The AWS credentials provider class.
     * @throws SQLException if value is invalid.
     */
    public void setAwsCredentialsProviderClass(@NonNull final String awsCredentialsProviderClass) throws SQLException {
        setProperty(AWS_CREDENTIALS_PROVIDER_CLASS_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(AWS_CREDENTIALS_PROVIDER_CLASS_KEY)
                        .convert(AWS_CREDENTIALS_PROVIDER_CLASS_KEY, awsCredentialsProviderClass));
    }

    /**
     * Gets the custom credentials filepath.
     *
     * @return The custom credentials filepath, or null if unset.
     */
    public String getCustomCredentialsFilePath() {
        if (!containsKey(CUSTOM_CREDENTIALS_FILE_PATH_KEY)) {
            return null;
        }
        return getProperty(CUSTOM_CREDENTIALS_FILE_PATH_KEY);
    }

    /**
     * Sets the custom credentials filepath.
     *
     * @param customCredentialsFilePath The custom credentials filepath.
     * @throws SQLException if value is invalid.
     */
    public void setCustomCredentialsFilePath(@NonNull final String customCredentialsFilePath) throws SQLException {
        setProperty(CUSTOM_CREDENTIALS_FILE_PATH_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(CUSTOM_CREDENTIALS_FILE_PATH_KEY)
                        .convert(CUSTOM_CREDENTIALS_FILE_PATH_KEY, customCredentialsFilePath));
    }

    /**
     * Gets the use encryption.
     *
     * @return The use encryption.
     */
    public boolean getUseEncryption() {
        return (boolean) get(USE_ENCRYPTION_KEY);
    }

    /**
     * Sets the use encryption. Disabling encryption is rejected while the auth
     * scheme is IAMSigV4, which requires an encrypted connection.
     *
     * @param useEncryption The use encryption.
     * @throws SQLClientInfoException if encryption is disabled under IAMSigV4.
     */
    public void setUseEncryption(final boolean useEncryption) throws SQLClientInfoException {
        if (!useEncryption && getAuthScheme().equals(AuthScheme.IAMSigV4)) {
            // Fix: error message previously misspelled the property as "useEncrpytion";
            // use the key constant so the reported name always matches the real property.
            throw SqlError.createSQLClientInfoException(
                    LOGGER,
                    Connection.getFailures(USE_ENCRYPTION_KEY, "true"),
                    SqlError.INVALID_CONNECTION_PROPERTY, USE_ENCRYPTION_KEY,
                    "'false' when authScheme is set to 'IAMSigV4'");
        }
        put(USE_ENCRYPTION_KEY, useEncryption);
    }

    /**
     * Gets the connection pool size.
     *
     * @return The connection pool size.
     */
    public int getConnectionPoolSize() {
        return (int) get(CONNECTION_POOL_SIZE_KEY);
    }

    /**
     * Sets the connection pool size.
     *
     * @param connectionPoolSize The connection pool size; must be non-negative.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionPoolSize(final int connectionPoolSize) throws SQLException {
        if (connectionPoolSize < 0) {
            throw invalidConnectionPropertyError(CONNECTION_POOL_SIZE_KEY, connectionPoolSize);
        }
        put(CONNECTION_POOL_SIZE_KEY, connectionPoolSize);
    }

    /**
     * Validate the supported properties.
     */
    @Override
    protected void validateProperties() throws SQLException {
        if (getAuthScheme() != null && getAuthScheme().equals(AuthScheme.IAMSigV4)) {
            // If IAMSigV4 is specified, we need the region provided to us.
            validateServiceRegionEnvVariable();
            if (!getUseEncryption()) {
                throw invalidConnectionPropertyValueError(USE_ENCRYPTION_KEY,
                        "Encryption must be enabled if IAMSigV4 is used");
            }
        }
    }

    /**
     * Check if the property is supported by the driver.
     *
     * @param name The name of the property.
     * @return {@code true} if property is supported; {@code false} otherwise.
     */
    @Override
    public boolean isSupportedProperty(final String name) {
        return containsKey(name);
    }
}
| 7,501 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherQueryExecutor.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import org.neo4j.driver.AuthToken;
import org.neo4j.driver.AuthTokens;
import org.neo4j.driver.Config;
import org.neo4j.driver.Driver;
import org.neo4j.driver.GraphDatabase;
import org.neo4j.driver.Record;
import org.neo4j.driver.Result;
import org.neo4j.driver.Session;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.gremlindatamodel.MetadataCache;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSet;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetCatalogs;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetColumns;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetSchemas;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetTableTypes;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetTables;
import software.aws.neptune.opencypher.resultset.OpenCypherResultSetGetTypeInfo;
import java.lang.reflect.Constructor;
import java.sql.SQLException;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
 * Executes openCypher queries against Neptune via the Neo4j Bolt driver.
 *
 * Concurrency notes (from the visible code): a single Bolt {@link Driver} is cached
 * in a static field and guarded by DRIVER_LOCK; the per-executor {@link Session} is
 * guarded by the instance-level sessionLock so performCancel() can reset a query
 * in flight.
 */
public class OpenCypherQueryExecutor extends QueryExecutor {
private static final Logger LOGGER = LoggerFactory.getLogger(OpenCypherQueryExecutor.class);
// Guards the static cached driver and its associated previous-properties snapshot.
private static final Object DRIVER_LOCK = new Object();
// Properties used to build the cached driver; compared to detect when a rebuild is needed.
private static OpenCypherConnectionProperties previousOpenCypherConnectionProperties = null;
// Cached driver shared across executors; null until first use or after close().
private static Driver driver = null;
private final OpenCypherConnectionProperties openCypherConnectionProperties;
// Guards the session field so performCancel() sees a consistent value.
private final Object sessionLock = new Object();
// Session of the currently running query; null when no query is in flight.
private Session session = null;
OpenCypherQueryExecutor(final OpenCypherConnectionProperties openCypherConnectionProperties) {
this.openCypherConnectionProperties = openCypherConnectionProperties;
}
/**
 * Function to close down the driver.
 */
public static void close() {
synchronized (DRIVER_LOCK) {
if (driver != null) {
driver.close();
driver = null;
}
}
}
// Builds a new Bolt driver, attaching an IAM SigV4 auth token when that scheme is configured.
private static Driver createDriver(final Config config,
final OpenCypherConnectionProperties openCypherConnectionProperties)
throws SQLException {
AuthToken authToken = AuthTokens.none();
if (openCypherConnectionProperties.getAuthScheme().equals(AuthScheme.IAMSigV4)) {
LOGGER.info("Creating driver with IAMSigV4 authentication.");
authToken = OpenCypherIAMRequestGenerator
.createAuthToken(openCypherConnectionProperties.getEndpoint(),
openCypherConnectionProperties.getServiceRegion());
}
return GraphDatabase.driver(openCypherConnectionProperties.getEndpoint(), authToken, config);
}
// Returns either a fresh driver (returnNew) or the cached one, rebuilding when the
// connection properties changed. Callers must hold DRIVER_LOCK when returnNew is false.
// NOTE(review): when properties change, the superseded cached driver is never closed
// here — possible connection-pool leak; confirm whether close() is expected elsewhere.
private static Driver getDriver(final Config config,
final OpenCypherConnectionProperties openCypherConnectionProperties,
final boolean returnNew)
throws SQLException {
if (returnNew) {
return createDriver(config, openCypherConnectionProperties);
}
if ((driver == null) ||
!propertiesEqual(previousOpenCypherConnectionProperties, openCypherConnectionProperties)) {
previousOpenCypherConnectionProperties = openCypherConnectionProperties;
return createDriver(config, openCypherConnectionProperties);
}
return driver;
}
/**
 * Function to return max fetch size.
 *
 * @return Max fetch size (Integer max value).
 */
@Override
public int getMaxFetchSize() {
return Integer.MAX_VALUE;
}
/**
 * Verify that connection to database is functional.
 *
 * @param timeout Time in seconds to wait for the database operation used to validate the connection to complete.
 * @return true if the connection is valid, otherwise false.
 */
public boolean isValid(final int timeout) {
try {
// Use a throwaway driver with the caller's timeout so the shared cached driver
// (and its configured timeout) is not disturbed by the validity probe.
final Config config = createConfigBuilder().withConnectionTimeout(timeout, TimeUnit.SECONDS).build();
final Driver tempDriver;
synchronized (DRIVER_LOCK) {
tempDriver = getDriver(config, openCypherConnectionProperties, true);
}
tempDriver.verifyConnectivity();
return true;
} catch (final Exception e) {
LOGGER.error("Connection to database returned an error:", e);
return false;
}
}
// Translates the connection properties (encryption, pool size, timeout) into a
// Bolt driver Config builder.
private Config.ConfigBuilder createConfigBuilder() {
final Config.ConfigBuilder configBuilder = Config.builder();
final boolean useEncryption = openCypherConnectionProperties.getUseEncryption();
if (useEncryption) {
LOGGER.info("Creating driver with encryption.");
configBuilder.withEncryption();
// Neptune endpoints are trusted by hostname; certificate validation is skipped here.
configBuilder.withTrustStrategy(Config.TrustStrategy.trustAllCertificates());
} else {
LOGGER.info("Creating driver without encryption.");
configBuilder.withoutEncryption();
}
configBuilder.withMaxConnectionPoolSize(openCypherConnectionProperties.getConnectionPoolSize());
configBuilder
.withConnectionTimeout(openCypherConnectionProperties.getConnectionTimeoutMillis(),
TimeUnit.MILLISECONDS);
return configBuilder;
}
/**
 * Function to execute query.
 *
 * @param sql Query to execute.
 * @param statement java.sql.Statement Object required for result set.
 * @return java.sql.ResultSet object returned from query execution.
 * @throws SQLException if query execution fails, or it was cancelled.
 */
@Override
public java.sql.ResultSet executeQuery(final String sql, final java.sql.Statement statement) throws
SQLException {
// The result-set constructor is passed reflectively so the superclass can build
// the result set after the cancellable query completes.
final Constructor<?> constructor;
try {
constructor = OpenCypherResultSet.class
.getConstructor(java.sql.Statement.class, OpenCypherResultSet.ResultSetInfoWithRows.class);
} catch (final NoSuchMethodException e) {
throw SqlError.createSQLException(
LOGGER,
SqlState.INVALID_QUERY_EXPRESSION,
SqlError.QUERY_FAILED, e);
}
return runCancellableQuery(constructor, statement, sql);
}
/**
 * Function to get tables.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @param tableName String table name with colon delimits.
 * @return java.sql.ResultSet object returned from query execution.
 * @throws SQLException if query execution fails, or it was cancelled.
 */
@Override
public java.sql.ResultSet executeGetTables(final java.sql.Statement statement, final String tableName)
throws SQLException {
final String endpoint = this.openCypherConnectionProperties.getEndpoint();
// Table metadata is served from the (lazily refreshed) graph schema cache.
MetadataCache.updateCacheIfNotUpdated(openCypherConnectionProperties);
return new OpenCypherResultSetGetTables(statement,
MetadataCache.getFilteredCacheNodeColumnInfos(tableName, endpoint),
MetadataCache.getFilteredResultSetInfoWithoutRowsForTables(tableName, endpoint));
}
/**
 * Function to get schema.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @return java.sql.ResultSet Object containing schemas.
 * @throws SQLException if query execution fails, or it was cancelled.
 */
@Override
public java.sql.ResultSet executeGetSchemas(final java.sql.Statement statement)
throws SQLException {
return new OpenCypherResultSetGetSchemas(statement);
}
/**
 * Function to get catalogs.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @return java.sql.ResultSet Object containing catalogs.
 */
@Override
public java.sql.ResultSet executeGetCatalogs(final java.sql.Statement statement) {
return new OpenCypherResultSetGetCatalogs(statement);
}
/**
 * Function to get table types.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @return java.sql.ResultSet Object containing table types.
 */
@Override
public java.sql.ResultSet executeGetTableTypes(final java.sql.Statement statement) {
return new OpenCypherResultSetGetTableTypes(statement);
}
/**
 * Function to get columns.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @param nodes String containing nodes to get schema for.
 * @return java.sql.ResultSet Object containing columns.
 */
@Override
public java.sql.ResultSet executeGetColumns(final java.sql.Statement statement, final String nodes)
throws SQLException {
final String endpoint = this.openCypherConnectionProperties.getEndpoint();
// Column metadata comes from the same schema cache as executeGetTables.
MetadataCache.updateCacheIfNotUpdated(openCypherConnectionProperties);
return new OpenCypherResultSetGetColumns(statement,
MetadataCache.getFilteredCacheNodeColumnInfos(nodes, endpoint),
MetadataCache.getFilteredResultSetInfoWithoutRowsForColumns(nodes, endpoint));
}
/**
 * Function to get type info.
 *
 * @param statement java.sql.Statement Object required for result set.
 * @return java.sql.ResultSet Object containing type info.
 */
@Override
public java.sql.ResultSet executeGetTypeInfo(final java.sql.Statement statement)
throws SQLException {
return new OpenCypherResultSetGetTypeInfo(statement);
}
// Runs the query on a fresh session from the (possibly rebuilt) cached driver.
// The session is published under sessionLock while the query runs so that
// performCancel() can reset it, then cleared once results are materialized.
@Override
@SuppressWarnings("unchecked")
protected <T> T runQuery(final String query) throws SQLException {
synchronized (sessionLock) {
synchronized (DRIVER_LOCK) {
driver = getDriver(createConfigBuilder().build(), openCypherConnectionProperties, false);
}
session = driver.session();
}
// session.run is executed outside sessionLock so cancellation can reach it.
final Result result = session.run(query);
final List<Record> rows = result.list();
final List<String> columns = result.keys();
// The session is handed to the result set (not closed here); the result set owns it.
final OpenCypherResultSet.ResultSetInfoWithRows openCypherResultSet =
new OpenCypherResultSet.ResultSetInfoWithRows(session, result, rows, columns);
synchronized (sessionLock) {
session = null;
}
return (T) openCypherResultSet;
}
// Cancels a running query, if any, by resetting the active session.
@Override
protected void performCancel() throws SQLException {
synchronized (sessionLock) {
if (session != null) {
//noinspection deprecation
session.reset();
}
}
}
}
| 7,502 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherTypeMapping.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import org.neo4j.driver.Value;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Node;
import org.neo4j.driver.types.Point;
import org.neo4j.driver.types.Relationship;
import org.neo4j.driver.types.Type;
import software.aws.neptune.jdbc.utilities.JdbcType;
import java.util.HashMap;
import java.util.Map;
/**
* OpenCypher type mapping class to simplify type conversion and mapping.
*/
public class OpenCypherTypeMapping {
public static final Map<Type, JdbcType> BOLT_TO_JDBC_TYPE_MAP = new HashMap<>();
public static final Map<Type, Class<?>> BOLT_TO_JAVA_TYPE_MAP = new HashMap<>();
public static final Map<Type, Converter<?>> BOLT_TO_JAVA_TRANSFORM_MAP = new HashMap<>();
public static final Converter<String> NODE_CONVERTER = new NodeConverter();
public static final Converter<String> RELATIONSHIP_CONVERTER = new RelationshipConverter();
public static final Converter<String> PATH_CONVERTER = new PathConverter();
public static final Converter<String> POINT_CONVERTER = new PointConverter();
static {
// Bolt->JDBC mapping.
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.ANY(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BOOLEAN(), JdbcType.BIT);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BYTES(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.STRING(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NUMBER(), JdbcType.DOUBLE);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.INTEGER(), JdbcType.BIGINT);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.FLOAT(), JdbcType.DOUBLE);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LIST(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.MAP(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NODE(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.RELATIONSHIP(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.PATH(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.POINT(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE(), JdbcType.DATE);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.TIME(), JdbcType.TIME);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_TIME(), JdbcType.TIME);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_DATE_TIME(), JdbcType.TIMESTAMP);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE_TIME(), JdbcType.TIMESTAMP);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DURATION(), JdbcType.VARCHAR);
BOLT_TO_JDBC_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NULL(), JdbcType.NULL);
// Bolt->Java mapping.
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.ANY(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BOOLEAN(), Boolean.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BYTES(), byte[].class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.STRING(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NUMBER(), Double.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.INTEGER(), Long.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.FLOAT(), Double.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LIST(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.MAP(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NODE(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.RELATIONSHIP(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.PATH(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.POINT(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE(), java.sql.Date.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.TIME(), java.sql.Time.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_TIME(), java.sql.Time.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_DATE_TIME(), java.sql.Timestamp.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE_TIME(), java.sql.Timestamp.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DURATION(), String.class);
BOLT_TO_JAVA_TYPE_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NULL(), Object.class);
BOLT_TO_JAVA_TRANSFORM_MAP.put(null, (Value v) -> null);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.ANY(), Value::toString);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BOOLEAN(), Value::asBoolean);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.BYTES(), Value::asByteArray);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.STRING(), Value::asString);
BOLT_TO_JAVA_TRANSFORM_MAP
.put(InternalTypeSystem.TYPE_SYSTEM.NUMBER(), (Value v) -> v.asNumber().doubleValue());
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.INTEGER(), Value::asLong);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.FLOAT(), Value::asDouble);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LIST(), Value::asList);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.MAP(), Value::asMap);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NODE(), NODE_CONVERTER);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.RELATIONSHIP(), RELATIONSHIP_CONVERTER);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.PATH(), PATH_CONVERTER);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.POINT(), POINT_CONVERTER);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE(), Value::asLocalDate);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.TIME(), Value::asOffsetTime);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_TIME(), Value::asLocalTime);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.LOCAL_DATE_TIME(), Value::asLocalDateTime);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DATE_TIME(), Value::asZonedDateTime);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.DURATION(), Value::asIsoDuration);
BOLT_TO_JAVA_TRANSFORM_MAP.put(InternalTypeSystem.TYPE_SYSTEM.NULL(), (Value v) -> null);
}
/**
 * Converter interface to convert a Bolt driver {@code Value} to a Java type.
 * Implementations are expected to be stateless so a single instance can be
 * shared (see the singleton constants above).
 *
 * @param <T> Java type to convert to.
 */
public interface Converter<T> {
/**
 * Function to perform conversion.
 *
 * @param value Input value to convert.
 * @return Converted value.
 */
T convert(Value value);
}
/**
 * Renders a Bolt Point as "(x, y)" or "(x, y, z)" text.
 */
static class PointConverter implements Converter<String> {
    @Override
    public String convert(final Value value) {
        return convert(value.asPoint());
    }

    /**
     * Renders a Point; a 2D point (z reported as NaN by the driver — TODO confirm
     * against the driver version in use) omits the z component.
     *
     * @param value Point to render.
     * @return Coordinate string, e.g. "(1.000000, 2.000000)".
     */
    public String convert(final Point value) {
        // Locale.ROOT pins the decimal separator to '.', so the rendered text is
        // identical regardless of the JVM's default locale. (Previously this used
        // the default locale, producing e.g. commas on a German JVM.)
        return Double.isNaN(value.z()) ?
                String.format(java.util.Locale.ROOT, "(%f, %f)", value.x(), value.y()) :
                String.format(java.util.Locale.ROOT, "(%f, %f, %f)", value.x(), value.y(), value.z());
    }
}
/**
 * Renders a Bolt Node in the parenthesized OpenCypher form, e.g. the "(node)"
 * pieces of (node)-[relationship]->(node).
 */
static class NodeConverter implements Converter<String> {
    @Override
    public String convert(final Value value) {
        return convert(value.asNode());
    }

    /**
     * Renders a Node as "(labels : properties)".
     *
     * @param value Node to render.
     * @return Parenthesized node string.
     */
    public String convert(final Node value) {
        // %s applies String.valueOf() itself, so the explicit .toString() calls the
        // previous version made on labels() and asMap() were redundant.
        return String.format("(%s : %s)", value.labels(), value.asMap());
    }
}
/**
 * Renders a Bolt Relationship in the bracketed OpenCypher form, e.g. the
 * "[relationship]" piece of (node)-[relationship]->(node).
 */
static class RelationshipConverter implements Converter<String> {
    @Override
    public String convert(final Value value) {
        return convert(value.asRelationship());
    }

    /**
     * Renders a Relationship as "[type : properties]".
     *
     * @param value Relationship to render.
     * @return Bracketed relationship string.
     */
    public String convert(final Relationship value) {
        return "[" + value.type() + " : " + value.asMap() + "]";
    }
}
/**
 * Renders a Bolt Path as a chain of nodes and relationships, e.g.
 * (start)-[rel]->(middle)<-[rel]-(end).
 */
static class PathConverter implements Converter<String> {
    @Override
    public String convert(final Value value) {
        final NodeConverter nodeConverter = (NodeConverter) NODE_CONVERTER;
        final RelationshipConverter relationshipConverter = (RelationshipConverter) RELATIONSHIP_CONVERTER;
        final StringBuilder pathText = new StringBuilder();
        value.asPath().iterator().forEachRemaining(segment -> {
            final Node segmentStart = segment.start();
            // Only the very first segment contributes its start node; every later
            // segment chains onto the node that was already appended.
            if (pathText.length() == 0) {
                pathText.append(nodeConverter.convert(segmentStart));
            }
            final Relationship segmentRelationship = segment.relationship();
            // When the relationship originates at the segment's start node the arrow
            // points forward: (start)-[rel]->(end); otherwise it points back:
            // (start)<-[rel]-(end).
            final boolean pointsForward = segmentRelationship.startNodeId() == segmentStart.id();
            final String template = pointsForward ? "-%s->%s" : "<-%s-%s";
            pathText.append(String.format(template,
                    relationshipConverter.convert(segmentRelationship),
                    nodeConverter.convert(segment.end())));
        });
        return pathText.toString();
    }
}
}
| 7,503 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherPooledConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import software.aws.neptune.jdbc.PooledConnection;
import java.sql.SQLException;
/**
 * OpenCypher implementation of PooledConnection.
 */
public class OpenCypherPooledConnection extends PooledConnection implements javax.sql.PooledConnection {
/**
 * OpenCypherPooledConnection constructor, initializes super class.
 *
 * @param connection Connection Object.
 */
public OpenCypherPooledConnection(final java.sql.Connection connection) {
super(connection);
}
// NOTE(review): this creates a brand-new logical connection with *default*
// OpenCypherConnectionProperties and never consults the physical connection
// passed to the constructor — any properties configured on the pooled
// connection are silently dropped here. Confirm this is intended.
@Override
public java.sql.Connection getConnection() throws SQLException {
return new OpenCypherConnection(new OpenCypherConnectionProperties());
}
}
| 7,504 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherDataSource.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.NeptuneDriver;
import software.aws.neptune.jdbc.DataSource;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.SqlError;
import javax.sql.PooledConnection;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
/**
 * OpenCypher implementation of DataSource.
 */
public class OpenCypherDataSource extends DataSource
        implements javax.sql.DataSource, javax.sql.ConnectionPoolDataSource {
    public static final String OPEN_CYPHER_PREFIX = NeptuneDriver.CONN_STRING_PREFIX + "opencypher://";
    private static final Logger LOGGER = LoggerFactory.getLogger(OpenCypherDataSource.class);
    // javax.sql.DataSource exposes the login timeout in *seconds*, while the
    // underlying connection property stores it in *milliseconds*.
    private static final int MILLIS_PER_SECOND = 1000;
    private final OpenCypherConnectionProperties connectionProperties;

    /**
     * OpenCypherDataSource constructor, initializes super class.
     *
     * @throws SQLException if the default connection properties cannot be created.
     */
    OpenCypherDataSource() throws SQLException {
        super();
        this.connectionProperties = new OpenCypherConnectionProperties();
    }

    @Override
    public java.sql.Connection getConnection() throws SQLException {
        return DriverManager.getConnection(OPEN_CYPHER_PREFIX, connectionProperties);
    }

    @Override
    public Connection getConnection(final String username, final String password) throws SQLException {
        // Username/password authentication is not supported for this data source.
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    public PooledConnection getPooledConnection() throws SQLException {
        return new OpenCypherPooledConnection(getConnection());
    }

    @Override
    public PooledConnection getPooledConnection(final String user, final String password) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    /**
     * Gets the timeout for opening a connection.
     *
     * @return the connection timeout in seconds, per the {@link javax.sql.DataSource} contract.
     */
    @Override
    public int getLoginTimeout() throws SQLException {
        // Bug fix: previously returned the raw millisecond value. The JDBC contract
        // for getLoginTimeout is seconds, so convert.
        return connectionProperties.getConnectionTimeoutMillis() / MILLIS_PER_SECOND;
    }

    /**
     * Sets the timeout for opening a connection.
     *
     * @param seconds The connection timeout in seconds.
     * @throws SQLException if timeout is negative.
     */
    @Override
    public void setLoginTimeout(final int seconds) throws SQLException {
        // Bug fix: previously stored the seconds value unconverted into a
        // milliseconds-denominated property.
        connectionProperties.setConnectionTimeoutMillis(seconds * MILLIS_PER_SECOND);
    }

    /**
     * Gets the application name.
     *
     * @return The application name.
     */
    public String getApplicationName() {
        return connectionProperties.getApplicationName();
    }

    /**
     * Sets the application name.
     *
     * @param applicationName The application name.
     * @throws SQLException if value is invalid.
     */
    public void setApplicationName(final String applicationName) throws SQLException {
        connectionProperties.setApplicationName(applicationName);
    }

    /**
     * Gets the AWS credentials provider class.
     *
     * @return The AWS credentials provider class.
     */
    public String getAwsCredentialsProviderClass() {
        return connectionProperties.getAwsCredentialsProviderClass();
    }

    /**
     * Sets the AWS credentials provider class.
     *
     * @param awsCredentialsProviderClass The AWS credentials provider class.
     * @throws SQLException if value is invalid.
     */
    public void setAwsCredentialsProviderClass(final String awsCredentialsProviderClass) throws SQLException {
        connectionProperties.setAwsCredentialsProviderClass(awsCredentialsProviderClass);
    }

    /**
     * Gets the custom credentials filepath.
     *
     * @return The custom credentials filepath.
     */
    public String getCustomCredentialsFilePath() {
        return connectionProperties.getCustomCredentialsFilePath();
    }

    /**
     * Sets the custom credentials filepath.
     *
     * @param customCredentialsFilePath The custom credentials filepath.
     * @throws SQLException if value is invalid.
     */
    public void setCustomCredentialsFilePath(final String customCredentialsFilePath) throws SQLException {
        connectionProperties.setCustomCredentialsFilePath(customCredentialsFilePath);
    }

    /**
     * Gets the connection endpoint.
     *
     * @return The connection endpoint.
     */
    public String getEndpoint() {
        return connectionProperties.getEndpoint();
    }

    /**
     * Sets the connection endpoint.
     *
     * @param endpoint The connection endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setEndpoint(final String endpoint) throws SQLException {
        connectionProperties.setEndpoint(endpoint);
    }

    /**
     * Gets the connection timeout in milliseconds.
     *
     * @return The connection timeout in milliseconds.
     */
    public int getConnectionTimeoutMillis() {
        return connectionProperties.getConnectionTimeoutMillis();
    }

    /**
     * Sets the connection timeout in milliseconds.
     *
     * @param timeoutMillis The connection timeout in milliseconds.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionTimeoutMillis(final int timeoutMillis) throws SQLException {
        connectionProperties.setConnectionTimeoutMillis(timeoutMillis);
    }

    /**
     * Gets the connection retry count.
     *
     * @return The connection retry count.
     */
    public int getConnectionRetryCount() {
        return connectionProperties.getConnectionRetryCount();
    }

    /**
     * Sets the connection retry count.
     *
     * @param retryCount The connection retry count.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionRetryCount(final int retryCount) throws SQLException {
        connectionProperties.setConnectionRetryCount(retryCount);
    }

    /**
     * Gets the authentication scheme.
     *
     * @return The authentication scheme.
     */
    public AuthScheme getAuthScheme() {
        return connectionProperties.getAuthScheme();
    }

    /**
     * Sets the authentication scheme.
     *
     * @param authScheme The authentication scheme.
     * @throws SQLException if value is invalid.
     */
    public void setAuthScheme(final AuthScheme authScheme) throws SQLException {
        connectionProperties.setAuthScheme(authScheme);
    }

    /**
     * Gets whether encryption is enabled.
     *
     * @return Whether encryption is enabled.
     */
    public boolean getUseEncryption() {
        return connectionProperties.getUseEncryption();
    }

    /**
     * Sets whether encryption is enabled.
     *
     * @param useEncryption Whether encryption should be enabled.
     * @throws SQLClientInfoException if the property cannot be set.
     */
    public void setUseEncryption(final boolean useEncryption) throws SQLClientInfoException {
        connectionProperties.setUseEncryption(useEncryption);
    }
}
| 7,505 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import lombok.Getter;
import lombok.NonNull;
import software.aws.neptune.NeptuneDatabaseMetadata;
import software.aws.neptune.jdbc.Connection;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
/**
 * OpenCypher implementation of Connection.
 */
public class OpenCypherConnection extends Connection implements java.sql.Connection {
// OpenCypher-specific view of the generic properties; built once in the constructor.
@Getter
private final OpenCypherConnectionProperties openCypherConnectionProperties;
/**
 * OpenCypherConnection constructor, initializes super class.
 *
 * @param connectionProperties ConnectionProperties Object.
 * @throws SQLException if the properties cannot be converted to OpenCypher properties.
 */
public OpenCypherConnection(@NonNull final ConnectionProperties connectionProperties) throws SQLException {
super(connectionProperties);
openCypherConnectionProperties = new OpenCypherConnectionProperties(getConnectionProperties());
}
// Releases executor resources on close. Note this calls the *static*
// OpenCypherQueryExecutor.close(), so the teardown is shared across connections.
@Override
public void doClose() {
OpenCypherQueryExecutor.close();
}
@Override
public DatabaseMetaData getMetaData() {
// A fresh metadata wrapper per call; it holds only a reference to this connection.
return new NeptuneDatabaseMetadata(this);
}
@Override
public QueryExecutor getQueryExecutor() throws SQLException {
// A new executor per call, configured from this connection's properties.
return new OpenCypherQueryExecutor(getOpenCypherConnectionProperties());
}
}
| 7,506 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/OpenCypherIAMRequestGenerator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher;
import com.amazonaws.DefaultRequest;
import com.amazonaws.Request;
import com.amazonaws.auth.AWS4Signer;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.http.HttpMethodName;
import com.google.gson.Gson;
import org.neo4j.driver.AuthToken;
import org.neo4j.driver.AuthTokens;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import static com.amazonaws.auth.internal.SignerConstants.AUTHORIZATION;
import static com.amazonaws.auth.internal.SignerConstants.HOST;
import static com.amazonaws.auth.internal.SignerConstants.X_AMZ_DATE;
import static com.amazonaws.auth.internal.SignerConstants.X_AMZ_SECURITY_TOKEN;
/**
 * Static helper for IAM (SigV4) authentication: builds a Bolt {@link AuthToken}
 * whose "password" carries the signed request headers as JSON.
 */
public class OpenCypherIAMRequestGenerator {
    private static final AWSCredentialsProvider AWS_CREDENTIALS_PROVIDER = new DefaultAWSCredentialsProviderChain();
    static final String SERVICE_NAME = "neptune-db";
    static final String HTTP_METHOD_HDR = "HttpMethod";
    static final String DUMMY_USERNAME = "username";
    private static final Gson GSON = new Gson();

    private OpenCypherIAMRequestGenerator() {
        // Utility class with only static members; prevent instantiation.
    }

    /**
     * Function to generate AuthToken using IAM authentication.
     *
     * @param url    URL to point at.
     * @param region Region to use.
     * @return AuthToken for IAM authentication.
     */
    public static AuthToken createAuthToken(final String url, final String region) {
        // Build a throwaway GET request against the /opencypher endpoint purely so
        // that the SigV4 signer can produce the required headers.
        final Request<Void> request = new DefaultRequest<>(SERVICE_NAME);
        request.setHttpMethod(HttpMethodName.GET);
        request.setEndpoint(URI.create(url));
        request.setResourcePath("/opencypher");

        final AWS4Signer signer = new AWS4Signer();
        signer.setRegionName(region);
        signer.setServiceName(request.getServiceName());
        signer.sign(request, AWS_CREDENTIALS_PROVIDER.getCredentials());

        // The username is a placeholder; the server reads the signed headers from the
        // JSON-encoded "password" field.
        return AuthTokens.basic(DUMMY_USERNAME, getAuthInfoJson(request));
    }

    /**
     * Serializes the signed request's authentication-relevant headers to JSON.
     *
     * @param request Signed request to read headers from.
     * @return JSON object containing Authorization, HttpMethod, X-Amz-Date, Host,
     *         and X-Amz-Security-Token entries.
     */
    private static String getAuthInfoJson(final Request<Void> request) {
        final Map<String, Object> obj = new HashMap<>();
        obj.put(AUTHORIZATION, request.getHeaders().get(AUTHORIZATION));
        obj.put(HTTP_METHOD_HDR, request.getHttpMethod());
        obj.put(X_AMZ_DATE, request.getHeaders().get(X_AMZ_DATE));
        obj.put(HOST, request.getHeaders().get(HOST));
        obj.put(X_AMZ_SECURITY_TOKEN, request.getHeaders().get(X_AMZ_SECURITY_TOKEN));
        return GSON.toJson(obj);
    }
}
| 7,507 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.types.Type;
import software.aws.neptune.jdbc.ResultSetMetaData;
import software.aws.neptune.opencypher.OpenCypherTypeMapping;
import java.sql.SQLException;
import java.util.List;
/**
 * OpenCypher implementation of ResultSetMetadata.
 */
public class OpenCypherResultSetMetadata extends ResultSetMetaData
implements java.sql.ResultSetMetaData {
// Bolt types, positionally aligned with the column list passed to the super class.
private final List<Type> columnTypes;
/**
 * OpenCypherResultSetMetadata constructor.
 *
 * @param columns List of column names.
 * @param columnTypes List of column types.
 */
public OpenCypherResultSetMetadata(final List<String> columns, final List<Type> columnTypes) {
super(columns);
this.columnTypes = columnTypes;
}
/**
 * Get Bolt type of a given column.
 *
 * @param column the 1-based column index.
 * @return Bolt Type Object for column.
 */
protected Type getColumnBoltType(final int column) {
// TODO: Loop rows to find common type and cache it.
return columnTypes.get(column - 1);
}
@Override
public int getColumnType(final int column) throws SQLException {
verifyColumnIndex(column);
// NOTE(review): an NPE results here if the Bolt type is absent from
// BOLT_TO_JDBC_TYPE_MAP; the static mapping appears exhaustive — confirm.
return OpenCypherTypeMapping.BOLT_TO_JDBC_TYPE_MAP.get(getColumnBoltType(column)).getJdbcCode();
}
@Override
public String getColumnTypeName(final int column) throws SQLException {
verifyColumnIndex(column);
// Reports the Bolt type name (e.g. "STRING"), not the JDBC type name.
return getColumnBoltType(column).name();
}
@Override
public String getColumnClassName(final int column) throws SQLException {
verifyColumnIndex(column);
return OpenCypherTypeMapping.BOLT_TO_JAVA_TYPE_MAP.get(getColumnBoltType(column)).getName();
}
}
| 7,508 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetColumns.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetColumns;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * OpenCypher ResultSet class for getColumns.
 */
public class OpenCypherResultSetGetColumns extends ResultSetGetColumns implements java.sql.ResultSet {
// Bolt type for each column of the DatabaseMetaData.getColumns() result shape.
private static final Map<String, Type> COLUMN_TYPE_MAP = new HashMap<>();
static {
COLUMN_TYPE_MAP.put("TABLE_CAT", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("TABLE_SCHEM", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("TABLE_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("COLUMN_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("DATA_TYPE", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("TYPE_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("COLUMN_SIZE", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("BUFFER_LENGTH", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("DECIMAL_DIGITS", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("NUM_PREC_RADIX", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("NULLABLE", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("REMARKS", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("COLUMN_DEF", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("SQL_DATA_TYPE", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("SQL_DATETIME_SUB", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("CHAR_OCTET_LENGTH", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("ORDINAL_POSITION", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("IS_NULLABLE", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("SCOPE_CATALOG", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("SCOPE_SCHEMA", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("SCOPE_TABLE", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("SOURCE_DATA_TYPE", InternalTypeSystem.TYPE_SYSTEM.INTEGER());
COLUMN_TYPE_MAP.put("IS_AUTOINCREMENT", InternalTypeSystem.TYPE_SYSTEM.STRING());
COLUMN_TYPE_MAP.put("IS_GENERATEDCOLUMN", InternalTypeSystem.TYPE_SYSTEM.STRING());
}
/**
 * OpenCypherResultSetGetColumns constructor, initializes super class.
 *
 * @param statement Statement Object.
 * @param gremlinSchema GremlinSchema Object.
 * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
 */
public OpenCypherResultSetGetColumns(final Statement statement,
final GremlinSchema gremlinSchema,
final ResultSetInfoWithoutRows resultSetInfoWithoutRows)
throws SQLException {
super(statement, gremlinSchema, resultSetInfoWithoutRows);
}
@Override
protected ResultSetMetaData getResultMetadata() {
// Pair each column (in the order the super class reports them) with its Bolt type.
final List<String> orderedColumns = getColumns();
final List<Type> rowTypes = new ArrayList<>();
for (final String column : orderedColumns) {
rowTypes.add(COLUMN_TYPE_MAP.get(column));
}
return new OpenCypherResultSetMetadata(orderedColumns, rowTypes);
}
}
| 7,509 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetSchemas.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetSchemas;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * OpenCypher ResultSet class for getSchemas.
 */
public class OpenCypherResultSetGetSchemas extends ResultSetGetSchemas {
    /**
     * Constructor for OpenCypherResultSetGetSchemas.
     *
     * @param statement Statement Object.
     */
    public OpenCypherResultSetGetSchemas(final Statement statement) {
        super(statement);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        // Every column of the getSchemas result shape is a string.
        final List<String> columns = getColumns();
        final Type stringType = InternalTypeSystem.TYPE_SYSTEM.STRING();
        final List<Type> columnTypes = new ArrayList<>(columns.size());
        for (int index = 0; index < columns.size(); index++) {
            columnTypes.add(stringType);
        }
        return new OpenCypherResultSetMetadata(columns, columnTypes);
    }
}
| 7,510 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.neo4j.driver.Record;
import org.neo4j.driver.Result;
import org.neo4j.driver.Session;
import org.neo4j.driver.Value;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.jdbc.ResultSet;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.opencypher.OpenCypherTypeMapping;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* OpenCypher ResultSet class.
*/
public class OpenCypherResultSet extends ResultSet implements java.sql.ResultSet {
private static final Logger LOGGER = LoggerFactory.getLogger(OpenCypherResultSet.class);
private final List<String> columns;
private final List<Record> rows;
private final Result result;
private final Session session;
private boolean wasNull = false;
// TODO: Separate the result set without info to a common result set that this can use.
/**
* OpenCypherResultSet constructor, initializes super class.
*
* @param statement Statement Object.
* @param resultSetInfo ResultSetInfoWithRows Object.
*/
public OpenCypherResultSet(final java.sql.Statement statement, final ResultSetInfoWithRows resultSetInfo) {
super(statement, resultSetInfo.getColumns(), resultSetInfo.getRows().size());
this.session = resultSetInfo.getSession();
this.result = resultSetInfo.getResult();
this.columns = resultSetInfo.getColumns();
this.rows = resultSetInfo.getRows();
}
/**
* OpenCypherResultSet constructor, initializes super class.
*
* @param statement Statement Object.
* @param resultSetInfo ResultSetInfoWithoutRows Object.
*/
public OpenCypherResultSet(final java.sql.Statement statement, final ResultSetInfoWithoutRows resultSetInfo) {
super(statement, resultSetInfo.getColumns(), resultSetInfo.getRowCount());
this.session = null;
this.result = null;
this.columns = resultSetInfo.getColumns();
this.rows = null;
}
@Override
protected void doClose() throws SQLException {
if (result != null) {
result.consume();
}
if (session != null) {
session.close();
}
}
@Override
public boolean wasNull() throws SQLException {
return wasNull;
}
@Override
protected ResultSetMetaData getResultMetadata() throws SQLException {
final List<Type> rowTypes = new ArrayList<>();
if (rows == null) {
for (int i = 0; i < columns.size(); i++) {
rowTypes.add(InternalTypeSystem.TYPE_SYSTEM.STRING());
}
} else {
// TODO: Loop through records and do some sort of type promotion.
final Record record = rows.get(0);
for (int i = 0; i < columns.size(); i++) {
rowTypes.add(record.get(i).type());
}
}
return new OpenCypherResultSetMetadata(columns, rowTypes);
}
protected Object getConvertedValue(final int columnIndex) throws SQLException {
final Value value = getValue(columnIndex);
final OpenCypherTypeMapping.Converter<?> converter = getConverter(value);
return converter.convert(value);
}
private Value getValue(final int columnIndex) throws SQLException {
verifyOpen();
if (rows == null) {
// TODO: investigate and change exception error type if needed
throw SqlError.createSQLException(
LOGGER,
SqlState.DATA_EXCEPTION,
SqlError.UNSUPPORTED_RESULT_SET_TYPE);
}
validateRowColumn(columnIndex);
final Value value = rows.get(getRowIndex()).get(columnIndex - 1);
wasNull = value.isNull();
return value;
}
    /**
     * Looks up the Bolt-to-Java converter registered for the value's type.
     *
     * @param value the Bolt value to be converted.
     * @return the matching converter from BOLT_TO_JAVA_TRANSFORM_MAP.
     */
    protected OpenCypherTypeMapping.Converter<?> getConverter(final Value value) {
        return OpenCypherTypeMapping.BOLT_TO_JAVA_TRANSFORM_MAP.get(value.type());
    }
    /**
     * Retrieves the value at the given column as an Object, using the
     * caller-supplied map from JDBC type name to target Java class.
     *
     * @param columnIndex 1-based JDBC column index.
     * @param map         mapping from JDBC type name to the desired class.
     * @return the column value converted to the mapped class.
     * @throws SQLException if the value cannot be retrieved or converted.
     */
    @Override
    public Object getObject(final int columnIndex, final Map<String, Class<?>> map) throws SQLException {
        LOGGER.trace("Getting column {} as an Object using provided Map.", columnIndex);
        final Value value = getValue(columnIndex);
        // NOTE(review): if the map lacks an entry for this value's JDBC type
        // name, map.get(...) returns null here - confirm callers always
        // supply a complete map.
        return getObject(columnIndex, map.get(OpenCypherTypeMapping.BOLT_TO_JDBC_TYPE_MAP.get(value.type()).name()));
    }
    /**
     * Immutable carrier for a fully materialized OpenCypher result: the Bolt
     * session and result handles (retained for later cleanup) plus the
     * fetched rows and column labels.
     */
    @AllArgsConstructor
    @Getter
    public static class ResultSetInfoWithRows {
        private final Session session;
        private final Result result;
        private final List<Record> rows;
        private final List<String> columns;
    }
}
| 7,511 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetTypeInfo.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTypeInfo;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * OpenCypher ResultSet class for getTypeInfo: serves a fixed, precomputed
 * table mapping OpenCypher/Bolt type names to JDBC type descriptors.
 */
public class OpenCypherResultSetGetTypeInfo extends ResultSetGetTypeInfo {
    // Shared, precomputed rows; each constructor hands out a defensive copy.
    private static final List<Map<String, Object>> TYPE_INFO = new ArrayList<>();

    static {
        // The order added to TYPE_INFO matters: rows are returned in this
        // order, so closest-match JDBC types must come first.
        putInfo(TYPE_INFO, "BOOLEAN", Types.BIT, false, false);
        putInfo(TYPE_INFO, "NULL", Types.NULL, false, false);
        putInfo(TYPE_INFO, "INTEGER", Types.INTEGER, false, true);
        putInfo(TYPE_INFO, "NUMBER", Types.DOUBLE, false, true);
        putInfo(TYPE_INFO, "FLOAT", Types.DOUBLE, false, true);
        putInfo(TYPE_INFO, "STRING", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "ANY", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "LIST", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "MAP", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "NODE", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "RELATIONSHIP", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "PATH", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "POINT", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "DURATION", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "BYTES", Types.VARCHAR, false, false);
        putInfo(TYPE_INFO, "DATE", Types.DATE, false, false);
        putInfo(TYPE_INFO, "TIME", Types.TIME, false, false);
        putInfo(TYPE_INFO, "LOCAL_TIME", Types.TIME, false, false);
        putInfo(TYPE_INFO, "DATE_TIME", Types.TIMESTAMP, false, false);
        putInfo(TYPE_INFO, "LOCAL_DATE_TIME", Types.TIMESTAMP, false, false);
        // Fill in the JDBC getTypeInfo constants common to every row.
        populateConstants(TYPE_INFO);
    }

    /**
     * OpenCypherResultSetGetTypeInfo constructor, initializes super class.
     *
     * @param statement Statement that produced this result set.
     */
    public OpenCypherResultSetGetTypeInfo(final Statement statement) {
        super(statement, new ArrayList<>(TYPE_INFO));
    }
}
| 7,512 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetTables.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTables;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * OpenCypher ResultSet class for getTables: serves the JDBC getTables
 * columns, all of which are string-typed.
 */
public class OpenCypherResultSetGetTables extends ResultSetGetTables implements java.sql.ResultSet {
    // Bolt type for each JDBC getTables column (all STRING per the spec).
    private static final Map<String, Type> COLUMN_TYPE_MAP = new HashMap<>();

    static {
        COLUMN_TYPE_MAP.put("TABLE_CAT", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TABLE_SCHEM", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TABLE_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TABLE_TYPE", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("REMARKS", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TYPE_CAT", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TYPE_SCHEM", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("TYPE_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("SELF_REFERENCING_COL_NAME", InternalTypeSystem.TYPE_SYSTEM.STRING());
        COLUMN_TYPE_MAP.put("REF_GENERATION", InternalTypeSystem.TYPE_SYSTEM.STRING());
    }

    /**
     * OpenCypherResultSetGetTables constructor, initializes super class.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public OpenCypherResultSetGetTables(final Statement statement,
                                        final GremlinSchema gremlinSchema,
                                        final ResultSetInfoWithoutRows resultSetInfoWithoutRows)
            throws SQLException {
        super(statement, gremlinSchema, resultSetInfoWithoutRows);
    }

    /**
     * Builds metadata for the getTables result using the declared column
     * type map. (Previously the map was declared but unused and every
     * column fell back to STRING unconditionally - same net behavior,
     * since every map entry is STRING.)
     *
     * @return metadata describing the getTables columns.
     */
    @Override
    protected ResultSetMetaData getResultMetadata() {
        final List<String> orderedColumns = getColumns();
        final List<Type> rowTypes = new ArrayList<>();
        for (final String column : orderedColumns) {
            // Fall back to STRING for any column missing from the map, to
            // preserve the previous behavior exactly.
            rowTypes.add(COLUMN_TYPE_MAP.getOrDefault(column, InternalTypeSystem.TYPE_SYSTEM.STRING()));
        }
        return new OpenCypherResultSetMetadata(orderedColumns, rowTypes);
    }
}
| 7,513 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetTableTypes.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTableTypes;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * OpenCypher ResultSet class for getTableTypes: every column of the result
 * is typed as a Bolt STRING.
 */
public class OpenCypherResultSetGetTableTypes extends ResultSetGetTableTypes {
    /**
     * Constructor for OpenCypherResultSetGetTableTypes.
     *
     * @param statement Statement that produced this result set.
     */
    public OpenCypherResultSetGetTableTypes(final Statement statement) {
        super(statement);
    }

    /**
     * Builds metadata for the getTableTypes result; each column is reported
     * as a Bolt STRING.
     *
     * @return metadata describing the getTableTypes columns.
     */
    @Override
    protected ResultSetMetaData getResultMetadata() {
        final List<String> columnLabels = getColumns();
        final List<Type> columnTypes = new ArrayList<>(columnLabels.size());
        for (final String ignored : columnLabels) {
            columnTypes.add(InternalTypeSystem.TYPE_SYSTEM.STRING());
        }
        return new OpenCypherResultSetMetadata(columnLabels, columnTypes);
    }
}
| 7,514 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/opencypher/resultset/OpenCypherResultSetGetCatalogs.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.opencypher.resultset;
import org.neo4j.driver.internal.types.InternalTypeSystem;
import org.neo4j.driver.types.Type;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetCatalogs;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * OpenCypher ResultSet class for getCatalogs: every column of the result is
 * typed as a Bolt STRING.
 */
public class OpenCypherResultSetGetCatalogs extends ResultSetGetCatalogs {
    /**
     * Constructor for OpenCypherResultSetGetCatalogs.
     *
     * @param statement Statement that produced this result set.
     */
    public OpenCypherResultSetGetCatalogs(final Statement statement) {
        super(statement);
    }

    /**
     * Builds metadata for the getCatalogs result; each column is reported
     * as a Bolt STRING.
     *
     * @return metadata describing the getCatalogs columns.
     */
    @Override
    protected ResultSetMetaData getResultMetadata() {
        final List<String> columnLabels = getColumns();
        final List<Type> columnTypes = new ArrayList<>(columnLabels.size());
        for (final String ignored : columnLabels) {
            columnTypes.add(InternalTypeSystem.TYPE_SYSTEM.STRING());
        }
        return new OpenCypherResultSetMetadata(columnLabels, columnTypes);
    }
}
| 7,515 |
0 | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog/handlers/MessageQueueProvider.java | package com.airbnb.plog.handlers;
import com.airbnb.plog.Message;
import com.eclipsesource.json.JsonObject;
import com.google.common.collect.Queues;
import com.typesafe.config.Config;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import lombok.Getter;
import java.util.Queue;
/**
 * Test handler provider that captures every inbound {@link Message} into a
 * shared static queue so tests can inspect what flowed down the pipeline.
 */
@SuppressWarnings("ClassOnlyUsedInOneModule")
public class MessageQueueProvider implements HandlerProvider {
    // Shared across all handler instances; tests drain it via getQueue().
    // NOTE(review): ArrayDeque is not thread-safe - assumes single-threaded
    // test pipelines; confirm if used from multiple event loops.
    @Getter
    private static final Queue<Message> queue = Queues.newArrayDeque();

    @Override
    public Handler getHandler(Config config) throws Exception {
        return new MessageQueueHandler();
    }

    /** Pipeline handler that retains and enqueues each message it sees. */
    private static class MessageQueueHandler extends SimpleChannelInboundHandler<Message> implements Handler {
        @Override
        public JsonObject getStats() {
            return new JsonObject().add("queued", queue.size());
        }

        @Override
        public String getName() {
            return "mqueue";
        }

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
            // Retain: SimpleChannelInboundHandler auto-releases after this
            // method, but the queue keeps a reference beyond it.
            msg.retain();
            queue.add(msg);
        }
    }
}
| 7,516 |
0 | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog/handlers/TruncationProvider.java | package com.airbnb.plog.handlers;
import com.airbnb.plog.Message;
import com.airbnb.plog.MessageImpl;
import com.eclipsesource.json.JsonObject;
import com.typesafe.config.Config;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import lombok.RequiredArgsConstructor;
/**
 * Handler provider that truncates message payloads to a configured maximum
 * length ("max_length"); messages at or under the limit pass through as-is.
 */
@RequiredArgsConstructor
public class TruncationProvider implements HandlerProvider {
    @Override
    public Handler getHandler(Config config) throws Exception {
        final int maxLength = config.getInt("max_length");
        return new MessageSimpleChannelInboundHandler(maxLength);
    }

    /** Pipeline handler performing the actual truncation. */
    private static class MessageSimpleChannelInboundHandler extends SimpleChannelInboundHandler<Message> implements Handler {
        private final int maxLength;

        public MessageSimpleChannelInboundHandler(int maxLength) {
            // super(false): disable auto-release; downstream handlers own
            // the message's reference count.
            super(false);
            this.maxLength = maxLength;
        }

        @Override
        protected void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
            final ByteBuf orig = msg.content();
            final int length = orig.readableBytes();
            if (length <= maxLength) {
                ctx.fireChannelRead(msg);
            } else {
                // slice() shares the original buffer's reference count, so
                // releasing the truncated message downstream releases the
                // original's storage as well.
                // NOTE(review): the original Message wrapper itself is not
                // released here - confirm no per-wrapper bookkeeping leaks.
                final ByteBuf content = msg.content().slice(0, maxLength);
                ctx.fireChannelRead(new MessageImpl(content, msg.getTags()));
            }
        }

        @Override
        public JsonObject getStats() {
            return new JsonObject().add("max_length", maxLength);
        }

        @Override
        public String getName() {
            return "truncate";
        }
    }
}
| 7,517 |
0 | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog | Create_ds/plog/plog-server/src/test/groovy/com/airbnb/plog/handlers/ReverseBytesProvider.java | package com.airbnb.plog.handlers;
import com.airbnb.plog.Message;
import com.airbnb.plog.MessageImpl;
import com.eclipsesource.json.JsonObject;
import com.typesafe.config.Config;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
/**
 * Test handler provider whose handler re-emits each message with its
 * payload bytes in reverse order (tags preserved).
 */
@SuppressWarnings("ClassOnlyUsedInOneModule")
public class ReverseBytesProvider implements HandlerProvider {
    @Override
    public Handler getHandler(Config config) throws Exception {
        return new ReverseBytesHandler();
    }

    /** Pipeline handler that fires a byte-reversed copy of every message. */
    private static class ReverseBytesHandler extends SimpleChannelInboundHandler<Message> implements Handler {
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
            final byte[] original = msg.asBytes();
            final byte[] flipped = new byte[original.length];
            // Fill the copy back-to-front from the original payload.
            int src = original.length;
            for (int dst = 0; dst < flipped.length; dst++) {
                flipped[dst] = original[--src];
            }
            final Message reversed = MessageImpl.fromBytes(ctx.alloc(), flipped, msg.getTags());
            // Retain before handing the new message down the pipeline.
            reversed.retain();
            ctx.fireChannelRead(reversed);
        }

        @Override
        public JsonObject getStats() {
            return null;
        }

        @Override
        public String getName() {
            return "reverse";
        }
    }
}
| 7,518 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/PlogServer.java | package com.airbnb.plog.server;
import com.airbnb.plog.server.listeners.TCPListener;
import com.airbnb.plog.server.listeners.UDPListener;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Service;
import com.google.common.util.concurrent.ServiceManager;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import lombok.extern.slf4j.Slf4j;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * Application entry point: reads the Typesafe config, builds one listener
 * service per configured UDP/TCP listener, and supervises them with a Guava
 * {@link ServiceManager}. Exits the JVM on listener failure and installs a
 * shutdown hook for graceful stop.
 */
@SuppressWarnings("CallToSystemExit")
@Slf4j
public final class PlogServer {
    public static void main(String[] args) {
        log.info("Starting...");
        // Banner goes to stderr so stdout stays clean for piping.
        System.err.println(
                " _\n" +
                        " _ __| |___ __ _\n" +
                        "| '_ \\ / _ \\/ _` |\n" +
                        "| .__/_\\___/\\__, |\n" +
                        "|_| |___/ server"
        );
        new PlogServer().run(ConfigFactory.load());
    }

    private void run(Config config) {
        log.info("Starting with config {}", config);
        final Config plogServer = config.getConfig("plog.server");
        // Per-protocol defaults cascade onto the global defaults; each
        // listener config cascades onto its protocol defaults.
        final Config globalDefaults = plogServer.getConfig("defaults");
        final Config udpConfig = plogServer.getConfig("udp");
        final Config udpDefaults = udpConfig.getConfig("defaults").withFallback(globalDefaults);
        final Config tcpConfig = plogServer.getConfig("tcp");
        final Config tcpDefaults = tcpConfig.getConfig("defaults").withFallback(globalDefaults);

        final ArrayList<Service> services = Lists.newArrayList();
        for (final Config cfg : udpConfig.getConfigList("listeners")) {
            services.add(new UDPListener(cfg.withFallback(udpDefaults)));
        }
        for (final Config cfg : tcpConfig.getConfigList("listeners")) {
            services.add(new TCPListener(cfg.withFallback(tcpDefaults)));
        }

        // Maximum time the shutdown hook waits for listeners to stop.
        final long shutdownTime = plogServer.getDuration("shutdown_time", TimeUnit.MILLISECONDS);

        final ServiceManager manager = new ServiceManager(services);
        manager.addListener(new ServiceManager.Listener() {
            @Override
            public void healthy() {
                log.info("All listeners started!");
            }

            @Override
            public void failure(Service service) {
                // Any listener failing is fatal for the whole server.
                log.error("Failure for listener {}", service);
                System.exit(1);
            }
        });
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                log.info("Shutting down...");
                try {
                    manager.stopAsync().awaitStopped(shutdownTime, TimeUnit.MILLISECONDS);
                } catch (TimeoutException e) {
                    log.warn("Did not shut down gracefully after {}ms!", shutdownTime, e);
                    // halt(), not exit(): we are already inside a shutdown
                    // hook, so exit() would deadlock.
                    Runtime.getRuntime().halt(2);
                }
            }
        });
        manager.startAsync();
    }
}
| 7,519 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/pipeline/EndOfPipeline.java | package com.airbnb.plog.server.pipeline;
import com.airbnb.plog.server.stats.StatisticsReporter;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.util.regex.Pattern;
/**
 * Terminal pipeline handler: counts any object that reached the end of the
 * pipeline unhandled, and logs/counts pipeline exceptions while suppressing
 * noisy, expected connection-teardown IOExceptions.
 */
@ChannelHandler.Sharable
@Slf4j
@RequiredArgsConstructor
public final class EndOfPipeline extends SimpleChannelInboundHandler<Object> {
    // This makes me excrutiatingly sad
    // Matches the message text of routine disconnect errors (reset/closed/
    // aborted/broken connection, broken pipe) so they are not logged.
    private static final Pattern IGNORABLE_ERROR_MESSAGE = Pattern.compile(
            "^.*(?:connection.*(?:reset|closed|abort|broken)|broken.*pipe).*$",
            Pattern.CASE_INSENSITIVE
    );

    private final StatisticsReporter stats;

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
        // Anything arriving here was not consumed upstream.
        log.warn("Unhandled {} down the pipeline: {}", msg.getClass().getName(), msg);
        stats.unhandledObject();
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // NOTE(review): cause.getMessage() may be null for some IOExceptions,
        // which would NPE in matcher() - confirm against observed traffic.
        final boolean ignored = cause instanceof IOException && IGNORABLE_ERROR_MESSAGE.matcher(cause.getMessage()).matches();
        if (!ignored) {
            log.error("Exception down the pipeline", cause);
            stats.exception();
        }
    }
}
| 7,520 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/pipeline/ProtocolDecoder.java | package com.airbnb.plog.server.pipeline;
import com.airbnb.plog.MessageImpl;
import com.airbnb.plog.server.commands.FourLetterCommand;
import com.airbnb.plog.server.fragmentation.Fragment;
import com.airbnb.plog.server.stats.StatisticsReporter;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.socket.DatagramPacket;
import io.netty.handler.codec.MessageToMessageDecoder;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
/**
 * Decodes inbound UDP datagrams into plog pipeline objects: plain messages,
 * v0 four-letter commands, or v0 multipart fragments, based on the first
 * byte (protocol version) and second byte (v0 packet type).
 */
@RequiredArgsConstructor
@Slf4j
public final class ProtocolDecoder extends MessageToMessageDecoder<DatagramPacket> {
    private final StatisticsReporter stats;

    @Override
    protected void decode(ChannelHandlerContext ctx, DatagramPacket msg, List<Object> out)
            throws Exception {
        final ByteBuf content = msg.content();
        // NOTE(review): getByte(0) on an empty datagram would throw -
        // confirm zero-length packets cannot reach this decoder.
        final byte versionIdentifier = content.getByte(0);
        // versions are non-printable characters, push down the pipeline send as-is.
        // (Printable first byte, or a negative byte from a high-bit char,
        // means an unboxed plain-text message.)
        if (versionIdentifier < 0 || versionIdentifier > 31) {
            log.debug("Unboxed UDP message");
            stats.receivedUdpSimpleMessage();
            // Retain: MessageToMessageDecoder releases the input after
            // decode, but the emitted Message shares its buffer.
            msg.retain();
            out.add(new MessageImpl(content, null));
        } else if (versionIdentifier == 0) {
            final byte typeIdentifier = content.getByte(1);
            switch (typeIdentifier) {
                case 0:
                    // v0 type 0: four-letter command (PING/STAT/...).
                    final FourLetterCommand cmd = readCommand(msg);
                    if (cmd != null) {
                        log.debug("v0 command");
                        out.add(cmd);
                    } else {
                        stats.receivedUnknownCommand();
                    }
                    break;
                case 1:
                    // v0 type 1: fragment of a multipart message.
                    log.debug("v0 multipart message: {}", msg);
                    try {
                        final Fragment fragment = Fragment.fromDatagram(msg);
                        stats.receivedV0MultipartFragment(fragment.getFragmentIndex());
                        msg.retain();
                        out.add(fragment);
                    } catch (IllegalArgumentException e) {
                        log.error("Invalid header", e);
                        stats.receivedV0InvalidMultipartHeader();
                    }
                    break;
                default:
                    stats.receivedV0InvalidType();
            }
        } else {
            stats.receivedUdpInvalidVersion();
        }
    }

    /**
     * Parses a v0 command packet: bytes 2-5 hold the four-letter command,
     * the remainder is an opaque trail echoed back to the sender.
     *
     * @param msg the inbound datagram.
     * @return the parsed command, or null if the packet is too short.
     */
    private FourLetterCommand readCommand(DatagramPacket msg) {
        final ByteBuf content = msg.content();
        final int trailLength = content.readableBytes() - 6;
        if (trailLength < 0) {
            return null;
        }
        final byte[] trail = new byte[trailLength];
        final byte[] cmdBuff = new byte[4];
        content.getBytes(2, cmdBuff, 0, 4);
        content.getBytes(6, trail, 0, trail.length);
        // NOTE(review): new String(byte[]) uses the platform default
        // charset; commands are ASCII so this is harmless in practice, but
        // an explicit charset would be more portable.
        return new FourLetterCommand(new String(cmdBuff), msg.sender(), trail);
    }
}
| 7,521 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/packetloss/PortHoleDetector.java | package com.airbnb.plog.server.packetloss;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import java.util.Arrays;
/**
 * Tracks recently seen per-port message ids in a small sorted window and
 * reports "holes" (missing intermediate ids) as a proxy for packet loss.
 * Uses Integer.MIN_VALUE as an "empty slot" sentinel.
 */
@Slf4j
@ToString
final class PortHoleDetector {
    // Sorted ascending window of recent ids; Integer.MIN_VALUE = empty slot.
    @Getter(AccessLevel.PACKAGE)
    private final int[] entries;
    // Smallest/largest ids observed since the last reset; used to detect
    // port reuse (a wildly out-of-range id triggers a reset).
    @Getter(AccessLevel.PACKAGE)
    private long minSeen;
    @Getter(AccessLevel.PACKAGE)
    private long maxSeen;

    /**
     * @param capacity number of ids tracked per port; must be >= 1.
     * @throws IllegalArgumentException if capacity < 1.
     */
    PortHoleDetector(final int capacity) {
        /* we assume Integer.MIN_VALUE is absent from port IDs.
           we'll have some false negatives */
        if (capacity < 1) {
            throw new IllegalArgumentException("Insufficient capacity " + capacity);
        }
        this.entries = new int[capacity];
        reset(null);
    }

    // Clears the window and the min/max bounds; value (if non-null) is the
    // id that triggered the reset, logged for diagnostics.
    private void reset(Integer value) {
        if (value != null) {
            log.info("Resetting {} for {}", this.entries, value);
        }
        this.minSeen = Long.MAX_VALUE;
        this.maxSeen = Long.MIN_VALUE;
        Arrays.fill(this.entries, Integer.MIN_VALUE);
    }

    /**
     * Insert candidate if missing
     *
     * @param candidate The entry we want to track
     * @param maxHole   Larger holes are ignored
     * @return The size of the hole (missing intermediate values)
     * between the previously smallest and newly smallest entry
     */
    @SuppressWarnings("OverlyLongMethod")
    final int ensurePresent(int candidate, int maxHole) {
        if (maxHole < 1) {
            throw new MaxHoleTooSmall(maxHole);
        }
        final int purgedOut, newFirst;
        synchronized (this.entries) {
            // solve port reuse: an id far below/above everything seen so far
            // most likely belongs to a new client on a recycled port, so we
            // start over instead of reporting a giant bogus hole.
            if (candidate < minSeen) {
                if (minSeen != Long.MAX_VALUE && minSeen - candidate > maxHole) {
                    reset(candidate);
                } else {
                    minSeen = candidate;
                }
            }
            if (candidate > maxSeen) {
                if (maxSeen != Long.MIN_VALUE && candidate - maxSeen > maxHole) {
                    reset(candidate);
                } else {
                    maxSeen = candidate;
                }
            }

            final int index = Arrays.binarySearch(entries, candidate);
            if (index >= 0) // found, already tracked: no hole to report
            {
                return 0;
            }

            // binarySearch miss: index = (-(ipoint) - 1)
            // <=> index + 1 = -(ipoint)
            // <=> -(index + 1) = ipoint
            final int ipoint = -1 - index;

            // Shift left to make room, pushing the oldest (smallest) entry
            // out of the window:
            // Before: a b c d e f g
            // After:  b c X d e f g
            //              ^ ipoint
            if (ipoint == 0) {
                // candidate is smaller than everything: it is itself evicted.
                purgedOut = candidate;
                newFirst = entries[0];
            } else {
                purgedOut = entries[0];
                if (ipoint > 1) {
                    System.arraycopy(entries, 1, entries, 0, ipoint - 1);
                }
                entries[ipoint - 1] = candidate;
                newFirst = entries[0];
            }
        }

        // magical value: evicted slot was empty, nothing to compare against
        if (purgedOut == Integer.MIN_VALUE) {
            return 0;
        }

        // Hole = ids skipped between the evicted entry and the new smallest.
        final int hole = newFirst - purgedOut - 1;
        if (hole > 0) {
            if (hole <= maxHole) {
                log.info("Pushed out hole between {} and {}", purgedOut, newFirst);
                debugState();
                return hole;
            } else {
                log.info("Pushed out and ignored hole between {} and {}", purgedOut, newFirst);
                debugState();
                return 0;
            }
        } else if (hole < 0) {
            log.warn("Negative hole pushed out between {} and {}",
                    purgedOut, newFirst);
            debugState();
        }
        return 0;
    }

    /**
     * Sums the holes between adjacent tracked ids, ignoring empty slots and
     * holes larger than maxHole. Called when a port's detector is evicted.
     *
     * @param maxHole larger holes are ignored; must be >= 1.
     * @return total count of missing intermediate ids.
     */
    final int countTotalHoles(int maxHole) {
        if (maxHole < 1) {
            throw new MaxHoleTooSmall(maxHole);
        }
        int holes = 0;
        synchronized (this.entries) {
            for (int i = 0; i < this.entries.length - 1; i++) {
                final long current = this.entries[i];
                final long next = this.entries[i + 1];

                // magical values: skip pairs involving empty slots
                if (current == Integer.MIN_VALUE || next == Integer.MIN_VALUE) {
                    continue;
                }

                final long hole = next - current - 1;
                if (hole > 0) {
                    if (hole <= maxHole) {
                        log.info("Scanned hole {} between {} and {}", hole, current, next);
                        debugState();
                        holes += hole;
                    } else {
                        log.info("Scanned and ignored hole {} between {} and {}", hole, current, next);
                        debugState();
                    }
                } else if (hole < 0) {
                    log.warn("Scanned through negative hole {} between {} and {}",
                            hole, current, next);
                    debugState();
                }
            }
        }
        return holes;
    }

    // Dumps the full detector state (via @ToString) at debug level.
    final void debugState() {
        log.debug("Current state: {}", this);
    }

    /** Thrown when a caller passes a maxHole below the minimum of 1. */
    @RequiredArgsConstructor
    private static final class MaxHoleTooSmall extends IllegalArgumentException {
        @Getter
        private final int maximumHole;

        @Override
        public String getMessage() {
            return "Maximum hole too small: " + maximumHole;
        }
    }
}
| 7,522 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/packetloss/ListenerHoleDetector.java | package com.airbnb.plog.server.packetloss;
import com.airbnb.plog.server.stats.StatisticsReporter;
import com.google.common.cache.*;
import com.typesafe.config.Config;
import lombok.extern.slf4j.Slf4j;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
@Slf4j
public final class ListenerHoleDetector {
private final LoadingCache<Integer, PortHoleDetector> cache;
private final StatisticsReporter stats;
private final int maximumHole;
public ListenerHoleDetector(final Config config, final StatisticsReporter stats) {
final int portDetectorCapacity = config.getInt("ids_per_port");
maximumHole = config.getInt("maximum_hole");
this.cache = CacheBuilder.newBuilder()
.maximumSize(config.getLong("ports"))
.expireAfterAccess(
config.getDuration("expire_time", TimeUnit.MILLISECONDS),
TimeUnit.MILLISECONDS)
.recordStats()
.removalListener(new RemovalListener<Integer, PortHoleDetector>() {
@Override
public void onRemoval(RemovalNotification<Integer, PortHoleDetector> notification) {
final PortHoleDetector detector = notification.getValue();
if (detector != null) {
final int holesFound = detector.countTotalHoles(maximumHole);
if (holesFound > 0) {
stats.foundHolesFromDeadPort(holesFound);
}
}
}
})
.build(new CacheLoader<Integer, PortHoleDetector>() {
public PortHoleDetector load(Integer key) throws Exception {
return new PortHoleDetector(portDetectorCapacity);
}
});
this.stats = stats;
}
public int reportNewMessage(final long id) {
final int clientPort = (int) (id >> Integer.SIZE);
final int clientId = (int) (id & 0xffffffff);
try {
final int holesFound = this.cache.get(clientPort).ensurePresent(clientId, maximumHole);
if (holesFound > 0) {
stats.foundHolesFromNewMessage(holesFound);
}
return holesFound;
} catch (ExecutionException e) {
log.error("impossible is possible");
}
return 0; // still impossible
}
}
| 7,523 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/commands/FourLetterCommandHandler.java | package com.airbnb.plog.server.commands;
import com.airbnb.plog.server.stats.SimpleStatisticsReporter;
import com.google.common.base.Charsets;
import com.typesafe.config.Config;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.DatagramPacket;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
/**
 * Handles v0 four-letter commands: PING (reply PONG + echoed trail), STAT
 * (reply JSON statistics), ENVI (reply config dump), and KILL (terminate
 * the JVM). Unknown commands are only counted.
 */
@SuppressWarnings("CallToSystemExit")
@Slf4j
@RequiredArgsConstructor
public final class FourLetterCommandHandler extends SimpleChannelInboundHandler<FourLetterCommand> {
    // Encoded explicitly in UTF-8 for consistency with reply(); the
    // previous getBytes() used the platform default charset.
    private static final byte[] PONG_BYTES = "PONG".getBytes(Charsets.UTF_8);

    private final SimpleStatisticsReporter stats;
    private final Config config;

    /**
     * Builds the PONG reply datagram: "PONG" followed by the ping's trail,
     * addressed back to the ping's sender.
     */
    private DatagramPacket pong(ByteBufAllocator alloc, FourLetterCommand ping) {
        final byte[] trail = ping.getTrail();
        int respLength = PONG_BYTES.length + trail.length;
        ByteBuf reply = alloc.buffer(respLength, respLength);
        reply.writeBytes(PONG_BYTES);
        reply.writeBytes(trail);
        return new DatagramPacket(reply, ping.getSender());
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, FourLetterCommand cmd) throws Exception {
        if (cmd.is(FourLetterCommand.KILL)) {
            // Remote kill switch: intentional hard exit.
            log.warn("KILL SWITCH!");
            System.exit(1);
        } else if (cmd.is(FourLetterCommand.PING)) {
            ctx.writeAndFlush(pong(ctx.alloc(), cmd));
            stats.receivedV0Command();
        } else if (cmd.is(FourLetterCommand.STAT)) {
            reply(ctx, cmd, stats.toJSON());
            stats.receivedV0Command();
        } else if (cmd.is(FourLetterCommand.ENVI)) {
            reply(ctx, cmd, config.toString());
            stats.receivedV0Command();
        } else {
            stats.receivedUnknownCommand();
        }
    }

    /** Sends a UTF-8 text reply datagram back to the command's sender. */
    private void reply(ChannelHandlerContext ctx, FourLetterCommand cmd, String response) {
        final ByteBuf payload = Unpooled.wrappedBuffer(response.getBytes(Charsets.UTF_8));
        final DatagramPacket packet = new DatagramPacket(payload, cmd.getSender());
        ctx.writeAndFlush(packet);
    }
}
| 7,524 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/commands/FourLetterCommand.java | package com.airbnb.plog.server.commands;
import lombok.Getter;
import lombok.ToString;
import java.net.InetSocketAddress;
/**
 * Immutable value for a parsed four-letter protocol command: the normalized
 * command word, the sender's address (for replies), and an opaque trail.
 */
@ToString
public final class FourLetterCommand {
    public static final String PING = "PING";
    public static final String STAT = "STAT";
    public static final String KILL = "KILL";
    public static final String ENVI = "ENVI";

    @Getter
    private final String command;
    @Getter
    private final InetSocketAddress sender;
    @Getter
    private final byte[] trail;

    /**
     * @param command four-letter command word, case-insensitive.
     * @param sender  origin address, used to route replies.
     * @param trail   opaque bytes following the command (echoed for PING).
     */
    public FourLetterCommand(String command, InetSocketAddress sender, byte[] trail) {
        // Locale.ROOT avoids locale-sensitive case mapping (e.g. the
        // Turkish dotless-i), which could break matching of "ping" etc.
        this.command = command.toUpperCase(java.util.Locale.ROOT);
        this.sender = sender;
        this.trail = trail;
    }

    /** True if this command's normalized word equals the given constant. */
    boolean is(String cmd) {
        return cmd.equals(this.getCommand());
    }
}
| 7,525 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/listeners/StartReturn.java | package com.airbnb.plog.server.listeners;
import io.netty.channel.ChannelFuture;
import io.netty.channel.EventLoopGroup;
import lombok.Data;
/**
 * Immutable pair returned by Listener.start(): the channel bind future and
 * the event loop group that must be shut down when the listener stops.
 */
@Data
final class StartReturn {
    private final ChannelFuture bindFuture;
    private final EventLoopGroup eventLoopGroup;
}
| 7,526 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/listeners/TCPListener.java | package com.airbnb.plog.server.listeners;
import com.typesafe.config.Config;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.LineBasedFrameDecoder;
import java.net.InetSocketAddress;
/**
 * TCP listener: accepts line-delimited plog messages over TCP. Each
 * connection's pipeline splits input on newlines (bounded by "max_line")
 * and converts each line into a pipeline message.
 */
public final class TCPListener extends Listener {
    public TCPListener(Config config) {
        super(config);
    }

    /**
     * Bootstraps the Netty server socket from this listener's config
     * ("host", "port", "max_line") and returns the bind future plus the
     * event loop group for later shutdown.
     */
    @Override
    protected StartReturn start() {
        final Config config = getConfig();
        final NioEventLoopGroup group = new NioEventLoopGroup();
        final ChannelFuture bindFuture = new ServerBootstrap()
                .group(group)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.TCP_NODELAY, true)
                .option(ChannelOption.SO_REUSEADDR, true)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel channel) throws Exception {
                        final ChannelPipeline pipeline = channel.pipeline();
                        // Frame on newlines (rejecting over-long lines),
                        // then wrap each frame as a Message; the shared
                        // tail handlers are appended by finalizePipeline.
                        pipeline
                                .addLast(new LineBasedFrameDecoder(config.getInt("max_line")))
                                .addLast(new ByteBufToMessageDecoder());
                        finalizePipeline(pipeline);
                    }
                }).bind(new InetSocketAddress(config.getString("host"), config.getInt("port")));
        return new StartReturn(bindFuture, group);
    }
}
| 7,527 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/listeners/UDPListener.java | package com.airbnb.plog.server.listeners;
import com.airbnb.plog.server.commands.FourLetterCommandHandler;
import com.airbnb.plog.server.fragmentation.Defragmenter;
import com.airbnb.plog.server.pipeline.ProtocolDecoder;
import com.airbnb.plog.server.stats.SimpleStatisticsReporter;
import com.typesafe.config.Config;
import io.netty.bootstrap.Bootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.*;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.DatagramPacket;
import io.netty.channel.socket.nio.NioDatagramChannel;
import lombok.Getter;
import java.net.InetSocketAddress;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * UDP listener: receives datagrams on a single-threaded event loop, hands each
 * packet off to a worker pool, then decodes / defragments it before the
 * user-configured handlers run.
 */
public final class UDPListener extends Listener {
    // Single event-loop thread reads datagrams; actual decoding happens on
    // the worker pool created in start().
    @Getter
    private NioEventLoopGroup group = new NioEventLoopGroup(1);

    public UDPListener(Config config) {
        super(config);
    }

    @Override
    protected StartReturn start() {
        final Config config = getConfig();
        final SimpleStatisticsReporter stats = getStats();
        // Shared, stateful pipeline stages reused for the lifetime of the
        // channel: protocol decoding, multipart reassembly, 4-letter commands.
        final ProtocolDecoder protocolDecoder = new ProtocolDecoder(stats);
        final Defragmenter defragmenter = new Defragmenter(stats, config.getConfig("defrag"));
        stats.withDefrag(defragmenter);
        final FourLetterCommandHandler flch = new FourLetterCommandHandler(stats, config);
        // NOTE(review): this pool is never shut down explicitly, and with
        // "threads" > 1 datagrams may be processed out of arrival order since
        // each packet is submitted independently — confirm both are intended.
        final ExecutorService threadPool =
                Executors.newFixedThreadPool(config.getInt("threads"));
        final ChannelFuture bindFuture = new Bootstrap()
                .group(group)
                .channel(NioDatagramChannel.class)
                .option(ChannelOption.SO_REUSEADDR, true)
                .option(ChannelOption.SO_RCVBUF,
                        config.getInt("SO_RCVBUF"))
                .option(ChannelOption.SO_SNDBUF,
                        config.getInt("SO_SNDBUF"))
                .option(ChannelOption.RCVBUF_ALLOCATOR,
                        new FixedRecvByteBufAllocator(config.getInt("RECV_SIZE")))
                .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                .handler(new ChannelInitializer<NioDatagramChannel>() {
                    @Override
                    protected void initChannel(NioDatagramChannel channel) throws Exception {
                        final ChannelPipeline pipeline = channel.pipeline();
                        pipeline
                                // First stage moves each datagram off the event
                                // loop onto the worker pool. The `false` ctor
                                // arg disables auto-release so the packet stays
                                // valid across the asynchronous hand-off.
                                .addLast(new SimpleChannelInboundHandler<DatagramPacket>(false) {
                                    @Override
                                    protected void channelRead0(final ChannelHandlerContext ctx,
                                                                final DatagramPacket msg)
                                            throws Exception {
                                        threadPool.submit(new Runnable() {
                                            @Override
                                            public void run() {
                                                // Re-inject downstream of this
                                                // handler, on a pool thread.
                                                ctx.fireChannelRead(msg);
                                            }
                                        });
                                    }
                                })
                                .addLast(protocolDecoder)
                                .addLast(defragmenter)
                                .addLast(flch);
                        finalizePipeline(pipeline);
                    }
                })
                .bind(new InetSocketAddress(config.getString("host"), config.getInt("port")));
        return new StartReturn(bindFuture, group);
    }
}
| 7,528 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/listeners/Listener.java | package com.airbnb.plog.server.listeners;
import com.airbnb.plog.handlers.Handler;
import com.airbnb.plog.handlers.HandlerProvider;
import com.airbnb.plog.server.pipeline.EndOfPipeline;
import com.airbnb.plog.server.stats.SimpleStatisticsReporter;
import com.google.common.util.concurrent.AbstractService;
import com.typesafe.config.Config;
import io.netty.channel.*;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import java.lang.reflect.Constructor;
@Slf4j
abstract class Listener extends AbstractService {
    @Getter
    private final Config config;
    @Getter
    private final SimpleStatisticsReporter stats;
    // Terminal pipeline stage shared by every channel of this listener.
    private final EndOfPipeline eopHandler;
    // Assigned in doStart(); shut down gracefully in doStop().
    private EventLoopGroup eventLoopGroup = null;

    public Listener(Config config) {
        this.config = config;
        this.stats = new SimpleStatisticsReporter();
        this.eopHandler = new EndOfPipeline(stats);
    }

    /**
     * Binds the listener and returns its bind future plus the event loop
     * group that must be shut down on stop.
     */
    protected abstract StartReturn start();

    /**
     * Appends every handler from the "handlers" config list to the pipeline —
     * each is built through its no-arg {@link HandlerProvider} named by the
     * "provider" key — registers it with the stats reporter, then terminates
     * the pipeline with the shared end-of-pipeline handler.
     *
     * @throws Exception on reflection or provider failure; a misconfigured
     *                   handler list is fatal for the listener.
     */
    void finalizePipeline(ChannelPipeline pipeline)
            throws Exception {
        int i = 0;
        for (Config handlerConfig : config.getConfigList("handlers")) {
            final String providerName = handlerConfig.getString("provider");
            log.debug("Loading provider for {}", providerName);
            final Class<?> providerClass = Class.forName(providerName);
            final Constructor<?> providerConstructor = providerClass.getConstructor();
            final HandlerProvider provider = (HandlerProvider) providerConstructor.newInstance();
            final Handler handler = provider.getHandler(handlerConfig);
            // Bug fix: the previous `i + ':' + ...` added the char ':' to the
            // int index (i + 58) before string concatenation, silently
            // dropping the separator from the handler name.
            pipeline.addLast(i + ":" + handler.getName(), handler);
            stats.appendHandler(handler);
            i++;
        }
        pipeline.addLast(eopHandler);
    }

    @Override
    protected void doStart() {
        final StartReturn startReturn = start();
        final ChannelFuture bindFuture = startReturn.getBindFuture();
        bindFuture.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                // operationComplete only fires once the future is complete, so
                // the old isDone() guard was redundant — and, had it ever been
                // false, the notification would have been silently dropped,
                // leaving the service stuck in STARTING.
                if (future.isSuccess()) {
                    log.info("{} bound successful", this);
                    notifyStarted();
                } else if (future.isCancelled()) {
                    log.info("{} bind cancelled", this);
                    notifyFailed(new ChannelException("Cancelled"));
                } else {
                    final Throwable cause = future.cause();
                    log.error("{} failed to bind", this, cause);
                    notifyFailed(cause);
                }
            }
        });
        this.eventLoopGroup = startReturn.getEventLoopGroup();
    }

    @Override
    protected void doStop() {
        // Graceful shutdown lets in-flight tasks drain before threads exit.
        //noinspection unchecked
        eventLoopGroup.shutdownGracefully().addListener(new GenericFutureListener() {
            @Override
            public void operationComplete(Future future) throws Exception {
                if (future.isSuccess()) {
                    notifyStopped();
                } else {
                    Throwable failure = new Exception("Netty event loop did not shutdown properly", future.cause());
                    log.error("Shutdown failed", failure);
                    notifyFailed(failure);
                }
            }
        });
    }
}
| 7,529 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/listeners/ByteBufToMessageDecoder.java | package com.airbnb.plog.server.listeners;
import com.airbnb.plog.MessageImpl;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageDecoder;
import java.util.List;
/**
 * Wraps each inbound frame in a tag-less {@link MessageImpl}. Stateless,
 * hence {@code @Sharable} across channels.
 */
@ChannelHandler.Sharable
final class ByteBufToMessageDecoder extends MessageToMessageDecoder<ByteBuf> {
    @Override
    protected final void decode(ChannelHandlerContext ctx, ByteBuf buf, List<Object> out) throws Exception {
        // MessageToMessageDecoder releases the input once decode() returns;
        // retain so the MessageImpl keeps a live reference to the buffer.
        out.add(new MessageImpl(buf.retain(), null));
    }
}
| 7,530 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/stats/SimpleStatisticsReporter.java | package com.airbnb.plog.server.stats;
import com.airbnb.plog.handlers.Handler;
import com.airbnb.plog.server.fragmentation.Defragmenter;
import com.eclipsesource.json.JsonArray;
import com.eclipsesource.json.JsonObject;
import com.google.common.cache.CacheStats;
import com.google.common.collect.Lists;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.net.URL;
import java.util.Enumeration;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicLongArray;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/**
 * Lock-free counters and log2-bucketed histograms for the listener pipeline,
 * serialized on demand as JSON by {@link #toJSON()}.
 */
@Slf4j
public final class SimpleStatisticsReporter implements StatisticsReporter {
    // Scalar event counters; updated lock-free from any thread.
    private final AtomicLong
            holesFromDeadPort = new AtomicLong(),
            holesFromNewMessage = new AtomicLong(),
            udpSimpleMessages = new AtomicLong(),
            udpInvalidVersion = new AtomicLong(),
            v0InvalidType = new AtomicLong(),
            v0InvalidMultipartHeader = new AtomicLong(),
            unknownCommand = new AtomicLong(),
            v0Commands = new AtomicLong(),
            v0MultipartMessages = new AtomicLong(),
            exceptions = new AtomicLong(),
            unhandledObjects = new AtomicLong();
    // Histograms bucketed by bit length (intLog2): Short.SIZE + 1 buckets
    // cover all 16-bit fragment counts/indices. The 2-D histograms are
    // flattened row-major: row = bucket of the expected fragment count,
    // column = bucket of the fragment index.
    private final AtomicLongArray
            v0MultipartMessageFragments = new AtomicLongArray(Short.SIZE + 1),
            v0InvalidChecksum = new AtomicLongArray(Short.SIZE + 1),
            droppedFragments = new AtomicLongArray((Short.SIZE + 1) * (Short.SIZE + 1)),
            invalidFragments = new AtomicLongArray((Short.SIZE + 1) * (Short.SIZE + 1));
    private final long startTime = System.currentTimeMillis();
    // Lazily memoized manifest version; the unsynchronized racy write is
    // benign because the computation is idempotent.
    private String memoizedPlogVersion = null;
    private Defragmenter defragmenter = null;
    // Made final: created once, only ever appended to via appendHandler().
    private final List<Handler> handlers = Lists.newArrayList();

    /** Bit length of i: 0 for 0, floor(log2(i)) + 1 otherwise. */
    private static int intLog2(int i) {
        return Integer.SIZE - Integer.numberOfLeadingZeros(i);
    }

    /** Serializes a 1-D histogram as a flat JSON array. */
    private static JsonArray arrayForLogStats(AtomicLongArray data) {
        final JsonArray result = new JsonArray();
        for (int i = 0; i < data.length(); i++) {
            result.add(data.get(i));
        }
        return result;
    }

    /**
     * Serializes a flattened 2-D histogram as a triangular array-of-arrays:
     * a fragment index can never exceed its fragment count, so only columns
     * up to the row's bucket are emitted.
     */
    private static JsonArray arrayForLogLogStats(AtomicLongArray data) {
        final JsonArray result = new JsonArray();
        for (int packetCountLog = 0; packetCountLog <= Short.SIZE; packetCountLog++) {
            final JsonArray entry = new JsonArray();
            result.add(entry);
            for (int packetIndexLog = 0; packetIndexLog <= packetCountLog; packetIndexLog++) {
                entry.add(data.get(packetCountLog * (Short.SIZE + 1) + packetIndexLog));
            }
        }
        return result;
    }

    @Override
    public final long receivedUdpSimpleMessage() {
        return this.udpSimpleMessages.incrementAndGet();
    }

    @Override
    public final long receivedUdpInvalidVersion() {
        return this.udpInvalidVersion.incrementAndGet();
    }

    @Override
    public final long receivedV0InvalidType() {
        return this.v0InvalidType.incrementAndGet();
    }

    @Override
    public final long receivedV0InvalidMultipartHeader() {
        return this.v0InvalidMultipartHeader.incrementAndGet();
    }

    @Override
    public final long receivedV0Command() {
        return this.v0Commands.incrementAndGet();
    }

    @Override
    public final long receivedUnknownCommand() {
        return this.unknownCommand.incrementAndGet();
    }

    @Override
    public final long receivedV0MultipartMessage() {
        return this.v0MultipartMessages.incrementAndGet();
    }

    @Override
    public long exception() {
        return this.exceptions.incrementAndGet();
    }

    @Override
    public long foundHolesFromDeadPort(int holesFound) {
        return holesFromDeadPort.addAndGet(holesFound);
    }

    @Override
    public long foundHolesFromNewMessage(int holesFound) {
        return holesFromNewMessage.addAndGet(holesFound);
    }

    @Override
    public final long receivedV0MultipartFragment(final int index) {
        return v0MultipartMessageFragments.incrementAndGet(intLog2(index));
    }

    @Override
    public final long receivedV0InvalidChecksum(int fragments) {
        return this.v0InvalidChecksum.incrementAndGet(intLog2(fragments - 1));
    }

    @Override
    public long receivedV0InvalidMultipartFragment(final int fragmentIndex, final int expectedFragments) {
        final int target = ((Short.SIZE + 1) * intLog2(expectedFragments - 1)) + intLog2(fragmentIndex);
        return invalidFragments.incrementAndGet(target);
    }

    @Override
    public long missingFragmentInDroppedMessage(final int fragmentIndex, final int expectedFragments) {
        final int target = ((Short.SIZE + 1) * intLog2(expectedFragments - 1)) + intLog2(fragmentIndex);
        return droppedFragments.incrementAndGet(target);
    }

    @Override
    public long unhandledObject() {
        return unhandledObjects.incrementAndGet();
    }

    /** Snapshots all counters and per-handler stats as a JSON string. */
    public final String toJSON() {
        final JsonObject result = new JsonObject()
                .add("version", getPlogVersion())
                .add("uptime", System.currentTimeMillis() - startTime)
                .add("udp_simple_messages", udpSimpleMessages.get())
                .add("udp_invalid_version", udpInvalidVersion.get())
                .add("v0_invalid_type", v0InvalidType.get())
                .add("v0_invalid_multipart_header", v0InvalidMultipartHeader.get())
                .add("unknown_command", unknownCommand.get())
                .add("v0_commands", v0Commands.get())
                .add("exceptions", exceptions.get())
                .add("unhandled_objects", unhandledObjects.get())
                .add("holes_from_dead_port", holesFromDeadPort.get())
                .add("holes_from_new_message", holesFromNewMessage.get())
                .add("v0_fragments", arrayForLogStats(v0MultipartMessageFragments))
                .add("v0_invalid_checksum", arrayForLogStats(v0InvalidChecksum))
                .add("v0_invalid_fragments", arrayForLogLogStats(invalidFragments))
                .add("dropped_fragments", arrayForLogLogStats(droppedFragments));
        if (defragmenter != null) {
            final CacheStats cacheStats = defragmenter.getCacheStats();
            result.add("defragmenter", new JsonObject()
                    .add("evictions", cacheStats.evictionCount())
                    .add("hits", cacheStats.hitCount())
                    .add("misses", cacheStats.missCount()));
        }
        // NOTE(review): handlers is iterated here without holding the monitor
        // used by appendHandler(); in practice handlers are all registered
        // during startup before stats are queried — confirm.
        final JsonArray handlersStats = new JsonArray();
        result.add("handlers", handlersStats);
        for (Handler handler : handlers) {
            final JsonObject statsCandidate = handler.getStats();
            final JsonObject stats = (statsCandidate == null) ? new JsonObject() : statsCandidate;
            handlersStats.add(stats.set("name", handler.getName()));
        }
        return result.toString();
    }

    /** Returns the Plog version from the jar manifest, or "unknown". */
    private String getPlogVersion() {
        if (memoizedPlogVersion == null) {
            try {
                memoizedPlogVersion = readVersionFromManifest();
            } catch (Throwable ignored) {
                memoizedPlogVersion = "unknown";
            }
        }
        return memoizedPlogVersion;
    }

    /**
     * Scans every manifest on the classpath for a "Plog-Version" attribute.
     *
     * @throws IOException          if a manifest cannot be read
     * @throws NoSuchFieldError     if no manifest declares the attribute
     */
    private String readVersionFromManifest() throws IOException {
        final Enumeration<URL> resources = getClass().getClassLoader()
                .getResources(JarFile.MANIFEST_NAME);
        while (resources.hasMoreElements()) {
            final URL url = resources.nextElement();
            final Attributes mainAttributes = new Manifest(url.openStream()).getMainAttributes();
            final String version = mainAttributes.getValue("Plog-Version");
            if (version != null) {
                return version;
            }
        }
        // Previously thrown without a message; kept the error type for
        // compatibility with the catch in getPlogVersion().
        throw new NoSuchFieldError("Plog-Version not found in any manifest");
    }

    /** Registers the (single) defragmenter whose cache stats are reported. */
    public synchronized void withDefrag(Defragmenter defragmenter) {
        if (this.defragmenter == null) {
            this.defragmenter = defragmenter;
        } else {
            throw new IllegalStateException("Defragmenter already provided!");
        }
    }

    /** Adds a pipeline handler whose per-handler stats appear in toJSON(). */
    public synchronized void appendHandler(Handler handler) {
        this.handlers.add(handler);
    }
}
| 7,531 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/stats/StatisticsReporter.java | package com.airbnb.plog.server.stats;
/**
 * Counters for listener pipeline events. Each method records one occurrence
 * and returns the updated counter value (per the reference implementation,
 * {@code SimpleStatisticsReporter}).
 */
public interface StatisticsReporter {
    /** Counts a plain (single-datagram, non-multipart) UDP message. */
    long receivedUdpSimpleMessage();

    /** Counts a datagram with an unsupported protocol version byte. */
    long receivedUdpInvalidVersion();

    /** Counts a v0 datagram with an unrecognized type field. */
    long receivedV0InvalidType();

    /** Counts a v0 four-letter command. */
    long receivedV0Command();

    /** Counts a command that no handler recognized. */
    long receivedUnknownCommand();

    /** Counts a v0 multipart datagram whose header failed validation. */
    long receivedV0InvalidMultipartHeader();

    /** Counts a fully reassembled (or single-fragment) multipart message. */
    long receivedV0MultipartMessage();

    /** Counts an exception surfaced by the pipeline. */
    long exception();

    /** Counts one received fragment, bucketed by its index. */
    long receivedV0MultipartFragment(int index);

    /** Counts a reassembled message whose checksum did not match. */
    long receivedV0InvalidChecksum(int index);

    /** Adds holes detected when a sender port was declared dead. */
    long foundHolesFromDeadPort(int holesFound);

    /** Adds holes detected when a new message id revealed skipped ids. */
    long foundHolesFromNewMessage(int holesFound);

    /** Counts a fragment inconsistent with its partially-built message. */
    long receivedV0InvalidMultipartFragment(final int fragmentIndex, final int expectedFragments);

    /** Counts a fragment still missing when its message was dropped. */
    long missingFragmentInDroppedMessage(final int fragmentIndex, final int expectedFragments);

    /** Counts an inbound object no pipeline stage could handle. */
    long unhandledObject();
}
| 7,532 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/fragmentation/Defragmenter.java | package com.airbnb.plog.server.fragmentation;
import com.airbnb.plog.MessageImpl;
import com.airbnb.plog.common.Murmur3;
import com.airbnb.plog.server.packetloss.ListenerHoleDetector;
import com.airbnb.plog.server.stats.StatisticsReporter;
import com.google.common.cache.*;
import com.typesafe.config.Config;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.MessageToMessageDecoder;
import lombok.extern.slf4j.Slf4j;
import java.util.BitSet;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
 * Reassembles multipart fragments into complete messages. Single-fragment
 * messages are fast-pathed; multi-fragment messages are accumulated in a
 * bounded, expiring cache keyed by message id.
 */
@Slf4j
public final class Defragmenter extends MessageToMessageDecoder<Fragment> {
    private final StatisticsReporter stats;
    // Partially-received messages keyed by msgId; bounded by total payload
    // weight (max_size) with access-based expiry (expire_time).
    private final Cache<Long, FragmentedMessage> incompleteMessages;
    // Optional packet-loss detector; null when defrag.detect_holes.enabled is false.
    private final ListenerHoleDetector detector;

    public Defragmenter(final StatisticsReporter statisticsReporter, final Config config) {
        this.stats = statisticsReporter;
        final Config holeConfig = config.getConfig("detect_holes");
        if (holeConfig.getBoolean("enabled")) {
            detector = new ListenerHoleDetector(holeConfig, stats);
        } else {
            detector = null;
        }
        incompleteMessages = CacheBuilder.newBuilder()
                .maximumWeight(config.getInt("max_size"))
                .expireAfterAccess(config.getDuration("expire_time", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
                .recordStats()
                // Weight entries by payload size so max_size bounds memory.
                .weigher(new Weigher<Long, FragmentedMessage>() {
                    @Override
                    public int weigh(Long id, FragmentedMessage msg) {
                        return msg.getContentLength();
                    }
                })
                .removalListener(new RemovalListener<Long, FragmentedMessage>() {
                    @Override
                    public void onRemoval(RemovalNotification<Long, FragmentedMessage> notification) {
                        // EXPLICIT removal means decode() completed the
                        // message and invalidated it — not a drop.
                        if (notification.getCause() == RemovalCause.EXPLICIT) {
                            return;
                        }
                        final FragmentedMessage message = notification.getValue();
                        if (message == null) {
                            return; // cannot happen with this cache, holds strong refs.
                        }
                        // Report every fragment still missing when the
                        // incomplete message was evicted/expired.
                        final int fragmentCount = message.getFragmentCount();
                        final BitSet receivedFragments = message.getReceivedFragments();
                        for (int idx = 0; idx < fragmentCount; idx++) {
                            if (!receivedFragments.get(idx)) {
                                stats.missingFragmentInDroppedMessage(idx, fragmentCount);
                            }
                        }
                        message.release();
                    }
                }).build();
    }

    /** Exposes cache hit/miss/eviction stats for the stats reporter. */
    public CacheStats getCacheStats() {
        return incompleteMessages.stats();
    }

    @Override
    protected void decode(final ChannelHandlerContext ctx, final Fragment fragment, final List<Object> out)
            throws Exception {
        if (fragment.isAlone()) {
            // Fast path: one-fragment message, no cache involvement.
            if (detector != null) {
                detector.reportNewMessage(fragment.getMsgId());
            }
            final ByteBuf payload = fragment.content();
            final int computedHash = Murmur3.hash32(payload);
            if (computedHash == fragment.getMsgHash()) {
                // Retain: the Fragment is released by the framework after
                // decode(), but the emitted Message keeps the buffer.
                payload.retain();
                out.add(new MessageImpl(payload, fragment.getTags()));
                this.stats.receivedV0MultipartMessage();
            } else {
                this.stats.receivedV0InvalidChecksum(1);
            }
        } else {
            handleMultiFragment(fragment, out);
        }
    }

    /**
     * Accumulates one fragment of a 2+ fragment message; emits the message
     * once its final fragment arrives and the checksum matches.
     */
    private void handleMultiFragment(final Fragment fragment, List<Object> out) throws java.util.concurrent.ExecutionException {
        // 2 fragments or more
        final long msgId = fragment.getMsgId();
        // Single-element array lets the loader callable flag first-seen ids.
        final boolean[] isNew = {false};
        final boolean complete;
        final FragmentedMessage message = incompleteMessages.get(msgId, new Callable<FragmentedMessage>() {
            @Override
            public FragmentedMessage call() throws Exception {
                isNew[0] = true;
                if (detector != null) {
                    detector.reportNewMessage(fragment.getMsgId());
                }
                // Loader already ingests this first fragment.
                return FragmentedMessage.fromFragment(fragment, Defragmenter.this.stats);
            }
        });
        if (isNew[0]) {
            complete = false; // new 2+ fragments, so cannot be complete
        } else {
            complete = message.ingestFragment(fragment, this.stats);
        }
        if (complete) {
            // EXPLICIT invalidation — skips the dropped-fragment accounting
            // in the removal listener above.
            incompleteMessages.invalidate(fragment.getMsgId());
            final ByteBuf payload = message.getPayload();
            if (Murmur3.hash32(payload) == message.getChecksum()) {
                out.add(new MessageImpl(payload, message.getTags()));
                this.stats.receivedV0MultipartMessage();
            } else {
                message.release();
                this.stats.receivedV0InvalidChecksum(message.getFragmentCount());
            }
        }
    }
}
| 7,533 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/fragmentation/FragmentedMessage.java | package com.airbnb.plog.server.fragmentation;
import com.airbnb.plog.Tagged;
import com.airbnb.plog.server.stats.StatisticsReporter;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.DefaultByteBufHolder;
import lombok.Getter;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
import java.util.BitSet;
import java.util.Collection;
/**
 * A multipart message under reassembly: a single buffer sized for the full
 * payload, plus a bitset tracking which fragments have been written into it.
 */
@Slf4j
@ToString
public final class FragmentedMessage extends DefaultByteBufHolder implements Tagged {
    // One bit per expected fragment; guarded by synchronized(receivedFragments).
    @Getter
    private final BitSet receivedFragments;
    @Getter
    private final int fragmentCount;
    @Getter
    private final int fragmentSize;
    @Getter
    private final int checksum;
    @Getter
    private boolean complete = false;
    @Getter
    private Collection<String> tags = null;

    private FragmentedMessage(ByteBufAllocator alloc,
                              final int totalLength,
                              final int fragmentCount,
                              final int fragmentSize,
                              final int hash) {
        // Fixed-capacity buffer for the whole payload; fragments are copied
        // in at their computed offsets.
        super(alloc.buffer(totalLength, totalLength));
        this.receivedFragments = new BitSet(fragmentCount);
        this.fragmentCount = fragmentCount;
        this.fragmentSize = fragmentSize;
        this.checksum = hash;
    }

    /** Creates a message shaped by its first-seen fragment and ingests it. */
    public static FragmentedMessage fromFragment(final Fragment fragment, StatisticsReporter stats) {
        final FragmentedMessage msg = new FragmentedMessage(
                fragment.content().alloc(),
                fragment.getTotalLength(),
                fragment.getFragmentCount(),
                fragment.getFragmentSize(),
                fragment.getMsgHash());
        msg.ingestFragment(fragment, stats);
        return msg;
    }

    /**
     * Copies one fragment into place after validating it against this
     * message's geometry (size, count, checksum, expected length).
     *
     * @return true only for the fragment that completes the message; invalid
     *         fragments are counted via {@code stats} and return false.
     */
    public final boolean ingestFragment(final Fragment fragment, StatisticsReporter stats) {
        final int fragmentSize = fragment.getFragmentSize();
        final int fragmentCount = fragment.getFragmentCount();
        final int msgHash = fragment.getMsgHash();
        final ByteBuf fragmentPayload = fragment.content();
        final int fragmentIndex = fragment.getFragmentIndex();
        final boolean fragmentIsLast = (fragmentIndex == fragmentCount - 1);
        final int foffset = fragmentSize * fragmentIndex;
        final ByteBuf fragmentTagsBuffer = fragment.getTagsBuffer();

        // The last fragment may be shorter: it carries the payload remainder.
        final int lengthOfCurrentFragment = fragmentPayload.capacity();
        final boolean validFragmentLength;
        if (fragmentIsLast) {
            validFragmentLength = (lengthOfCurrentFragment == this.getContentLength() - foffset);
        } else {
            validFragmentLength = (lengthOfCurrentFragment == this.fragmentSize);
        }

        if (this.getFragmentSize() != fragmentSize ||
                this.getFragmentCount() != fragmentCount ||
                this.getChecksum() != msgHash ||
                !validFragmentLength) {
            log.warn("Invalid {} for {}", fragment, this);
            stats.receivedV0InvalidMultipartFragment(fragmentIndex, this.getFragmentCount());
            return false;
        }

        // Tags from any fragment that carries them win (last writer).
        if (fragmentTagsBuffer != null) {
            this.tags = fragment.getTags();
        }

        boolean justCompleted = false;
        // valid fragment
        synchronized (receivedFragments) {
            receivedFragments.set(fragmentIndex);
            // Exactly one caller observes the cardinality reach the count,
            // so only one ingest returns true per message.
            if (receivedFragments.cardinality() == this.fragmentCount) {
                justCompleted = true;
                this.complete = true;
            }
        }
        content().setBytes(foffset, fragmentPayload, 0, lengthOfCurrentFragment);
        return justCompleted;
    }

    /**
     * Returns the assembled payload with reader/writer indices framed to the
     * full content.
     *
     * @throws IllegalStateException if not all fragments have arrived
     */
    public final ByteBuf getPayload() {
        if (!isComplete()) {
            throw new IllegalStateException("Incomplete");
        }
        content().readerIndex(0);
        content().writerIndex(getContentLength());
        return content();
    }

    /** Total expected payload length (the buffer's fixed capacity). */
    public final int getContentLength() {
        return content().capacity();
    }
}
| 7,534 |
0 | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-server/src/main/java/com/airbnb/plog/server/fragmentation/Fragment.java | package com.airbnb.plog.server.fragmentation;
import com.airbnb.plog.Tagged;
import com.airbnb.plog.server.pipeline.ByteBufs;
import com.google.common.base.Charsets;
import com.google.common.base.Splitter;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.DefaultByteBufHolder;
import io.netty.channel.socket.DatagramPacket;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.ToString;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Collections;
/**
 * One parsed v0 multipart datagram. Big-endian header layout (offsets from
 * the parsing code below): [2] fragment count, [4] fragment index,
 * [6] fragment size, [8] message id (32-bit sender part), [12] total length,
 * [16] message hash, [20] tags length; payload follows the 24-byte header
 * and optional tags block.
 */
@ToString(exclude = {"tagsBuffer"})
public final class Fragment extends DefaultByteBufHolder implements Tagged {
    static final int HEADER_SIZE = 24;
    @Getter
    private final int fragmentCount;
    @Getter
    private final int fragmentIndex;
    @Getter
    private final int fragmentSize;
    // Sender port in the high 32 bits combined with the sender-chosen id.
    @Getter
    private final long msgId;
    @Getter
    private final int totalLength;
    @Getter
    private final int msgHash;
    // Raw NUL-separated tags, sliced from the datagram; null when absent.
    @Getter(AccessLevel.MODULE)
    private final ByteBuf tagsBuffer;

    public Fragment(int fragmentCount,
                    int fragmentIndex,
                    int fragmentSize,
                    long msgId,
                    int totalLength,
                    int msgHash,
                    ByteBuf data,
                    ByteBuf tagsBuffer) {
        super(data);
        this.fragmentCount = fragmentCount;
        this.fragmentIndex = fragmentIndex;
        this.fragmentSize = fragmentSize;
        this.msgId = msgId;
        this.totalLength = totalLength;
        this.msgHash = msgHash;
        this.tagsBuffer = tagsBuffer;
    }

    /**
     * Parses a datagram into a Fragment, validating the header invariants.
     *
     * @throws IllegalArgumentException on short packets, zero fragment count,
     *                                  out-of-range index, or negative length
     */
    public static Fragment fromDatagram(DatagramPacket packet) {
        final ByteBuf content = packet.content().order(ByteOrder.BIG_ENDIAN);
        final int length = content.readableBytes();
        if (length < HEADER_SIZE) {
            throw new IllegalArgumentException("Packet too short: " + length + " bytes");
        }

        final int fragmentCount = content.getUnsignedShort(2);
        if (fragmentCount == 0) {
            throw new IllegalArgumentException("0 fragment count");
        }
        final int fragmentIndex = content.getUnsignedShort(4);
        if (fragmentIndex >= fragmentCount) {
            throw new IllegalArgumentException("Index " + fragmentIndex + " < count " + fragmentCount);
        }
        final int fragmentSize = content.getUnsignedShort(6);
        final int idRightPart = content.getInt(8);
        final int totalLength = content.getInt(12);
        if (totalLength < 0) {
            throw new IllegalArgumentException("Cannot support length " + totalLength + " > 2^31");
        }
        final int msgHash = content.getInt(16);

        final int tagsBufferLength = content.getUnsignedShort(20);
        final ByteBuf tagsBuffer = tagsBufferLength == 0 ? null : content.slice(HEADER_SIZE, tagsBufferLength);

        final int payloadLength = length - HEADER_SIZE - tagsBufferLength;
        final ByteBuf payload = content.slice(HEADER_SIZE + tagsBufferLength, payloadLength);

        // Key uniquely identifying the message across senders: sender port
        // in the high bits plus the sender-assigned 32-bit id.
        // NOTE(review): `+ idRightPart` sign-extends negative ids, borrowing
        // from the port bits. The mapping stays injective, but recovering the
        // port via `msgId >>> 32` would be off by one for negative ids —
        // confirm against ListenerHoleDetector's use of msgId.
        final int port = packet.sender().getPort();
        final long msgId = (((long) port) << Integer.SIZE) + idRightPart;

        return new Fragment(fragmentCount, fragmentIndex, fragmentSize, msgId, totalLength, msgHash, payload, tagsBuffer);
    }

    /** True for single-fragment messages (no reassembly needed). */
    boolean isAlone() {
        return fragmentCount == 1;
    }

    /** Decodes the NUL-separated tag block; empty when no tags were sent. */
    @Override
    public Collection<String> getTags() {
        if (tagsBuffer == null) {
            return Collections.emptyList();
        }
        final String seq = new String(ByteBufs.toByteArray(tagsBuffer), Charsets.UTF_8);
        return Splitter.on('\0').omitEmptyStrings().splitToList(seq);
    }
}
| 7,535 |
0 | Create_ds/plog/plog-distro/src/main/java | Create_ds/plog/plog-distro/src/main/java/plog/Stress.java | package plog;
import com.airbnb.plog.stress.PlogStress;
import java.net.SocketException;
/** Command-line entry point delegating to the stress-test tool. */
@SuppressWarnings("ClassOnlyUsedInOneModule")
public final class Stress {
    /** Static entry point only; utility class is not instantiable. */
    private Stress() {
    }

    public static void main(String[] args) throws SocketException {
        PlogStress.main(args);
    }
}
| 7,536 |
0 | Create_ds/plog/plog-distro/src/main/java | Create_ds/plog/plog-distro/src/main/java/plog/Server.java | package plog;
import com.airbnb.plog.server.PlogServer;
/** Command-line entry point delegating to the plog server. */
@SuppressWarnings("ClassOnlyUsedInOneModule")
public final class Server {
    /** Static entry point only; utility class is not instantiable. */
    private Server() {
    }

    public static void main(String[] args) {
        PlogServer.main(args);
    }
}
| 7,537 |
0 | Create_ds/plog/plog-common/src/main/java/com/airbnb/plog | Create_ds/plog/plog-common/src/main/java/com/airbnb/plog/common/Murmur3.java | package com.airbnb.plog.common;
import io.netty.buffer.ByteBuf;
import lombok.extern.slf4j.Slf4j;
import java.nio.ByteOrder;
/**
 * Murmur3 32-bit hash over Netty ByteBufs, reading the buffer little-endian
 * as the algorithm specifies. Does not modify the buffer's indices.
 */
@Slf4j
public final class Murmur3 {
    private static final int C1 = 0xcc9e2d51;
    private static final int C2 = 0x1b873593;

    /** Static utility holder; not instantiable. */
    private Murmur3() {
    }

    /** Hashes all readable bytes of {@code data} with seed 0. */
    public static int hash32(ByteBuf data) {
        return hash32(data, data.readerIndex(), data.readableBytes(), 0);
    }

    /** Hashes {@code length} bytes starting at {@code offset} with seed 0. */
    public static int hash32(ByteBuf data, final int offset, final int length) {
        return hash32(data, offset, length, 0);
    }

    /**
     * Hashes {@code length} bytes starting at {@code offset} with the given
     * seed, processing 4-byte little-endian blocks then the 0–3 byte tail.
     */
    @SuppressWarnings("OverlyLongMethod")
    public static int hash32(ByteBuf data, final int offset, final int length, final int seed) {
        final ByteBuf ordered = data.order(ByteOrder.LITTLE_ENDIAN);

        int h = seed;
        final int len4 = length >>> 2;
        final int end4 = offset + (len4 << 2);

        for (int i = offset; i < end4; i += 4) {
            int k = ordered.getInt(i);

            k *= C1;
            k = k << 15 | k >>> 17;
            k *= C2;

            h ^= k;
            h = h << 13 | h >>> 19;
            h = h * 5 + 0xe6546b64;
        }

        // Tail: mix the remaining 1–3 bytes. Fall-through is intentional —
        // each case accumulates one more byte into k.
        int k = 0;
        switch (length & 3) {
            case 3:
                k = (ordered.getByte(end4 + 2) & 0xff) << 16;
                // fall through
            case 2:
                k |= (ordered.getByte(end4 + 1) & 0xff) << 8;
                // fall through
            case 1:
                k |= ordered.getByte(end4) & 0xff;
                k *= C1;
                k = (k << 15) | (k >>> 17);
                k *= C2;
                h ^= k;
        }

        // Finalization: avalanche the bits.
        h ^= length;
        h ^= h >>> 16;
        h *= 0x85ebca6b;
        h ^= h >>> 13;
        h *= 0xc2b2ae35;
        h ^= h >>> 16;
        return h;
    }
}
| 7,538 |
0 | Create_ds/plog/plog-console/src/main/java/com/airbnb/plog | Create_ds/plog/plog-console/src/main/java/com/airbnb/plog/console/ConsoleOutputProvider.java | package com.airbnb.plog.console;
import com.airbnb.plog.handlers.Handler;
import com.airbnb.plog.handlers.HandlerProvider;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import java.io.PrintStream;
/**
 * Builds a console handler writing to stdout, or stderr when the optional
 * "target" setting equals "stderr" (case-insensitive).
 */
public final class ConsoleOutputProvider implements HandlerProvider {
    @Override
    public Handler getHandler(Config config) throws Exception {
        PrintStream target = System.out;
        try {
            // equalsIgnoreCase avoids the locale-sensitive
            // toLowerCase().equals(...) comparison used previously.
            if ("stderr".equalsIgnoreCase(config.getString("target"))) {
                target = System.err;
            }
        } catch (ConfigException.Missing ignored) {
            // "target" is optional; default to stdout.
        }
        return new ConsoleOutputHandler(target);
    }
}
| 7,539 |
0 | Create_ds/plog/plog-console/src/main/java/com/airbnb/plog | Create_ds/plog/plog-console/src/main/java/com/airbnb/plog/console/ConsoleOutputHandler.java | package com.airbnb.plog.console;
import com.airbnb.plog.Message;
import com.airbnb.plog.handlers.Handler;
import com.eclipsesource.json.JsonObject;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import lombok.RequiredArgsConstructor;
import java.io.PrintStream;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Terminal handler that prints each Message's string form to a PrintStream
 * and counts how many were printed (reported via getStats()).
 */
@RequiredArgsConstructor
public final class ConsoleOutputHandler extends SimpleChannelInboundHandler<Message> implements Handler {
    // Destination stream (stdout or stderr), injected by the provider.
    private final PrintStream target;
    // Running count of printed messages.
    private final AtomicLong logged = new AtomicLong();

    @Override
    protected final void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
        final String line = msg.toString();
        target.println(line);
        logged.incrementAndGet();
    }

    @Override
    public final JsonObject getStats() {
        final JsonObject stats = new JsonObject();
        stats.add("logged", logged.get());
        return stats;
    }

    @Override
    public final String getName() {
        return "console";
    }
}
| 7,540 |
0 | Create_ds/plog/plog-kafka/src/test/java/com/airbnb/plog/kafka | Create_ds/plog/plog-kafka/src/test/java/com/airbnb/plog/kafka/partitioner/FlinkPartitionerTest.java | package com.airbnb.plog.kafka.partitioner;
import java.util.Base64;
import java.util.Random;
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.junit.Assert.*;
/**
 * Verifies that FlinkPartitioner.computePartition reproduces Flink's own
 * key-group-based operator assignment for random base64-encoded keys.
 */
public class FlinkPartitionerTest {
    @Test
    public void computePartition() {
        // Fixed seed keeps the test deterministic.
        Random random = new Random(42L);
        byte[] id = new byte[16];
        // Deliberately non-round values to exercise the modular arithmetic.
        int maxParallelism = 10393;
        int numPartitions = 1983;
        for (int i = 0; i < 40; i++) {
            random.nextBytes(id);
            String encoded = Base64.getEncoder().encodeToString(id);
            int testPartition = FlinkPartitioner.computePartition(encoded, numPartitions, maxParallelism);
            // Reference value straight from Flink's runtime.
            int flinkPartition = KeyGroupRangeAssignment.assignKeyToParallelOperator(encoded, maxParallelism, numPartitions);
            assertThat(testPartition, equalTo(flinkPartition));
        }
    }
}
0 | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog/kafka/KafkaHandler.java | package com.airbnb.plog.kafka;
import com.airbnb.plog.kafka.KafkaProvider.EncryptionConfig;
import com.airbnb.plog.Message;
import com.airbnb.plog.handlers.Handler;
import com.eclipsesource.json.JsonObject;
import com.google.common.collect.ImmutableMap;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.errors.SerializationException;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import java.io.ByteArrayOutputStream;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import javax.crypto.Cipher;
import javax.crypto.spec.SecretKeySpec;
@RequiredArgsConstructor
@Slf4j
public final class KafkaHandler extends SimpleChannelInboundHandler<Message> implements Handler {
private final String defaultTopic;
private final boolean propagate;
private final KafkaProducer<String, byte[]> producer;
private final AtomicLong failedToSendMessageExceptions = new AtomicLong();
private final AtomicLong seenMessages = new AtomicLong();
private final AtomicLong serializationErrors = new AtomicLong();
private final EncryptionConfig encryptionConfig;
private SecretKeySpec keySpec = null;
private static final ImmutableMap<String, MetricName> SHORTNAME_TO_METRICNAME =
ImmutableMap.<String, MetricName>builder()
// Keep some compatibility with Plog 4.0
.put("message", new MetricName("record-send-rate", "producer-metrics"))
.put("resend", new MetricName("record-retry-rate", "producer-metrics"))
.put("failed_send", new MetricName("record-error-rate", "producer-metrics"))
.put("dropped_message", new MetricName("record-error-rate", "producer-metrics"))
.put("byte", new MetricName("outgoing-byte-rate", "producer-metrics"))
.build();
protected KafkaHandler(
final String clientId,
final boolean propagate,
final String defaultTopic,
final KafkaProducer<String, byte[]> producer,
final EncryptionConfig encryptionConfig) {
super();
this.propagate = propagate;
this.defaultTopic = defaultTopic;
this.producer = producer;
this.encryptionConfig = encryptionConfig;
if (encryptionConfig != null) {
final byte[] keyBytes = encryptionConfig.encryptionKey.getBytes();
keySpec = new SecretKeySpec(keyBytes, encryptionConfig.encryptionAlgorithm);
log.info("KafkaHandler start with encryption algorithm '"
+ encryptionConfig.encryptionAlgorithm + "' transformation '"
+ encryptionConfig.encryptionTransformation + "' provider '"
+ encryptionConfig.encryptionProvider + "'.");
} else {
log.info("KafkaHandler start without encryption.");
}
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
seenMessages.incrementAndGet();
byte[] payload = msg.asBytes();
if (encryptionConfig != null) {
try {
payload = encrypt(payload);
} catch (Exception e) {
log.error("Fail to encrypt message: ", e.getMessage());
}
}
String kafkaTopic = defaultTopic;
// Producer will simply do round-robin when a null partitionKey is provided
String partitionKey = null;
for (String tag : msg.getTags()) {
if (tag.startsWith("kt:")) {
kafkaTopic = tag.substring(3);
} else if (tag.startsWith("pk:")) {
partitionKey = tag.substring(3);
}
}
sendOrReportFailure(kafkaTopic, partitionKey, payload);
if (propagate) {
msg.retain();
ctx.fireChannelRead(msg);
}
}
private boolean sendOrReportFailure(String topic, final String key, final byte[] msg) {
final boolean nonNullTopic = !("null".equals(topic));
if (nonNullTopic) {
try {
producer.send(new ProducerRecord<String, byte[]>(topic, key, msg));
} catch (SerializationException e) {
failedToSendMessageExceptions.incrementAndGet();
serializationErrors.incrementAndGet();
} catch (KafkaException e) {
log.warn("Failed to send to topic {}", topic, e);
failedToSendMessageExceptions.incrementAndGet();
}
}
return nonNullTopic;
}
private byte[] encrypt(final byte[] plaintext) throws Exception {
Cipher cipher = Cipher.getInstance(
encryptionConfig.encryptionTransformation,encryptionConfig.encryptionProvider);
cipher.init(Cipher.ENCRYPT_MODE, keySpec);
ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
// IV size is the same as a block size and cipher dependent.
// This can be derived from consumer side by calling `cipher.getBlockSize()`.
outputStream.write(cipher.getIV());
outputStream.write(cipher.doFinal(plaintext));
return outputStream.toByteArray();
}
@Override
public JsonObject getStats() {
Map<MetricName, ? extends Metric> metrics = producer.metrics();
JsonObject stats = new JsonObject()
.add("seen_messages", seenMessages.get())
.add("failed_to_send", failedToSendMessageExceptions.get());
// Map to Plog v4-style naming
for (Map.Entry<String, MetricName> entry: SHORTNAME_TO_METRICNAME.entrySet()) {
Metric metric = metrics.get(entry.getValue());
if (metric != null) {
stats.add(entry.getKey(), metric.value());
} else {
stats.add(entry.getKey(), 0.0);
}
}
// Use default kafka naming, include all producer metrics
for (Map.Entry<MetricName, ? extends Metric> metric : metrics.entrySet()) {
double value = metric.getValue().value();
String name = metric.getKey().name().replace("-", "_");
if (value > -Double.MAX_VALUE && value < Double.MAX_VALUE) {
stats.add(name, value);
} else {
stats.add(name, 0.0);
}
}
return stats;
}
@Override
public final String getName() {
return "kafka";
}
}
| 7,542 |
0 | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog/kafka/KafkaProvider.java | package com.airbnb.plog.kafka;
import com.airbnb.plog.handlers.Handler;
import com.airbnb.plog.handlers.HandlerProvider;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigException;
import com.typesafe.config.ConfigValue;
import lombok.extern.slf4j.Slf4j;
import java.net.InetAddress;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
@Slf4j
public final class KafkaProvider implements HandlerProvider {
    private final static AtomicInteger clientId = new AtomicInteger();

    /** Optional payload-encryption settings read from the "encryption" config section. */
    static class EncryptionConfig {
        public String encryptionKey;
        public String encryptionAlgorithm;
        public String encryptionTransformation;
        public String encryptionProvider;
    }

    /**
     * Builds a {@link KafkaHandler} wired to a new Kafka producer whose
     * configuration comes from "producer_config" plus a generated client id.
     */
    @Override
    public Handler getHandler(Config config) throws Exception {
        final String defaultTopic = config.getString("default_topic");
        if ("null".equals(defaultTopic)) {
            log.warn("default topic is \"null\"; messages will be discarded unless tagged with kt:");
        }

        boolean propagate = false;
        try {
            propagate = config.getBoolean("propagate");
        } catch (ConfigException.Missing ignored) {
            // optional setting; messages are not forwarded downstream by default
        }

        // Unique per-process producer id: plog_<hostname>_<sequence>.
        final String producerClientId = "plog_" +
                InetAddress.getLocalHost().getHostName() + "_" +
                clientId.getAndIncrement();
        final KafkaProducer<String, byte[]> producer =
                new KafkaProducer<String, byte[]>(buildProducerProperties(config, producerClientId));

        return new KafkaHandler(producerClientId, propagate, defaultTopic, producer,
                readEncryptionConfig(config));
    }

    /** Copies "producer_config" entries and pins the client id and serializers. */
    private static Properties buildProducerProperties(Config config, String producerClientId) {
        final Properties properties = new Properties();
        for (Map.Entry<String, ConfigValue> kv : config.getConfig("producer_config").entrySet()) {
            properties.put(kv.getKey(), kv.getValue().unwrapped().toString());
        }
        properties.put(ProducerConfig.CLIENT_ID_CONFIG, producerClientId);
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
        log.info("Using producer with properties {}", properties);
        return properties;
    }

    /**
     * Returns the encryption settings, or null when the "encryption" section
     * (or any of its required keys) is absent — disabling encryption.
     */
    private static EncryptionConfig readEncryptionConfig(Config config) {
        try {
            final Config encryption = config.getConfig("encryption");
            final EncryptionConfig result = new EncryptionConfig();
            result.encryptionKey = encryption.getString("key");
            result.encryptionAlgorithm = encryption.getString("algorithm");
            result.encryptionTransformation = encryption.getString("transformation");
            result.encryptionProvider = encryption.getString("provider");
            return result;
        } catch (ConfigException.Missing ignored) {
            return null;
        }
    }
}
| 7,543 |
0 | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog/kafka | Create_ds/plog/plog-kafka/src/main/java/com/airbnb/plog/kafka/partitioner/FlinkPartitioner.java | package com.airbnb.plog.kafka.partitioner;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
/**
 * Kafka partitioner that mirrors Flink's key-group assignment so that keyed
 * records land on the partition the consuming Flink job expects.
 * Keyless records are distributed round-robin over available partitions.
 */
@Slf4j
public class FlinkPartitioner implements Partitioner {
    // Producer config key that overrides the assumed Flink max parallelism.
    private static final String MAX_PARALLELISM_CONFIG = "partitioner.maxParallelism";
    // Round-robin counter for keyless records; random start spreads initial load.
    private final AtomicInteger counter = new AtomicInteger((new Random()).nextInt());
    // Counts every partition() call; used only for periodic progress logging.
    private final AtomicInteger normalCounter = new AtomicInteger(0);
    // Default when no override is configured.
    // NOTE(review): must match the consuming Flink job's max parallelism or keyed
    // records end up in the wrong key group — confirm 16386 is the intended value.
    private int maxParallelism = 16386;
    // Clears the sign bit, mapping any int into [0, Integer.MAX_VALUE].
    private static int toPositive(int number) {
        return number & Integer.MAX_VALUE;
    }
    public void configure(Map<String, ?> configs) {
        // The value may arrive as a Number or as a String depending on how the
        // producer properties were populated; handle both.
        Object maxParallelism = configs.get(MAX_PARALLELISM_CONFIG);
        log.warn("Configuration is {}", configs);
        if (maxParallelism instanceof Number) {
            this.maxParallelism = ((Number) maxParallelism).intValue();
        } else if (maxParallelism instanceof String) {
            try {
                this.maxParallelism = Integer.parseInt((String) maxParallelism);
            } catch (NumberFormatException e) {
                log.error("Failed to parse maxParallelism value {}", maxParallelism);
            }
        }
    }
    public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
        List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
        int numPartitions = partitions.size();
        int msgCount = normalCounter.incrementAndGet();
        if (msgCount % 1000 == 0) {
            log.info("Sent {} messages", msgCount);
        }
        if (key == null) {
            // No key: round-robin, preferring partitions that currently have a leader.
            int nextValue = this.counter.getAndIncrement();
            List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
            if (availablePartitions.size() > 0) {
                int part = toPositive(nextValue) % availablePartitions.size();
                return availablePartitions.get(part).partition();
            } else {
                return toPositive(nextValue) % numPartitions;
            }
        } else {
            // Keyed: replicate Flink's key-group to partition mapping.
            return computePartition(key, numPartitions, maxParallelism);
        }
    }
    public void close() {
    }
    /*
     * These static functions are derived from the code in KeyGroupRangeAssignment.
     * https://github.com/apache/flink/blob/8674b69964eae50cad024f2c5caf92a71bf21a09/flink-runtime/src/main/java/org/apache/flink/runtime/state/KeyGroupRangeAssignment.java
     * The full dependency into this project results in a significant jar size increase.
     *
     * By pulling in only these functions, we keep the distribution size under 10 MB.
     */
    // Maps the key's hash to a key group [0, maxParallelism), then scales that
    // group down to a partition index [0, numPartitions).
    static int computePartition(Object key, int numPartitions, int maxParallelism) {
        int group = murmurHash(key.hashCode()) % maxParallelism;
        return (group * numPartitions) / maxParallelism;
    }
    // MurmurHash finalization of a single int, then made non-negative.
    // Do not alter the constants or ordering: the result must agree bit-for-bit
    // with Flink's hashing or partition assignment breaks.
    static int murmurHash(int code) {
        code *= 0xcc9e2d51;
        code = Integer.rotateLeft(code, 15);
        code *= 0x1b873593;
        code = Integer.rotateLeft(code, 13);
        code = code * 5 + 0xe6546b64;
        // XOR with the input length in bytes (an int is 4 bytes).
        code ^= 4;
        code = bitMix(code);
        if (code >= 0) {
            return code;
        } else if (code != Integer.MIN_VALUE) {
            return -code;
        } else {
            // -Integer.MIN_VALUE overflows back to itself; map it to 0 instead.
            return 0;
        }
    }
    // Final avalanche step of MurmurHash3 (fmix32).
    static int bitMix(int in) {
        in ^= in >>> 16;
        in *= 0x85ebca6b;
        in ^= in >>> 13;
        in *= 0xc2b2ae35;
        in ^= in >>> 16;
        return in;
    }
}
| 7,544 |
0 | Create_ds/plog/plog-client/src/test/groovy/com/airbnb/plog | Create_ds/plog/plog-client/src/test/groovy/com/airbnb/plog/client/PlogClientTest.java | package com.airbnb.plog.client;
import java.util.List;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
/**
* Created by rong_hu on 2/1/17.
*/
public class PlogClientTest {
@Test
public void testChunkMessage_NoSplit() {
int chunkSize = 8;
byte[] messageBytes = "cafebabe".getBytes();
List<byte[]> chunks = PlogClient.chunkMessage(messageBytes, chunkSize);
assertThat(chunks.size(), is(1));
assertThat(chunks.get(0), equalTo(messageBytes));
}
@Test
public void testChunkMessage_UnevenSplit() {
int chunkSize = 20;
byte[] messageBytes = "Brevity is the soul of wit".getBytes();
List<byte[]> chunks = PlogClient.chunkMessage(messageBytes, chunkSize);
assertThat(chunks.size(), is(2));
assertThat(chunks.get(0), equalTo("Brevity is the soul ".getBytes()));
assertThat(chunks.get(1), equalTo("of wit".getBytes()));
}
} | 7,545 |
0 | Create_ds/plog/plog-client/src/test/groovy/com/airbnb/plog | Create_ds/plog/plog-client/src/test/groovy/com/airbnb/plog/client/MultipartMessageTest.java | package com.airbnb.plog.client;
import java.nio.charset.Charset;
import org.junit.Test;
import static org.junit.Assert.assertArrayEquals;
public class MultipartMessageTest {
@Test
public void testEncode() {
byte[] message = "abc".getBytes(Charset.forName("ISO-8859-1"));
byte[] checksum = PlogClient.computeChecksum(message);
byte[] chunk = MultipartMessage.encode(1, 3, checksum, 64000, 1, 0, message);
byte[] expected = {0, 1, 0, 1, 0, 0, (byte)0xfa, 0, 0, 0, 0, 1, 0, 0, 0, 3,
(byte)0xb3, (byte)0xdd, (byte)0x93, (byte)0xfa, 0, 0, 0, 0,
(byte)0x61, (byte)0x62, (byte)0x63};
assertArrayEquals(expected, chunk);
}
} | 7,546 |
0 | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog/client/MultipartMessage.java | package com.airbnb.plog.client;
import java.nio.ByteBuffer;
/**
 * ## A chunk in [a multi-part UDP message](https://github.com/airbnb/plog#packet-type-01-fragmented-message).
 *
 * All multi-byte header fields are big-endian; the header is 24 bytes.
 */
public class MultipartMessage {
    public static final byte PROTOCOL_VERSION = 0;
    /** Packet type 01: fragmented message. */
    public static final byte TYPE_MULTIPART_MESSAGE = 1;
    /** @deprecated misspelled; use {@link #TYPE_MULTIPART_MESSAGE} instead. */
    @Deprecated
    public static final byte TYPE_MULTIPART_MESSGAE = TYPE_MULTIPART_MESSAGE;
    private static final int NUM_HEADER_BYTES = 24;
    /**
     * Encode the payload as a chunk in a multi-part UDP message.
     *
     * @param messageId second half of the message identifier (the first half is the
     *                  UDP client port); must increment per message for hole detection
     * @param length    total byte length of the whole message (all fragments)
     * @param checksum  4-byte big-endian MurmurHash3 of the total message payload
     * @param chunkSize payload byte length used for each fragment of the message
     * @param count     number of fragments in the message
     * @param index     index of this fragment within the message
     * @param payload   this fragment's payload bytes
     * @return the encoded bytes ready for UDP transmission.
     */
    public static byte[] encode(int messageId,
                                int length,
                                byte[] checksum,
                                int chunkSize,
                                int count,
                                int index,
                                byte[] payload) {
        // ByteBuffer is big-endian by default, which matches the wire format,
        // so all fields can be written directly without temporary buffers.
        ByteBuffer byteBuffer = ByteBuffer.allocate(NUM_HEADER_BYTES + payload.length);
        // Byte 00: protocol version (00)
        byteBuffer.put(PROTOCOL_VERSION);
        // Byte 01: packet type 01: fragmented message
        byteBuffer.put(TYPE_MULTIPART_MESSAGE);
        // Bytes 02-03: fragment count for the message
        byteBuffer.putShort((short) count);
        // Bytes 04-05: index of this fragment in the message
        byteBuffer.putShort((short) index);
        // Bytes 06-07: byte length of the payload for each fragment
        byteBuffer.putShort((short) chunkSize);
        // Bytes 08-11: second half of the message identifier
        byteBuffer.putInt(messageId);
        // Bytes 12-15: total byte length of the message
        byteBuffer.putInt(length);
        // Bytes 16-19: MurmurHash3 hash of the total message payload
        byteBuffer.put(checksum, 0, 4);
        // Bytes 20-23: null (reserved) bytes
        byteBuffer.putInt(0);
        // Bytes 24-: the fragment payload
        byteBuffer.put(payload);
        return byteBuffer.array();
    }
}
| 7,547 |
0 | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog/client/PlogClient.java | package com.airbnb.plog.client;
import lombok.extern.slf4j.Slf4j;
import java.io.Closeable;
import java.io.IOException;
import java.net.DatagramPacket;
import java.net.DatagramSocket;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.hash.Hashing;
/**
* ## Plog client for Java
* Example:
*
* ```java
* PlogClient plogClient = new PlogClient("127.0.0.1", 23456, 64000);
* plogClient.send("My hovercraft is full of eels.");
* ```
* You can configure the client at initialization by passing these options:
*
* + host - The host of the Plog process (e.g., 'localhost')
* + port - The port on which Plog is listening (e.g., 23456)
* + chunkSize - The maximum payload size for multipart datagrams (e.g., 64,000)
*/
@Slf4j
public class PlogClient implements Closeable {
public static final int DEFAULT_CHUNK_SIZE = 64000;
private AtomicInteger lastMessageId;
private final InetAddress address;
private final int port;
private final int chunkSize;
private DatagramSocket socket;
public PlogClient(String host, int port) {
this(host, port, DEFAULT_CHUNK_SIZE);
}
public PlogClient(String host, int port, int chunkSize) {
Preconditions.checkNotNull(host, "host cannot be null!");
Preconditions.checkArgument(port > 1024 && port < 65536, "Must provide a valid port number!");
Preconditions.checkArgument(chunkSize < 65483, "Maximum Plog UDP data length is 65483 bytes!");
openSocket();
try {
address = InetAddress.getByName(host);
} catch (UnknownHostException e) {
log.error("Unknown address {}", host, e);
throw Throwables.propagate(e);
}
this.port = port;
this.chunkSize = chunkSize;
this.lastMessageId = new AtomicInteger(1);
}
/**
* Send the message to Plog server.
*/
public void send(String message) {
// ISO-8859-1 is ASCII-8bit. It's equivalent to ruby's BINARY encoding.
byte[] messageBytes = message.getBytes(Charset.forName("ISO-8859-1"));
int messageId = lastMessageId.getAndIncrement();
lastMessageId.compareAndSet(Integer.MAX_VALUE, 1);
int messageLength = messageBytes.length;
byte[] checksum = computeChecksum(messageBytes);
List<byte[]> chunks = chunkMessage(messageBytes, chunkSize);
int count = chunks.size();
log.debug("Plog: sending {}; {} chunk(s)", messageId, count);
for (int i = 0; i < count; i++) {
sendToSocket(MultipartMessage.encode(messageId,
messageLength,
checksum,
chunkSize,
count,
i,
chunks.get(i)));
}
}
@VisibleForTesting
static byte[] computeChecksum(byte[] messageBytes) {
// Checksum in little-endian.
byte[] checksum = Hashing.murmur3_32().hashBytes(messageBytes).asBytes();
// Reverse checksum bytes to make it big-endian.
byte temp;
int start = 0, end = 3;
while (start < end) {
temp = checksum[start];
checksum[start] = checksum[end];
checksum[end] = temp;
start++;
end--;
}
return checksum;
}
@VisibleForTesting
static List<byte[]> chunkMessage(byte[] messageBytes, int size) {
final List<byte[]> chunks = new ArrayList<byte[]>();
int startIndex = 0;
while (startIndex + size < messageBytes.length) {
chunks.add(Arrays.copyOfRange(messageBytes, startIndex, startIndex + size));
startIndex += size;
}
// If there's some remaining bytes,
// copy them up to the end of messageBytes.
if (startIndex < messageBytes.length) {
chunks.add(Arrays.copyOfRange(messageBytes, startIndex, messageBytes.length));
}
return chunks;
}
private void openSocket() {
try {
socket = new DatagramSocket();
} catch (SocketException e) {
log.error("Cannot open socket", e);
throw Throwables.propagate(e);
}
}
private void sendToSocket(byte[] chunk) {
DatagramPacket packet = new DatagramPacket(chunk, chunk.length, address, port);
try {
log.trace("Sending {} to UDP port {}", chunk, port);
socket.send(packet);
} catch (IOException e) {
log.error("Error sending packet!", e);
socket.close();
openSocket();
}
}
@Override
public void close() throws IOException {
if (socket == null) return;
socket.close();
}
} | 7,548 |
0 | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog/client | Create_ds/plog/plog-client/src/main/java/com/airbnb/plog/client/fragmentation/Fragmenter.java | package com.airbnb.plog.client.fragmentation;
import com.airbnb.plog.Message;
import com.airbnb.plog.common.Murmur3;
import com.google.common.base.Charsets;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import lombok.extern.slf4j.Slf4j;
import java.nio.ByteOrder;
import java.util.Collection;
/**
 * Splits a message (payload plus optional tags) into wire-format v0 UDP
 * fragments, each carrying a 24-byte big-endian header. Tags are appended,
 * NUL-separated, to the final fragment only.
 */
@Slf4j
public final class Fragmenter {
    public static final byte[] UDP_V0_FRAGMENT_PREFIX = new byte[]{0, 1};
    private static final int HEADER_SIZE = 24;
    private final int maxFragmentSizeExcludingHeader;
    /**
     * @param maxFragmentSize maximum datagram size including the 24-byte header;
     *                        must leave at least one byte of payload room
     */
    public Fragmenter(int maxFragmentSize) {
        maxFragmentSizeExcludingHeader = maxFragmentSize - HEADER_SIZE;
        if (maxFragmentSizeExcludingHeader < 1) {
            throw new IllegalArgumentException("Fragment size < " + (HEADER_SIZE + 1));
        }
    }
    // Writes the 24-byte header: prefix, fragment count/index/length,
    // message index, total message length, hash, tags buffer length, padding.
    private static void writeHeader(int messageIndex, int fragmentLength, int tagsBufferLength, int messageLength, int hash, int fragmentCount, int fragmentIdx, ByteBuf fragment) {
        fragment.writeBytes(UDP_V0_FRAGMENT_PREFIX);
        fragment.writeShort(fragmentCount);
        fragment.writeShort(fragmentIdx);
        fragment.writeShort(fragmentLength);
        fragment.writeInt(messageIndex);
        fragment.writeInt(messageLength);
        fragment.writeInt(hash);
        fragment.writeShort(tagsBufferLength);
        fragment.writeZero(2);
    }
    /** Fragments a raw byte array, hashing it first. */
    public ByteBuf[] fragment(ByteBufAllocator alloc, byte[] payload, Collection<String> tags, int messageIndex) {
        final ByteBuf buf = Unpooled.wrappedBuffer(payload);
        final int hash = Murmur3.hash32(buf, 0, payload.length);
        return fragment(alloc, buf, tags, messageIndex, payload.length, hash);
    }
    /** Fragments a ByteBuf, hashing its readable bytes first. */
    public ByteBuf[] fragment(ByteBufAllocator alloc, ByteBuf payload, Collection<String> tags, int messageIndex) {
        final int length = payload.readableBytes();
        final int hash = Murmur3.hash32(payload, 0, length);
        return fragment(alloc, payload, tags, messageIndex, length, hash);
    }
    /** Fragments a {@link Message}, using its content and tags. */
    public ByteBuf[] fragment(ByteBufAllocator alloc, Message msg, int messageIndex) {
        return fragment(alloc, msg.content(), msg.getTags(), messageIndex);
    }
    /**
     * Core fragmentation with a precomputed length and hash.
     *
     * @throws IllegalStateException if the encoded tag buffer cannot fit in a
     *                               single fragment's payload space
     */
    public ByteBuf[] fragment(ByteBufAllocator alloc, ByteBuf payload, Collection<String> tags, int messageIndex, int length, int hash) {
        final byte[][] tagBytes;
        int tagsBufferLength = 0;
        final int tagsCount;
        if (tags != null && !tags.isEmpty()) {
            tagsCount = tags.size();
            if (tagsCount > 1) {
                // one NUL separator between each pair of consecutive tags
                tagsBufferLength += tagsCount - 1;
            }
            tagBytes = new byte[tagsCount][];
            int tagIdx = 0;
            for (String tag : tags) {
                final byte[] bytes = tag.getBytes(Charsets.UTF_8);
                tagsBufferLength += bytes.length;
                tagBytes[tagIdx] = bytes;
                tagIdx++;
            }
            // Fixed: previously compared tagBytes.length (the NUMBER of tags)
            // against the byte budget, so oversized tag buffers slipped through
            // and the error message reported the wrong quantity.
            if (tagsBufferLength > maxFragmentSizeExcludingHeader) {
                throw new IllegalStateException("Cannot store " + tagsBufferLength + " bytes of tags in " +
                        maxFragmentSizeExcludingHeader + " bytes max");
            }
        } else {
            tagBytes = null;
            tagsCount = 0;
        }
        // round-up division
        final int fragmentCount = (int) (
                ((long) length + tagsBufferLength + maxFragmentSizeExcludingHeader - 1)
                        / maxFragmentSizeExcludingHeader);
        final ByteBuf[] fragments = new ByteBuf[fragmentCount];
        // All packets but the last are easy
        int contentIdx, fragmentIdx;
        for (contentIdx = 0, fragmentIdx = 0; fragmentIdx < fragmentCount - 1;
             fragmentIdx++, contentIdx += maxFragmentSizeExcludingHeader) {
            final ByteBuf fragment = alloc.buffer(HEADER_SIZE + maxFragmentSizeExcludingHeader,
                    HEADER_SIZE + maxFragmentSizeExcludingHeader).order(ByteOrder.BIG_ENDIAN);
            writeHeader(messageIndex, maxFragmentSizeExcludingHeader, 0, length, hash, fragmentCount, fragmentIdx, fragment);
            fragment.writeBytes(payload, contentIdx, maxFragmentSizeExcludingHeader);
            fragments[fragmentIdx] = fragment;
        }
        // Last fragment carries the remaining payload plus the whole tag buffer.
        final int lastPayloadLength = length - (maxFragmentSizeExcludingHeader * (fragmentCount - 1));
        final ByteBuf finalFragment = alloc.buffer(HEADER_SIZE + tagsBufferLength + lastPayloadLength,
                HEADER_SIZE + tagsBufferLength + lastPayloadLength).order(ByteOrder.BIG_ENDIAN);
        writeHeader(messageIndex, maxFragmentSizeExcludingHeader, tagsBufferLength, length, hash, fragmentCount, fragmentIdx, finalFragment);
        if (tagsCount > 0) {
            // tags buffer length (redundant with writeHeader above, but harmless)
            finalFragment.setShort(20, tagsBufferLength);
            for (int i = 0; i < tagsCount - 1; i++) {
                finalFragment.writeBytes(tagBytes[i]);
                finalFragment.writeZero(1);
            }
            finalFragment.writeBytes(tagBytes[tagsCount - 1]);
        }
        finalFragment.writeBytes(payload, contentIdx, lastPayloadLength);
        fragments[fragmentCount - 1] = finalFragment;
        return fragments;
    }
}
| 7,549 |
0 | Create_ds/plog/plog-stress/src/main/java/com/airbnb/plog | Create_ds/plog/plog-stress/src/main/java/com/airbnb/plog/stress/PlogStress.java | package com.airbnb.plog.stress;
import com.airbnb.plog.client.fragmentation.Fragmenter;
import com.airbnb.plog.common.Murmur3;
import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import com.google.common.util.concurrent.RateLimiter;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;
import lombok.extern.slf4j.Slf4j;
import java.net.InetSocketAddress;
import java.net.SocketException;
import java.nio.ByteBuffer;
import java.nio.channels.DatagramChannel;
import java.util.Random;
import java.util.concurrent.TimeUnit;
// Standalone stress-test driver: fragments random payloads and fires them at a
// Plog UDP endpoint from several threads, reporting Codahale metrics to console.
@SuppressWarnings("CallToSystemExit")
@Slf4j
public final class PlogStress {
    private final MetricRegistry registry = new MetricRegistry();
    public static void main(String[] args) {
        new PlogStress().run(ConfigFactory.load());
    }
    // Reads plog.stress.* config, precomputes one hash per possible message
    // size, then spawns sender threads that run until stop_after messages each.
    @SuppressWarnings("OverlyLongMethod")
    private void run(Config config) {
        System.err.println(
                " _\n" +
                " _ __| |___ __ _\n" +
                "| '_ \\ / _ \\/ _` |\n" +
                "| .__/_\\___/\\__, |\n" +
                "|_| |___/ stress"
        );
        final Config stressConfig = config.getConfig("plog.stress");
        final int threadCount = stressConfig.getInt("threads");
        log.info("Using {} threads", threadCount);
        // Global messages-per-second cap shared by all sender threads.
        final int rate = stressConfig.getInt("rate");
        final RateLimiter rateLimiter = RateLimiter.create(rate);
        // Each thread reopens its socket every renew_rate messages.
        final int socketRenewRate = stressConfig.getInt("renew_rate");
        // Message sizes are drawn from [min_size, max_size) in size_increments steps.
        final int minSize = stressConfig.getInt("min_size");
        final int maxSize = stressConfig.getInt("max_size");
        final int sizeIncrements = stressConfig.getInt("size_increments");
        // Exponent applied to a uniform draw to skew the size distribution.
        final double sizeExponent = stressConfig.getDouble("size_exponent");
        final int sizeDelta = maxSize - minSize;
        final int differentSizes = sizeDelta / sizeIncrements;
        if (differentSizes == 0) {
            throw new RuntimeException("No sizes! Decrease plog.stress.size_increments");
        }
        final int stopAfter = stressConfig.getInt("stop_after");
        final int packetSize = stressConfig.getInt("udp.size");
        final int bufferSize = stressConfig.getInt("udp.SO_SNDBUF");
        final Fragmenter fragmenter = new Fragmenter(packetSize);
        // Fixed seed makes runs reproducible; every message is a prefix of one
        // shared random buffer, so hashes can be precomputed per size.
        final Random random = new Random(stressConfig.getLong("seed"));
        final byte[] randomBytes = new byte[maxSize];
        random.nextBytes(randomBytes);
        final ByteBuf randomMessage = Unpooled.wrappedBuffer(randomBytes);
        log.info("Generating {} different hashes", differentSizes);
        // NOTE(review): this uses the 4-arg Murmur3.hash32 with explicit seed 0
        // while Fragmenter calls a 3-arg overload — presumably equivalent; confirm.
        final int[] precomputedHashes = new int[differentSizes];
        for (int i = 0; i < differentSizes; i++) {
            precomputedHashes[i] = Murmur3.hash32(randomMessage, 0, minSize + sizeIncrements * i, 0);
        }
        final ByteBufAllocator allocator = new PooledByteBufAllocator();
        // Probability in [0,1] of deliberately dropping a packet before sending.
        final double packetLoss = stressConfig.getDouble("udp.loss");
        final Meter socketMeter = registry.meter("Sockets used");
        final Meter messageMeter = registry.meter("Messages sent");
        final Meter packetMeter = registry.meter("Packets sent");
        final Meter sendFailureMeter = registry.meter("Send failures");
        final Meter lossMeter = registry.meter("Packets dropped");
        final Histogram messageSizeHistogram = registry.histogram("Message size");
        final Histogram packetSizeHistogram = registry.histogram("Packet size");
        final InetSocketAddress target = new InetSocketAddress(stressConfig.getString("host"), stressConfig.getInt("port"));
        log.info("Starting with config {}", config);
        final long consoleRate = stressConfig.getDuration("console.interval", TimeUnit.MILLISECONDS);
        ConsoleReporter.forRegistry(registry).build().start(consoleRate, TimeUnit.MILLISECONDS);
        for (int i = 0; i < threadCount; i++) {
            new Thread("stress_" + i) {
                private DatagramChannel channel = null;
                @Override
                public void run() {
                    try {
                        for (int sent = 0; sent < stopAfter; sent++, messageMeter.mark()) {
                            // Periodically recycle the socket to exercise connection churn.
                            if (sent % socketRenewRate == 0) {
                                if (channel != null) {
                                    channel.close();
                                }
                                channel = DatagramChannel.open();
                                channel.socket().setSendBufferSize(bufferSize);
                                socketMeter.mark();
                            }
                            // global rate limiting
                            rateLimiter.acquire();
                            // Skewed random pick of a precomputed message size.
                            // NOTE(review): `random` is shared across threads — safe but contended.
                            final int sizeIndex = (int) (Math.pow(random.nextDouble(), sizeExponent) * differentSizes);
                            final int messageSize = minSize + sizeIncrements * sizeIndex;
                            final int hash = precomputedHashes[sizeIndex];
                            messageSizeHistogram.update(messageSize);
                            final ByteBuf[] fragments = fragmenter.fragment(allocator, randomMessage, null, sent, messageSize, hash);
                            for (ByteBuf fragment : fragments) {
                                // Simulate packet loss before hitting the wire.
                                if (random.nextDouble() < packetLoss) {
                                    lossMeter.mark();
                                } else {
                                    // Shadows the config-level packetSize on purpose: actual datagram size.
                                    final int packetSize = fragment.readableBytes();
                                    final ByteBuffer buffer = fragment.nioBuffer();
                                    try {
                                        channel.send(buffer, target);
                                        packetSizeHistogram.update(packetSize);
                                        packetMeter.mark();
                                    } catch (SocketException e) {
                                        sendFailureMeter.mark();
                                    }
                                }
                                // Release the pooled buffer whether sent or dropped.
                                fragment.release();
                            }
                        }
                    } catch (Throwable t) {
                        t.printStackTrace();
                        System.exit(1);
                    }
                }
            }.start();
        }
    }
}
| 7,550 |
0 | Create_ds/plog/plog-stress/src/main/java/com/airbnb/plog | Create_ds/plog/plog-stress/src/main/java/com/airbnb/plog/stress/EaterProvider.java | package com.airbnb.plog.stress;
import com.airbnb.plog.Message;
import com.airbnb.plog.handlers.Handler;
import com.airbnb.plog.handlers.HandlerProvider;
import com.eclipsesource.json.JsonObject;
import com.typesafe.config.Config;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import java.util.concurrent.atomic.AtomicLong;
@SuppressWarnings("ClassOnlyUsedInOneModule")
public final class EaterProvider implements HandlerProvider {
    /**
     * Terminal handler that consumes every message; the running count is
     * its only observable effect.
     */
    private static class Eater extends SimpleChannelInboundHandler<Message> implements Handler {
        private final AtomicLong seen = new AtomicLong();
        @Override
        protected void channelRead0(ChannelHandlerContext ctx, Message msg) throws Exception {
            // Swallow the message after counting it.
            seen.incrementAndGet();
        }
        @Override
        public String getName() {
            return "eater";
        }
        @Override
        public JsonObject getStats() {
            return new JsonObject().add("seen_messages", seen.get());
        }
    }
    @Override
    public Handler getHandler(Config config) throws Exception {
        return new Eater();
    }
}
| 7,551 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/Message.java | package com.airbnb.plog;
import io.netty.buffer.ByteBufHolder;
/**
 * A plog message: reference-counted payload bytes ({@link ByteBufHolder})
 * plus an optional collection of tags ({@link Tagged}).
 */
public interface Message extends ByteBufHolder, Tagged {
    /** Returns the message payload as a byte array. */
    byte[] asBytes();
}
| 7,552 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/MessageImpl.java | package com.airbnb.plog;
import com.airbnb.plog.server.pipeline.ByteBufs;
import com.google.common.base.Joiner;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.DefaultByteBufHolder;
import lombok.AccessLevel;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.util.Collection;
import java.util.Collections;
// Default Message implementation backed by a Netty ByteBuf plus optional tags.
@Data
@EqualsAndHashCode(callSuper = false)
public final class MessageImpl extends DefaultByteBufHolder implements Message {
    private final Collection<String> tags;
    // Lazily-memoized copy of the payload, filled on first asBytes() call.
    // NOTE(review): Lombok's @Data includes this field in equals()/hashCode(),
    // so equality can differ depending on whether asBytes() was called —
    // consider excluding it; TODO confirm intended semantics.
    @Getter(AccessLevel.NONE)
    private byte[] memoizedBytes;
    public MessageImpl(ByteBuf data, Collection<String> tags) {
        super(data);
        this.tags = tags;
    }
    // Builds a message by copying the bytes into a freshly allocated buffer.
    public static Message fromBytes(ByteBufAllocator alloc, byte[] bytes, Collection<String> tags) {
        final ByteBuf data = alloc.buffer(bytes.length, bytes.length);
        data.writeBytes(bytes);
        return new MessageImpl(data, tags);
    }
    // Never returns null: a missing tag collection is presented as an empty list.
    @Override
    public Collection<String> getTags() {
        return (this.tags == null) ? Collections.<String>emptyList() : this.tags;
    }
    // Returns the payload bytes, computing and caching them on first use.
    @Override
    public byte[] asBytes() {
        if (this.memoizedBytes == null) {
            this.memoizedBytes = ByteBufs.toByteArray(content());
        }
        return this.memoizedBytes;
    }
    // Renders "[tag1,tag2] payload", or just the payload when there are no tags.
    // NOTE(review): new String(byte[]) decodes with the platform default charset —
    // confirm payloads are expected to be representable in it.
    @Override
    public final String toString() {
        if (tags == null || tags.isEmpty()) {
            return new String(asBytes());
        } else {
            final String tagList = Joiner.on(',').join(tags);
            return "[" + tagList + "] " + new String(asBytes());
        }
    }
}
| 7,553 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/Tagged.java | package com.airbnb.plog;
import java.util.Collection;
/**
 * Something that carries a collection of string tags (e.g. routing hints).
 */
public interface Tagged {
    /** Returns the tags attached to this object. */
    Collection<String> getTags();
}
| 7,554 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/server | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/server/pipeline/ByteBufs.java | package com.airbnb.plog.server.pipeline;
import io.netty.buffer.ByteBuf;
/**
 * Static helpers for working with Netty {@code ByteBuf} instances.
 */
public final class ByteBufs {
    private ByteBufs() {
        // utility class; no instances
    }

    /**
     * Copies the readable bytes of {@code buf} into a fresh array without
     * moving the buffer's reader index.
     */
    public static byte[] toByteArray(ByteBuf buf) {
        final byte[] payload = new byte[buf.readableBytes()];
        // Fixed: read relative to readerIndex — getBytes(0, ...) returned
        // already-consumed bytes whenever the buffer had been partially read,
        // since readableBytes() is writerIndex - readerIndex.
        buf.getBytes(buf.readerIndex(), payload);
        return payload;
    }
}
| 7,555 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/handlers/Handler.java | package com.airbnb.plog.handlers;
import com.eclipsesource.json.JsonObject;
import io.netty.channel.ChannelHandler;
public interface Handler extends ChannelHandler {
public JsonObject getStats();
public String getName();
}
| 7,556 |
0 | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog | Create_ds/plog/plog-api/src/main/java/com/airbnb/plog/handlers/HandlerProvider.java | package com.airbnb.plog.handlers;
import com.typesafe.config.Config;
/**
 * Factory for {@link Handler} instances, constructed from a Typesafe Config
 * section (one provider per configured handler type).
 */
public interface HandlerProvider {
    /**
     * @param config configuration for the handler to build
     * @return a configured handler
     * @throws Exception if the handler cannot be constructed from the config
     */
    public Handler getHandler(Config config) throws Exception;
}
| 7,557 |
0 | Create_ds/CassJMeter/src/main/java/com/matriks/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/matriks/jmeter/connections/datastaxclient/DataStaxClientConnection.java | package com.matriks.jmeter.connections.datastaxclient;
import java.io.File;
import java.io.FileReader;
import java.util.Properties;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.KeyspaceMetadata;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.policies.ConstantReconnectionPolicy;
import com.datastax.driver.core.policies.DowngradingConsistencyRetryPolicy;
import com.netflix.jmeter.connections.a6x.AstyanaxConnection;
import com.netflix.jmeter.sampler.Connection;
import com.netflix.jmeter.sampler.Operation;
/**
 * Singleton Connection backed by the DataStax Java driver. The Session is
 * created lazily on first use and shared by all sampler threads.
 */
public class DataStaxClientConnection extends Connection {
    // Log under this class; the original logged under AstyanaxConnection by mistake.
    private static final Logger logger = LoggerFactory.getLogger(DataStaxClientConnection.class);

    // volatile is required for the double-checked locking in session() to
    // publish the fully-constructed Session safely across threads.
    private volatile Session session;
    private Cluster cluster;
    private KeyspaceMetadata keyspaceMetaData;
    public static final DataStaxClientConnection instance = new DataStaxClientConnection();
    public Properties config = new Properties();

    /**
     * Returns the shared Session, creating it on first call. Contact points and
     * keyspace come from the base Connection configuration; optional extra
     * settings are loaded from ./cassandra.properties when that file exists.
     *
     * @throws RuntimeException wrapping any driver or I/O failure
     */
    public Session session() {
        Session existing = session;
        if (existing != null)
            return existing;
        synchronized (DataStaxClientConnection.class) {
            if (session != null)
                return session;
            try {
                File propFile = new File("cassandra.properties");
                if (propFile.exists()) {
                    // Close the reader explicitly; the original leaked it.
                    FileReader reader = new FileReader(propFile);
                    try {
                        config.load(reader);
                    }
                    finally {
                        reader.close();
                    }
                }
                cluster = Cluster.builder().addContactPoints(endpoints.toArray(new String[0]))
                        .withRetryPolicy(DowngradingConsistencyRetryPolicy.INSTANCE)
                        .withReconnectionPolicy(new ConstantReconnectionPolicy(2000L))
                        .build();
                session = cluster.connect();
                session.execute("USE " + getKeyspaceName() + ";");
                return session;
            }
            catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

    /** Lazily fetches and caches the metadata for the configured keyspace. */
    public KeyspaceMetadata getKeyspaceMetadata() {
        if (null == keyspaceMetaData)
            keyspaceMetaData = session().getCluster().getMetadata().getKeyspace(DataStaxClientConnection.instance.getKeyspaceName());
        return keyspaceMetaData;
    }

    @Override
    public Operation newOperation(String columnFamily, boolean isCounter) {
        return new DataStaxClientOperation(columnFamily, isCounter);
    }

    @Override
    public String logConnections() {
        // The original returned "" for both the null and non-null cluster case;
        // no per-connection detail is exposed on this path.
        return "";
    }

    @Override
    public void shutdown() {
        if (cluster != null)
            cluster.shutdown();
    }
}
| 7,558 |
0 | Create_ds/CassJMeter/src/main/java/com/matriks/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/matriks/jmeter/connections/datastaxclient/DataStaxClientOperation.java | package com.matriks.jmeter.connections.datastaxclient;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.Query;
import com.datastax.driver.core.ResultSetFuture;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.TableMetadata;
import com.datastax.driver.core.querybuilder.QueryBuilder;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.jmeter.sampler.AbstractSampler.ResponseData;
import com.netflix.jmeter.sampler.Operation;
import com.netflix.jmeter.sampler.OperationException;
/**
 * CQL Operation backed by the DataStax Java driver. Only the read paths
 * (get, getComposite) are implemented; every mutation/slice method is an
 * auto-generated stub returning null. Instances are created per request by
 * DataStaxClientConnection.newOperation().
 */
public class DataStaxClientOperation implements Operation {
    private AbstractSerializer valueSerializer;   // installed via serlizers(); not consulted by the CQL read paths
    private ColumnFamily<Object, Object> cfs;     // Astyanax CF handle built in serlizers(); retained for parity, unused here
    private AbstractSerializer columnSerializer;  // installed via serlizers(); not consulted by the CQL read paths
    private final String cfName;                  // CQL table name
    private final boolean isCounter;              // recorded but never consulted on this path

    /**
     * ResponseData flavors that can also extract host/latency information from
     * an Astyanax OperationResult (null results are guarded everywhere).
     */
    public class DataStaxClientResponseData extends ResponseData {
        public DataStaxClientResponseData(String response, int size, String host, long latency, Object key, Object cn, Object value) {
            super(response, size, host, latency, key, cn, value);
        }
        public DataStaxClientResponseData(String response, int size, OperationResult<?> result, Object key, Object cn, Object value) {
            super(response, size, EXECUTED_ON + (result != null ? result.getHost().getHostName() : ""), (result != null ? result
                    .getLatency(TimeUnit.MILLISECONDS) : 0), key, cn, value);
        }
        public DataStaxClientResponseData(String response, int size, OperationResult<?> result, Object key, Map<?, ?> kv) {
            super(response, size, (result == null) ? "" : result.getHost().getHostName(), result != null ? result.getLatency(TimeUnit.MILLISECONDS)
                    : 0, key, kv);
        }
    }

    public DataStaxClientOperation(String cfName, boolean isCounter) {
        this.cfName = cfName;
        this.isCounter = isCounter;
    }

    /**
     * Installs the key/column/value serializers chosen by the sampler
     * (method name "serlizers" [sic] comes from the Operation interface).
     */
    @Override
    public void serlizers(AbstractSerializer<?> keySerializer, AbstractSerializer<?> columnSerializer, AbstractSerializer<?> valueSerializer) {
        this.cfs = new ColumnFamily(cfName, keySerializer, columnSerializer);
        this.columnSerializer = columnSerializer;
        this.valueSerializer = valueSerializer;
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData put(Object key, Object colName, Object value) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }

    /**
     * Reads one named column of one partition via an async CQL SELECT.
     * The partition key column name is discovered from the table metadata.
     * Uses the configured read consistency; both the row limit and the wait
     * are hard-coded at 1,000,000 (rows / ms). Reported latency is 0 — the
     * sampler measures timing externally.
     *
     * @throws OperationException if the driver call times out
     */
    @Override
    public ResponseData get(Object rkey, Object colName) throws OperationException {
        Session session = DataStaxClientConnection.instance.session();
        TableMetadata tm = DataStaxClientConnection.instance.getKeyspaceMetadata().getTable(cfName);
        String partitionKey = tm.getPartitionKey().get(0).getName();
        Query query = QueryBuilder.select(colName.toString()).from(cfName).where(QueryBuilder.eq(partitionKey, rkey)).limit(1000000)
                .setConsistencyLevel(ConsistencyLevel.valueOf(com.netflix.jmeter.properties.Properties.instance.cassandra.getReadConsistency()));
        ResultSetFuture rs = session.executeAsync(query);
        int size = 0;
        try {
            Row row = rs.getUninterruptibly(1000000, TimeUnit.MILLISECONDS).one();
            // Response size = raw byte size of the first row's column, 0 when absent.
            size = row != null ? row.getBytesUnsafe(colName.toString()).capacity() : 0;
        }
        catch (TimeoutException e) {
            e.printStackTrace();
            throw new OperationException(e);
        }
        return new DataStaxClientResponseData("", size, "", 0, rkey, colName, null);
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData rangeSlice(Object rKey, Object startColumn, Object endColumn, boolean reversed, int count) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData putComposite(String key, String colName, ByteBuffer vbb) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData batchCompositeMutate(String key, Map<String, ByteBuffer> nv) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }

    /**
     * Reads one column of a clustered row. {@code compositeColName} must be
     * "clusteredKey:clusteredValue:colName". Unlike get(), the latency is
     * measured here (nanoTime, reported in ms).
     * NOTE(review): no consistency level is set on this query (get() sets one)
     * — confirm the driver default is intended.
     *
     * @throws OperationException if the driver call times out
     */
    @Override
    public ResponseData getComposite(String key, String compositeColName) throws OperationException {
        Session session = DataStaxClientConnection.instance.session();
        TableMetadata tm = DataStaxClientConnection.instance.getKeyspaceMetadata().getTable(cfName);
        String partitionKey = tm.getPartitionKey().get(0).getName();
        Object partitionValue = key;
        String[] colList = compositeColName.split(":");
        String clusteredKey = colList[0];
        String clusteredValue = colList[1];
        String colName = colList[2];
        Long start = System.nanoTime();
        ResultSetFuture rs = session.executeAsync(QueryBuilder.select(colName).from(cfName).where(QueryBuilder.eq(partitionKey, partitionValue))
                .and(QueryBuilder.eq(clusteredKey, clusteredValue)).limit(1000000));
        int size = 0;
        try {
            Row row = rs.getUninterruptibly(1000000, TimeUnit.MILLISECONDS).one();
            size = row != null ? row.getBytesUnsafe(colName.toString()).capacity() : 0;
        }
        catch (TimeoutException e) {
            e.printStackTrace();
            throw new OperationException(e);
        }
        Long duration = System.nanoTime() - start;
        return new DataStaxClientResponseData("", size, "", TimeUnit.MILLISECONDS.convert(duration, TimeUnit.NANOSECONDS), key, compositeColName, null);
    }

    /** Not implemented for the CQL path; always returns null. */
    @Override
    public ResponseData delete(Object rkey, Object colName) throws OperationException {
        // TODO Auto-generated method stub
        return null;
    }
}
| 7,559 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/utils/SystemUtils.java | package com.netflix.jmeter.utils;
import java.nio.ByteBuffer;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
/**
 * Small shared helpers: stack-trace formatting and column-value rendering.
 */
public class SystemUtils
{
    public static final String NEW_LINE = System.getProperty("line.separator");

    /**
     * Formats a throwable as "ERROR: <throwable>" followed by one stack frame
     * per line, each terminated with the platform line separator.
     */
    public static String getStackTrace(Throwable aThrowable)
    {
        final StringBuilder trace = new StringBuilder("ERROR: ");
        trace.append(aThrowable.toString()).append(NEW_LINE);
        for (StackTraceElement frame : aThrowable.getStackTrace())
        {
            trace.append(frame.toString()).append(NEW_LINE);
        }
        return trace.toString();
    }

    /**
     * Renders a column value for display: raw byte values become a hex string,
     * anything else is decoded through its serializer and stringified.
     */
    public static String convertToString(AbstractSerializer<?> ser, ByteBuffer byteBuffer)
    {
        if (ser instanceof BytesArraySerializer)
        {
            return Hex.bytesToHex(byteBuffer.array());
        }
        return ser.fromByteBuffer(byteBuffer).toString();
    }
}
| 7,560 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/utils/CClient.java | package com.netflix.jmeter.utils;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TFramedTransport;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
/**
 * Raw Thrift Cassandra client that also remembers the host it is bound to and
 * the underlying socket, so samplers can report the endpoint and close the
 * transport explicitly.
 */
public class CClient extends Cassandra.Client
{
    public String host;    // endpoint this client is connected to
    public TSocket socket; // retained so callers can close/inspect the transport

    public CClient(TSocket socket, TBinaryProtocol tBinaryProtocol, String h)
    {
        super(tBinaryProtocol);
        this.host = h;
        this.socket = socket;
    }

    /**
     * Opens a framed-transport, binary-protocol Thrift connection to the node.
     *
     * @param currentNode host to connect to
     * @param port        Thrift RPC port
     * @return a connected client
     * @throws RuntimeException wrapping any connection failure
     */
    public static CClient getClient(String currentNode, int port)
    {
        try
        {
            TSocket socket = new TSocket(currentNode, port);
            TTransport transport = new TFramedTransport(socket);
            CClient client = new CClient(socket, new TBinaryProtocol(transport), currentNode);
            transport.open();
            return client;
        }
        catch (Exception e)
        {
            throw new RuntimeException(e);
        }
    }
}
| 7,561 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/utils/YamlUpdater.java | package com.netflix.jmeter.utils;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileWriter;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.Yaml;
import com.google.common.collect.Lists;
/**
 * Loads a cassandra.yaml, lets callers tweak top-level settings, and writes it
 * back in block style to the same file.
 */
public class YamlUpdater
{
    private Yaml yaml;
    private Map<Object, Object> map; // parsed top-level yaml mapping
    private File yamlFile;

    /**
     * Parses the yaml at {@code location} into memory.
     *
     * @throws FileNotFoundException if the file does not exist
     */
    public YamlUpdater(String location) throws FileNotFoundException
    {
        DumperOptions options = new DumperOptions();
        options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
        yaml = new Yaml(options);
        yamlFile = new File(location);
        // Close the stream once parsed; the original leaked it.
        FileInputStream in = new FileInputStream(yamlFile);
        try
        {
            map = (Map) yaml.load(in);
        }
        finally
        {
            try
            {
                in.close();
            }
            catch (IOException ignored)
            {
                // best effort: the yaml has already been parsed
            }
        }
    }

    /** Sets (or replaces) a top-level yaml key. */
    public void update(String key, Object value)
    {
        map.put(key, value);
    }

    /** Points the SimpleSeedProvider at the given seed list. */
    @SuppressWarnings("unchecked")
    public void setSeeds(Set<String> seeds)
    {
        List<?> seedp = (List) map.get("seed_provider");
        Map m = (Map) seedp.get(0);
        m.put("class_name", "org.apache.cassandra.locator.SimpleSeedProvider");
        List lst = Lists.newArrayList();
        Map seedEntry = new HashMap();
        seedEntry.put("seeds", StringUtils.join(seeds, ","));
        lst.add(seedEntry);
        m.put("parameters", lst);
    }

    /** Sets one key inside the top-level encryption_options map. */
    @SuppressWarnings("unchecked")
    public void encriptionOption(String string, String internode_encryption)
    {
        Map m = (Map) map.get("encryption_options");
        m.put(string, internode_encryption);
    }

    /**
     * Writes the (possibly modified) yaml back to the file it was read from.
     *
     * @throws IOException if the file cannot be written
     */
    public void dump() throws IOException
    {
        // Close the writer; the original leaked it (snakeyaml does not close).
        FileWriter writer = new FileWriter(yamlFile);
        try
        {
            yaml.dump(map, writer);
        }
        finally
        {
            writer.close();
        }
    }
}
| 7,562 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/utils/Schema.java | package com.netflix.jmeter.utils;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.CfDef;
import org.apache.cassandra.thrift.KsDef;
import org.apache.cassandra.thrift.NotFoundException;
import com.google.common.collect.Maps;
import com.netflix.jmeter.properties.Properties;
import com.netflix.jmeter.properties.SchemaProperties;
import com.netflix.jmeter.sampler.Connection;
/**
 * Creates the benchmark keyspace and column families over a raw Thrift
 * connection, driven by the SchemaProperties in the test configuration.
 * NetworkTopologyStrategy is assumed; strategy options come from the first
 * schema entry as a comma-separated "dc:rf" list.
 */
public class Schema
{
    private static String STATEGY_CLASS = "org.apache.cassandra.locator.NetworkTopologyStrategy";
    private CClient client;
    private String ksName;

    public Schema(CClient client)
    {
        this.client = client;
        this.ksName = Connection.getKeyspaceName();
    }

    /**
     * Ensures the keyspace exists. If it already does, only missing column
     * families are added against the live keyspace; otherwise the keyspace is
     * created with all column families in one definition.
     * NOTE(review): the create paths use send_system_add_keyspace /
     * send_system_add_column_family (one-way sends; the response is never
     * read) — presumably intentional fire-and-forget setup; confirm.
     */
    public synchronized void createKeyspace() throws Exception
    {
        // create Keyspace if it doesnt exist.
        KsDef ksd;
        try
        {
            ksd = client.describe_keyspace(ksName);
            client.set_keyspace(ksName);
            createColumnFamily(ksd, false);
        }
        catch (NotFoundException ex)
        {
            ksd = new KsDef(ksName, STATEGY_CLASS, new ArrayList<CfDef>());
            Map<String, String> strategy_options = Maps.newHashMap();
            // Strategy options are encoded as "dc1:rf1,dc2:rf2" in the properties.
            String[] splits = Properties.instance.getSchemas().get(0).getStrategy_options().split(",");
            for (String split : splits)
            {
                String[] replication = split.split(":");
                assert replication.length == 2;
                strategy_options.put(replication[0], replication[1]);
            }
            ksd.setStrategy_options(strategy_options);
            createColumnFamily(ksd, true);
            client.send_system_add_keyspace(ksd);
        }
    }

    /**
     * Adds every configured column family that the keyspace does not already
     * contain. Duplicate CF names in the properties are collapsed first
     * (last entry wins).
     *
     * @param ksd     keyspace definition to inspect (and extend when addToKS)
     * @param addToKS true: append CfDefs to ksd because the keyspace is being
     *                created; false: issue add_column_family against the live keyspace
     */
    public void createColumnFamily(KsDef ksd, boolean addToKS) throws Exception
    {
        Map<String, SchemaProperties> removedDuplicates = Maps.newConcurrentMap();
        for (SchemaProperties props : Properties.instance.getSchemas())
            removedDuplicates.put(props.getColumn_family(), props);
        OUTER: for (SchemaProperties props : removedDuplicates.values())
        {
            List<CfDef> list = ksd.getCf_defs() == null ? new ArrayList<CfDef>() : ksd.getCf_defs();
            for (CfDef cfd : list)
            {
                // Skip CFs already present in the keyspace definition.
                if (cfd.getName().equals(props.getColumn_family()))
                    continue OUTER;
            }
            if (addToKS)
            {
                ksd.addToCf_defs(columnFamilyDef(props));
            }
            else
            {
                client.send_system_add_column_family(columnFamilyDef(props));
            }
        }
    }

    // create column family
    /** Translates one SchemaProperties entry into a Thrift CfDef. */
    private CfDef columnFamilyDef(SchemaProperties prop)
    {
        CfDef cfd = new CfDef(ksName, prop.getColumn_family());
        cfd.setKey_cache_size(Double.parseDouble(prop.getKeys_cached()));
        cfd.setComparator_type(prop.getComparator_type());
        cfd.setKey_validation_class(prop.getKey_validation_class());
        cfd.setDefault_validation_class(prop.getDefault_validation_class());
        cfd.setRow_cache_provider(prop.getRow_cache_provider());
        cfd.setRow_cache_size(Double.parseDouble(prop.getRows_cached()));
        return cfd;
    }
}
| 7,563 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/utils/Hex.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.jmeter.utils;
import java.lang.reflect.Constructor;
/**
 * Fast hex &lt;-&gt; byte[] conversion helpers (lifted from Apache Cassandra).
 * Decoding is table-driven; bytesToHex additionally tries to build the result
 * String without copying the char[] via a package-private String constructor,
 * falling back to a normal copy when reflection is unavailable (e.g. on modern
 * JDKs where that constructor no longer exists).
 */
public class Hex
{
    private static final Constructor<String> stringConstructor = getProtectedConstructor(String.class, int.class, int.class, char[].class);
    // Maps a hex character (0-9, a-f, A-F) to its 4-bit value; -1 marks non-hex.
    private final static byte[] charToByte = new byte[256];
    // package protected for use by ByteBufferUtil. Do not modify this array !!
    static final char[] byteToChar = new char[16];
    static
    {
        for (char c = 0; c < charToByte.length; ++c)
        {
            if (c >= '0' && c <= '9')
                charToByte[c] = (byte)(c - '0');
            else if (c >= 'A' && c <= 'F')
                charToByte[c] = (byte)(c - 'A' + 10);
            else if (c >= 'a' && c <= 'f')
                charToByte[c] = (byte)(c - 'a' + 10);
            else
                charToByte[c] = (byte)-1;
        }
        for (int i = 0; i < 16; ++i)
        {
            byteToChar[i] = Integer.toHexString(i).charAt(0);
        }
    }

    /**
     * Decodes a hex string (case-insensitive) into bytes.
     *
     * @throws NumberFormatException if the length is odd or any character is
     *         not a hex digit — including characters above U+00FF, which
     *         previously escaped as ArrayIndexOutOfBoundsException because the
     *         lookup table is only 256 entries wide
     */
    public static byte[] hexToBytes(String str)
    {
        if (str.length() % 2 == 1)
            throw new NumberFormatException("An hex string representing bytes must have an even length");
        byte[] bytes = new byte[str.length() / 2];
        for (int i = 0; i < bytes.length; i++)
        {
            byte halfByte1 = decodeNibble(str.charAt(i * 2), str);
            byte halfByte2 = decodeNibble(str.charAt(i * 2 + 1), str);
            bytes[i] = (byte)((halfByte1 << 4) | halfByte2);
        }
        return bytes;
    }

    /** Returns the 4-bit value of one hex digit, rejecting anything else. */
    private static byte decodeNibble(char c, String source)
    {
        // Guard the table lookup: chars >= 256 are out of range for charToByte
        // and must be reported as a format error, not an index error.
        byte value = (c < charToByte.length) ? charToByte[c] : (byte)-1;
        if (value == -1)
            throw new NumberFormatException("Non-hex characters in " + source);
        return value;
    }

    /** Encodes bytes as a lowercase hex string, two characters per byte. */
    public static String bytesToHex(byte... bytes)
    {
        char[] c = new char[bytes.length * 2];
        for (int i = 0; i < bytes.length; i++)
        {
            int bint = bytes[i];
            c[i * 2] = byteToChar[(bint & 0xf0) >> 4];
            c[1 + i * 2] = byteToChar[bint & 0x0f];
        }
        return wrapCharArray(c);
    }

    /**
     * Create a String from a char array with zero-copy (if available), using reflection to access a package-protected constructor of String.
     * */
    public static String wrapCharArray(char[] c)
    {
        if (c == null)
            return null;
        String s = null;
        if (stringConstructor != null)
        {
            try
            {
                s = stringConstructor.newInstance(0, c.length, c);
            }
            catch (Exception e)
            {
                // Swallowing as we'll just use a copying constructor
            }
        }
        return s == null ? new String(c) : s;
    }

    /**
     * Used to get access to protected/private constructor of the specified class
     * @param klass - name of the class
     * @param paramTypes - types of the constructor parameters
     * @return Constructor if successful, null if the constructor cannot be
     * accessed
     */
    public static Constructor getProtectedConstructor(Class klass, Class... paramTypes)
    {
        Constructor c;
        try
        {
            c = klass.getDeclaredConstructor(paramTypes);
            c.setAccessible(true);
            return c;
        }
        catch (Exception e)
        {
            return null;
        }
    }
}
| 7,564 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/fatclient/FatClientConnection.java | package com.netflix.jmeter.connections.fatclient;
import java.io.File;
import java.io.IOException;
import org.apache.cassandra.service.StorageService;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.jmeter.properties.Properties;
import com.netflix.jmeter.sampler.Connection;
import com.netflix.jmeter.sampler.Operation;
import com.netflix.jmeter.utils.YamlUpdater;
/**
 * Connection that embeds a Cassandra "fat client": this JVM joins gossip as a
 * non-data-bearing member and performs reads/writes through StorageProxy
 * instead of Thrift. Construction rewrites ./cassandra.yaml from the test
 * properties and then boots the embedded client.
 */
public class FatClientConnection extends Connection
{
    private static final Logger logger = LoggerFactory.getLogger(FatClientConnection.class);

    public FatClientConnection()
    {
        super();
        try
        {
            // update yaml for the test case.
            updateYaml();
            // start the fat client.
            startClient();
        }
        catch (Exception ex)
        {
            logger.error("Couldnt Start the client because of:", ex);
            throw new RuntimeException(ex);
        }
    }

    /** Rewrites cassandra.yaml in the working directory from the test properties. */
    private void updateYaml() throws IOException
    {
        YamlUpdater updater = new YamlUpdater("cassandra.yaml");
        updater.update("listen_address", null);
        updater.update("rpc_address", null);
        updater.update("storage_port", 7101);
        updater.update("rpc_port", port);
        updater.update("cluster_name", Properties.instance.cassandra.getClusterName());
        updater.update("endpoint_snitch", Properties.instance.fatclient.getEndpoint_Snitch());
        updater.setSeeds(endpoints);
        // (the original set dynamic_snitch twice with the same value; once suffices)
        updater.update("dynamic_snitch", Properties.instance.fatclient.getDynamic_snitch());
        updater.update("rpc_timeout_in_ms", Properties.instance.fatclient.getRpc_timeout_in_ms());
        updater.encriptionOption("internode_encryption", Properties.instance.fatclient.getInternode_encryption());
        updater.dump();
    }

    /** Boots the embedded fat client and waits briefly for gossip to settle. */
    private static void startClient() throws Exception
    {
        try
        {
            // Point cassandra.config at the freshly written yaml unless already set.
            // The original used Properties.contains(), which tests VALUES
            // (Hashtable semantics), so the guard could never match the key;
            // containsKey is what was intended.
            if (!System.getProperties().containsKey("cassandra.config"))
            {
                String url = "file:///" + new File("cassandra.yaml").getAbsolutePath();
                System.getProperties().setProperty("cassandra.config", url);
            }
            StorageService.instance.initClient();
            // sleep for a bit so that gossip can do its thing.
            Thread.sleep(10000L);
        }
        catch (Throwable ex)
        {
            // Single handler: the original had identical Exception and Throwable
            // branches; Throwable covers both.
            logger.error("Couldnt Start the client because of:", ex);
            throw new AssertionError(ex);
        }
    }

    @Override
    public Operation newOperation(String columnName, boolean isCounter)
    {
        return new FatClientOperation(Properties.instance.cassandra.getWriteConsistency(),
                Properties.instance.cassandra.getReadConsistency(),
                Properties.instance.cassandra.getKeyspace(),
                columnName,
                isCounter);
    }

    @Override
    public String logConnections()
    {
        return "Live Nodes: " + StringUtils.join(StorageService.instance.getLiveNodes(), ",");
    }

    @Override
    public void shutdown()
    {
        StorageService.instance.stopClient();
    }
}
| 7,565 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/fatclient/FatClientOperation.java | package com.netflix.jmeter.connections.fatclient;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.IColumn;
import org.apache.cassandra.db.ReadCommand;
import org.apache.cassandra.db.Row;
import org.apache.cassandra.db.RowMutation;
import org.apache.cassandra.db.SliceByNamesReadCommand;
import org.apache.cassandra.db.SliceFromReadCommand;
import org.apache.cassandra.db.filter.QueryPath;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnPath;
import com.google.common.collect.Lists;
import com.netflix.jmeter.connections.thrift.ThriftOperation;
import com.netflix.jmeter.sampler.AbstractSampler.ResponseData;
import com.netflix.jmeter.sampler.OperationException;
import com.netflix.jmeter.utils.SystemUtils;
/**
 * Operation implementation for the embedded Cassandra fat client: all reads
 * and writes go through StorageProxy directly instead of a Thrift connection.
 * Extends ThriftOperation purely to reuse its serializer/consistency-level
 * plumbing (the Thrift client reference is null on this path).
 */
public class FatClientOperation extends ThriftOperation
{
    private String cf; // column family name
    private String ks; // keyspace name

    public FatClientOperation(String writeConsistency, String readConsistency, String ks, String cf, boolean isCounter)
    {
        // null client: the fat-client path never touches a Thrift connection.
        super(null, writeConsistency, readConsistency, cf, isCounter);
        this.cf = cf;
        this.ks = ks;
    }

    /**
     * Writes one column via StorageProxy.mutate at the configured write
     * consistency (timestamp = wall-clock millis). Host/latency in the
     * returned ResponseData are blank; the sampler times the call externally.
     */
    @Override
    public ResponseData put(Object key, Object colName, Object value) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(key);
        ByteBuffer name = colser.toByteBuffer(colName);
        ByteBuffer val = valser.toByteBuffer(value);
        RowMutation change = new RowMutation(ks, rKey);
        ColumnPath cp = new ColumnPath(cf).setColumn(name);
        change.add(new QueryPath(cp), val, System.currentTimeMillis());
        try
        {
            StorageProxy.mutate(Arrays.asList(change), wConsistecy);
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData("", 0, "");
    }

    /** Writes several columns of one row as a single RowMutation. */
    @Override
    public ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(key);
        RowMutation change = new RowMutation(ks, rKey);
        for (Map.Entry entry : nv.entrySet())
        {
            ByteBuffer name = colser.toByteBuffer(entry.getKey());
            ByteBuffer val = valser.toByteBuffer(entry.getValue());
            ColumnPath cp = new ColumnPath(cf).setColumn(name);
            change.add(new QueryPath(cp), val, System.currentTimeMillis());
        }
        try
        {
            StorageProxy.mutate(Arrays.asList(change), wConsistecy);
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData("", 0, "");
    }

    /**
     * Reads one named column via a SliceByNamesReadCommand and renders the
     * result as "name:value", also counting the raw bytes transferred.
     */
    @Override
    public ResponseData get(Object rkey, Object colName) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(rkey);
        ByteBuffer name = colser.toByteBuffer(colName);
        List<ReadCommand> commands = new ArrayList<ReadCommand>();
        Collection<ByteBuffer> cols = Lists.newArrayList(name);
        SliceByNamesReadCommand readCommand = new SliceByNamesReadCommand(ks, rKey, new QueryPath(cf, null, null), cols);
        readCommand.setDigestQuery(false);
        commands.add(readCommand);
        List<Row> rows;
        try
        {
            rows = StorageProxy.read(commands, rConsistecy);
            Row row = rows.get(0);
            // NOTE: local 'cf' (the row's ColumnFamily) shadows the field 'cf'
            // (the column family NAME) inside this try block.
            ColumnFamily cf = row.cf;
            int bytes = 0;
            StringBuffer response = new StringBuffer();
            if (cf != null)
            {
                for (IColumn col : cf.getSortedColumns())
                {
                    String value = SystemUtils.convertToString(valser, col.value());
                    response.append(colser.fromByteBuffer(col.name())).append(":").append(value);
                    bytes += col.name().capacity();
                    bytes += col.value().capacity();
                }
            }
            return new ResponseData(response.toString(), bytes, "");
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
    }

    /**
     * Reads a column slice between startColumn and endColumn (Cassandra slice
     * semantics; reversed/count as given) via a SliceFromReadCommand, rendering
     * the columns the same way get() does.
     */
    @Override
    public ResponseData rangeSlice(Object rkey, Object startColumn, Object endColumn, boolean reversed, int count) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(rkey);
        ByteBuffer sname = colser.toByteBuffer(startColumn);
        ByteBuffer ename = colser.toByteBuffer(endColumn);
        List<ReadCommand> commands = new ArrayList<ReadCommand>();
        ReadCommand readCommand = new SliceFromReadCommand(ks, rKey, new ColumnParent(cf), sname, ename, reversed, count);
        readCommand.setDigestQuery(false);
        commands.add(readCommand);
        List<Row> rows;
        try
        {
            rows = StorageProxy.read(commands, rConsistecy);
            Row row = rows.get(0);
            // Same shadowing note as in get(): local 'cf' is the row data here.
            ColumnFamily cf = row.cf;
            int bytes = 0;
            StringBuffer response = new StringBuffer();
            if (cf != null)
            {
                for (IColumn col : cf.getSortedColumns())
                {
                    String value = SystemUtils.convertToString(valser, col.value());
                    response.append(colser.fromByteBuffer(col.name())).append(":").append(value);
                    bytes += col.name().capacity();
                    bytes += col.value().capacity();
                }
            }
            return new ResponseData(response.toString(), bytes, "");
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
    }
}
| 7,566 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/thrift/Writer.java | package com.netflix.jmeter.connections.thrift;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.Mutation;
import com.google.common.collect.Lists;
import com.netflix.jmeter.utils.CClient;
/**
 * Thin write helper over the raw Thrift client: single-column inserts plus a
 * small batch builder (stage columns with prepareAdd(...), then flush with
 * insert(key)). Not thread-safe: the staged column list is per-instance.
 */
public class Writer
{
    private CClient client;
    private ConsistencyLevel cl;
    private final String cfName;
    // Columns staged by prepareAdd() for the next batch insert(key).
    List<Column> columns = Lists.newArrayList();

    public Writer(CClient client, ConsistencyLevel cl, String cfName)
    {
        this.client = client;
        this.cl = cl;
        this.cfName = cfName;
    }

    /**
     * Writes a single column at the configured consistency level.
     * NOTE(review): timestamps use nanoTime(), which is neither wall-clock nor
     * the conventional microseconds-since-epoch — presumably acceptable for a
     * benchmark where only per-writer monotonicity matters; confirm.
     */
    public void insert(ByteBuffer key, ByteBuffer name, ByteBuffer value) throws Exception
    {
        Column col = new Column(name).setValue(value).setTimestamp(System.nanoTime());
        ColumnParent cp = new ColumnParent(cfName);
        client.insert(key, cp, col, cl);
    }

    /** Stages a column for the next batch insert(key); returns this for chaining. */
    public Writer prepareAdd(ByteBuffer name, ByteBuffer value) throws Exception
    {
        Column col = new Column(name).setValue(value).setTimestamp(System.nanoTime());
        columns.add(col);
        return this;
    }

    /**
     * Flushes all staged columns to the given row in one batch_mutate.
     *
     * @throws IllegalStateException if nothing was staged. The original used a
     *         plain {@code assert}, which is a no-op unless -ea is set and
     *         would silently send an empty batch.
     */
    public void insert(ByteBuffer key) throws Exception
    {
        if (columns.isEmpty())
        {
            throw new IllegalStateException("insert(key) called with no columns staged; call prepareAdd() first");
        }
        Map<ByteBuffer, Map<String, List<Mutation>>> record = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
        record.put(key, getColumnsMutationMap(columns));
        client.batch_mutate(record, cl);
    }

    /** Wraps each staged column in a Mutation, keyed by the column family name. */
    private Map<String, List<Mutation>> getColumnsMutationMap(List<Column> columns)
    {
        List<Mutation> mutations = new ArrayList<Mutation>();
        Map<String, List<Mutation>> mutationMap = new HashMap<String, List<Mutation>>();
        for (Column c : columns)
        {
            ColumnOrSuperColumn column = new ColumnOrSuperColumn().setColumn(c);
            mutations.add(new Mutation().setColumn_or_supercolumn(column));
        }
        mutationMap.put(cfName, mutations);
        return mutationMap;
    }
}
| 7,567 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/thrift/Reader.java | package com.netflix.jmeter.connections.thrift;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ColumnPath;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.IndexClause;
import org.apache.cassandra.thrift.IndexExpression;
import org.apache.cassandra.thrift.IndexOperator;
import org.apache.cassandra.thrift.KeyRange;
import org.apache.cassandra.thrift.KeySlice;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
import org.apache.cassandra.utils.ByteBufferUtil;
import com.netflix.jmeter.utils.CClient;
/**
 * Thin read helper over the raw Thrift client; one instance per column family
 * and read consistency level.
 */
public class Reader
{
    private CClient client;
    private ConsistencyLevel cl;
    private final String cfName;

    public Reader(CClient client, ConsistencyLevel cl, String cfName)
    {
        this.client = client;
        this.cl = cl;
        this.cfName = cfName;
    }

    /**
     * Secondary-index lookup: rows where columnName == value, starting at
     * startOffset, at most {@code limit} rows, each carrying at most
     * {@code limit} columns (reversed when isReverse).
     */
    public List<KeySlice> indexGet(ByteBuffer columnName, ByteBuffer value, ByteBuffer startOffset, int limit, boolean isReverse) throws Exception
    {
        SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, isReverse, limit));
        ColumnParent parent = new ColumnParent(cfName);
        IndexExpression expression = new IndexExpression(columnName, IndexOperator.EQ, value);
        IndexClause clause = new IndexClause(Arrays.asList(expression), startOffset, limit);
        return client.get_indexed_slices(parent, clause, predicate, cl);
    }

    /** Fetches up to {@code limit} columns of each given row in one round trip. */
    public Map<ByteBuffer, List<ColumnOrSuperColumn>> multiGet(List<ByteBuffer> keys, int limit) throws Exception
    {
        SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, limit));
        ColumnParent parent = new ColumnParent(cfName);
        return client.multiget_slice(keys, parent, predicate, cl);
    }

    /** Key-range scan from start to end: at most {@code limit} rows, {@code limit} columns per row. */
    public List<KeySlice> getRangeSlice(ByteBuffer start, ByteBuffer end, int limit) throws Exception
    {
        SlicePredicate predicate = new SlicePredicate().setSlice_range(new SliceRange(ByteBufferUtil.EMPTY_BYTE_BUFFER, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, limit));
        ColumnParent parent = new ColumnParent(cfName);
        KeyRange range = new KeyRange(limit).setStart_key(start).setEnd_key(end);
        return client.get_range_slices(parent, predicate, range, cl);
    }

    /** Column slice of one row between start and end (Cassandra slice semantics). */
    public List<ColumnOrSuperColumn> getSlice(ByteBuffer key, ByteBuffer start, ByteBuffer end, int limit, boolean isReverse) throws Exception
    {
        SliceRange sliceRange = new SliceRange().setStart(start).setFinish(end).setReversed(isReverse).setCount(limit);
        // initialize SlicePredicate with existing SliceRange
        SlicePredicate predicate = new SlicePredicate().setSlice_range(sliceRange);
        ColumnParent parent = new ColumnParent(cfName);
        return client.get_slice(key, parent, predicate, cl);
    }

    /** Reads a single named column of one row. */
    public ColumnOrSuperColumn get(ByteBuffer key, ByteBuffer column) throws Exception
    {
        ColumnPath cp = new ColumnPath().setColumn_family(cfName).setColumn(column);
        return client.get(key, cp, cl);
    }
}
| 7,568 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/thrift/ThriftConnection.java | package com.netflix.jmeter.connections.thrift;
import java.util.Collections;
import java.util.List;
import org.apache.commons.lang.StringUtils;
import com.google.common.collect.Lists;
import com.netflix.jmeter.properties.Properties;
import com.netflix.jmeter.sampler.Connection;
import com.netflix.jmeter.sampler.Operation;
import com.netflix.jmeter.utils.CClient;
/**
 * Connection that hands each JMeter sampler thread its own raw Thrift client,
 * bound to a randomly chosen endpoint, via a ThreadLocal.
 */
public class ThriftConnection extends Connection
{
    public static final ThriftConnection instance = new ThriftConnection();

    // One CClient per sampler thread, created lazily on first use.
    public final ThreadLocal<CClient> clients = new ThreadLocal<CClient>()
    {
        @Override
        public CClient initialValue()
        {
            // Shuffle a copy of the endpoint list so threads spread across nodes.
            List<String> t = Lists.newArrayList(endpoints);
            Collections.shuffle(t);
            CClient client = CClient.getClient(t.get(0), port);
            try
            {
                client.set_keyspace(getKeyspaceName());
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
            return client;
        }
    };

    @Override
    public Operation newOperation(String cfName, boolean iscounter)
    {
        return new ThriftOperation(clients.get(),
                Properties.instance.cassandra.getWriteConsistency(),
                Properties.instance.cassandra.getReadConsistency(),
                cfName,
                iscounter);
    }

    @Override
    public String logConnections()
    {
        return "Nodes in the list: " + StringUtils.join(endpoints, ",");
    }

    @Override
    public void shutdown()
    {
        // do nothing.
    }
}
| 7,569 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/thrift/ThriftOperation.java | package com.netflix.jmeter.connections.thrift;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.NotFoundException;
import org.apache.cassandra.utils.Pair;
import com.google.common.collect.Lists;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.jmeter.sampler.AbstractSampler.ResponseData;
import com.netflix.jmeter.sampler.Operation;
import com.netflix.jmeter.sampler.OperationException;
import com.netflix.jmeter.utils.CClient;
import com.netflix.jmeter.utils.SystemUtils;
/**
 * {@link Operation} implementation that talks to Cassandra through the raw
 * thrift client ({@link CClient}), delegating to the package-local
 * Reader/Writer/Counter helpers. Serializers must be injected via
 * {@link #serlizers} before any operation is executed.
 */
public class ThriftOperation implements Operation
{
    private final String cfName;
    // NOTE(review): field names are misspelled ("Consistecy") but kept as-is:
    // they are protected and may be referenced by subclasses outside this file.
    protected ConsistencyLevel wConsistecy;
    protected ConsistencyLevel rConsistecy;
    protected AbstractSerializer colser;
    protected AbstractSerializer valser;
    protected AbstractSerializer kser;
    private CClient client;
    private boolean isCounter;

    public ThriftOperation(CClient client, String writeConsistency, String readConsistency, String cfName, boolean isCounter)
    {
        this.client = client;
        this.wConsistecy = ConsistencyLevel.valueOf(writeConsistency);
        this.rConsistecy = ConsistencyLevel.valueOf(readConsistency);
        this.cfName = cfName;
        this.isCounter = isCounter;
    }

    /** Injects key, column-name and value serializers (called once per sampler). */
    @Override
    public void serlizers(AbstractSerializer<?> kser, AbstractSerializer<?> colser, AbstractSerializer<?> valser)
    {
        this.kser = kser;
        this.colser = colser;
        this.valser = valser;
    }

    /**
     * Writes a single column (or increments a counter column when the target
     * column family is a counter CF).
     */
    @Override
    public ResponseData put(Object key, Object colName, Object value) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(key);
        ByteBuffer name = colser.toByteBuffer(colName);
        try
        {
            if (isCounter)
            {
                // counter values are deltas; the configured serializer is bypassed
                new Counter(client, wConsistecy, cfName).add(rKey, Lists.newArrayList(new CounterColumn(name, (Long) value)));
            }
            else
            {
                ByteBuffer val = valser.toByteBuffer(value);
                new Writer(client, wConsistecy, cfName).insert(rKey, name, val);
            }
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData("", 0, client.host, 0, key, colName, value);
    }

    /** Writes all name/value pairs of {@code nv} under one row in a single batch. */
    @Override
    public ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(key);
        try
        {
            if (isCounter)
            {
                Counter counter = new Counter(client, wConsistecy, cfName);
                List<CounterColumn> columns = Lists.newArrayList();
                for (Map.Entry<?, ?> entity : nv.entrySet())
                {
                    ByteBuffer name = colser.toByteBuffer(entity.getKey());
                    columns.add(new CounterColumn(name, (Long) entity.getValue()));
                }
                counter.add(rKey, columns);
            }
            else
            {
                Writer writer = new Writer(client, wConsistecy, cfName);
                for (Map.Entry<?, ?> entity : nv.entrySet())
                {
                    ByteBuffer name = colser.toByteBuffer(entity.getKey());
                    ByteBuffer value = valser.toByteBuffer(entity.getValue());
                    writer.prepareAdd(name, value);
                }
                writer.insert(rKey);
            }
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData("", 0, client.host, 0, key, nv);
    }

    /**
     * Reads a single column; a missing column is reported as a "Not Found"
     * response rather than an error.
     */
    @Override
    public ResponseData get(Object rkey, Object colName) throws OperationException
    {
        ByteBuffer rKey = kser.toByteBuffer(rkey);
        ByteBuffer name = colser.toByteBuffer(colName);
        String response;
        int bytes = 0;
        try
        {
            ByteBuffer value = new Reader(client, rConsistecy, cfName).get(rKey, name).getColumn().value;
            response = SystemUtils.convertToString(valser, value);
            bytes = value.capacity();
        }
        catch (NotFoundException e)
        {
            response = ".... Not Found ...";
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData(response, bytes, client.host, 0, rkey, colName, null);
    }

    /**
     * Reads a slice of up to {@code count} columns between {@code startColumn}
     * and {@code endColumn} of one row, rendering each column as "name:value".
     */
    @Override
    public ResponseData rangeSlice(Object rKey, Object startColumn, Object endColumn, boolean reversed, int count) throws OperationException
    {
        ByteBuffer key = kser.toByteBuffer(rKey);
        ByteBuffer start = colser.toByteBuffer(startColumn);
        ByteBuffer end = colser.toByteBuffer(endColumn);
        // FIX: dropped the original's unused local "long s = System.currentTimeMillis();"
        StringBuilder response = new StringBuilder();
        int bytes = 0;
        try
        {
            List<ColumnOrSuperColumn> reader = new Reader(client, rConsistecy, cfName).getSlice(key, start, end, count, reversed);
            for (ColumnOrSuperColumn col : reader)
            {
                byte[] name = col.getColumn().getName();
                bytes += name.length;
                ByteBuffer value = col.getColumn().value;
                bytes += value.capacity();
                String valueString = SystemUtils.convertToString(valser, value);
                response.append(colser.fromBytes(name).toString()).append(":").append(valueString).append("\n");
            }
        }
        catch (NotFoundException e)
        {
            response.append(".... Not Found ...");
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new ResponseData(response.toString(), bytes, client.host, 0, rKey, Pair.create(startColumn, endColumn), null);
    }

    /** Not supported by the thrift path (composite samplers use Astyanax). */
    @Override
    public ResponseData putComposite(String key, String colName, ByteBuffer vbb) throws OperationException
    {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not supported by the thrift path (composite samplers use Astyanax). */
    @Override
    public ResponseData batchCompositeMutate(String key, Map<String, ByteBuffer> nv) throws OperationException
    {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not supported by the thrift path (composite samplers use Astyanax). */
    @Override
    public ResponseData getComposite(String stringValue, String stringValue2) throws OperationException
    {
        // TODO Auto-generated method stub
        return null;
    }

    /** Not implemented for the thrift path. */
    @Override
    public ResponseData delete(Object rkey, Object colName) throws OperationException
    {
        // TODO Auto-generated method stub
        return null;
    }
}
| 7,570 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/thrift/Counter.java | package com.netflix.jmeter.connections.thrift;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.cassandra.thrift.Cassandra;
import org.apache.cassandra.thrift.Cassandra.Client;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.ConsistencyLevel;
import org.apache.cassandra.thrift.CounterColumn;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SliceRange;
/**
 * Thin helper around the thrift client for counter column families:
 * batched counter increments and plain slice reads.
 */
public class Counter
{
    private final Client client;
    private final ConsistencyLevel cl;
    private final String cfName;

    public Counter(Cassandra.Client client, ConsistencyLevel cl, String cfName)
    {
        this.client = client;
        this.cl = cl;
        this.cfName = cfName;
    }

    /** Applies all counter increments for one row via a single batch_mutate. */
    public void add(ByteBuffer rawKey, List<CounterColumn> columns) throws Exception
    {
        Map<ByteBuffer, Map<String, List<Mutation>>> record = new HashMap<ByteBuffer, Map<String, List<Mutation>>>();
        record.put(rawKey, getColumnsMutationMap(columns));
        client.batch_mutate(record, cl);
    }

    /**
     * Reads up to {@code count} columns of the row in [start, finish], in
     * forward comparator order.
     * FIX: parameter renamed from the misleading "order" -- it is passed to
     * SliceRange.setCount and caps the number of columns returned.
     */
    public List<ColumnOrSuperColumn> get(ByteBuffer keyBuffer, ByteBuffer start, ByteBuffer finish, int count) throws Exception
    {
        SliceRange sliceRange = new SliceRange();
        // start/finish
        sliceRange.setStart(start).setFinish(finish);
        // reversed/count
        sliceRange.setReversed(false).setCount(count);
        // initialize SlicePredicate with existing SliceRange
        SlicePredicate predicate = new SlicePredicate().setSlice_range(sliceRange);
        ColumnParent parent = new ColumnParent(cfName);
        return client.get_slice(keyBuffer, parent, predicate, cl);
    }

    /** Wraps the counter columns into the {cfName -> mutations} shape batch_mutate expects. */
    private Map<String, List<Mutation>> getColumnsMutationMap(List<CounterColumn> columns)
    {
        List<Mutation> mutations = new ArrayList<Mutation>(columns.size());
        for (CounterColumn c : columns)
        {
            ColumnOrSuperColumn cosc = new ColumnOrSuperColumn().setCounter_column(c);
            mutations.add(new Mutation().setColumn_or_supercolumn(cosc));
        }
        Map<String, List<Mutation>> mutationMap = new HashMap<String, List<Mutation>>();
        mutationMap.put(cfName, mutations);
        return mutationMap;
    }
}
| 7,571 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/a6x/AstyanaxConnection.java | package com.netflix.jmeter.connections.a6x;
import java.io.File;
import java.io.FileReader;
import java.util.Properties;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.astyanax.AstyanaxContext;
import com.netflix.astyanax.AstyanaxContext.Builder;
import com.netflix.astyanax.Keyspace;
import com.netflix.astyanax.connectionpool.ConnectionPoolMonitor;
import com.netflix.astyanax.connectionpool.LatencyScoreStrategy;
import com.netflix.astyanax.connectionpool.NodeDiscoveryType;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolConfigurationImpl;
import com.netflix.astyanax.connectionpool.impl.ConnectionPoolType;
import com.netflix.astyanax.connectionpool.impl.CountingConnectionPoolMonitor;
import com.netflix.astyanax.impl.AstyanaxConfigurationImpl;
import com.netflix.astyanax.model.ConsistencyLevel;
import com.netflix.astyanax.shallows.EmptyLatencyScoreStrategyImpl;
import com.netflix.astyanax.thrift.ThriftFamilyFactory;
import com.netflix.jmeter.sampler.Connection;
import com.netflix.jmeter.sampler.Operation;
/**
 * {@link Connection} implementation backed by Netflix Astyanax. The keyspace
 * client is built lazily (double-checked under a class lock) from defaults
 * optionally overridden by a local "cassandra.properties" file.
 */
public class AstyanaxConnection extends Connection
{
    private static final Logger logger = LoggerFactory.getLogger(AstyanaxConnection.class);
    public static final AstyanaxConnection instance = new AstyanaxConnection();
    public Properties config = new Properties();
    private Keyspace keyspace;
    private AstyanaxContext<Keyspace> context;

    /**
     * Returns the shared {@link Keyspace}, building the Astyanax context on
     * first use. Thread-safe via double-checked locking on the class monitor.
     */
    public Keyspace keyspace()
    {
        if (keyspace != null)
            return keyspace;
        synchronized (AstyanaxConnection.class)
        {
            // double check...
            if (keyspace != null)
                return keyspace;
            try
            {
                File propFile = new File("cassandra.properties");
                if (propFile.exists())
                {
                    // FIX: the reader was previously never closed (fd leak)
                    FileReader reader = new FileReader(propFile);
                    try
                    {
                        config.load(reader);
                    }
                    finally
                    {
                        reader.close();
                    }
                }
                AstyanaxConfigurationImpl configuration = new AstyanaxConfigurationImpl();
                configuration.setDiscoveryType(NodeDiscoveryType.valueOf(config.getProperty("astyanax.connection.discovery", "NONE")));
                configuration.setConnectionPoolType(ConnectionPoolType.valueOf(config.getProperty("astyanax.connection.pool", "ROUND_ROBIN")));
                configuration.setDefaultReadConsistencyLevel(ConsistencyLevel.valueOf(com.netflix.jmeter.properties.Properties.instance.cassandra.getReadConsistency()));
                configuration.setDefaultWriteConsistencyLevel(ConsistencyLevel.valueOf(com.netflix.jmeter.properties.Properties.instance.cassandra.getWriteConsistency()));
                logger.info("AstyanaxConfiguration: " + configuration.toString());
                String property = config.getProperty("astyanax.connection.latency.stategy", "EmptyLatencyScoreStrategyImpl");
                LatencyScoreStrategy latencyScoreStrategy = null;
                if (property.equalsIgnoreCase("SmaLatencyScoreStrategyImpl"))
                {
                    // NOTE(review): the SMA strategy is commented out, so choosing it
                    // leaves latencyScoreStrategy null -- confirm this is intended.
                    int updateInterval = Integer.parseInt(config.getProperty("astyanax.connection.latency.stategy.updateInterval", "2000"));
                    int resetInterval = Integer.parseInt(config.getProperty("astyanax.connection.latency.stategy.resetInterval", "10000"));
                    int windowSize = Integer.parseInt(config.getProperty("astyanax.connection.latency.stategy.windowSize", "100"));
                    double badnessThreshold = Double.parseDouble(config.getProperty("astyanax.connection.latency.stategy.badnessThreshold", "0.1"));
                    // latencyScoreStrategy = new SmaLatencyScoreStrategyImpl(updateInterval, resetInterval, windowSize, badnessThreshold);
                }
                else
                {
                    latencyScoreStrategy = new EmptyLatencyScoreStrategyImpl();
                }
                String maxConnection = com.netflix.jmeter.properties.Properties.instance.cassandra.getMaxConnsPerHost();
                ConnectionPoolConfigurationImpl poolConfig = new ConnectionPoolConfigurationImpl(getClusterName()).setPort(port);
                poolConfig.setMaxConnsPerHost(Integer.parseInt(maxConnection));
                // NOTE(review): join(list, ":" + port + ",") leaves the LAST endpoint
                // without a ":port" suffix; Astyanax falls back to setPort for it,
                // but verify the seed format against the Astyanax docs.
                poolConfig.setSeeds(StringUtils.join(endpoints, ":" + port + ","));
                poolConfig.setLatencyScoreStrategy(latencyScoreStrategy);
                logger.info("ConnectionPoolConfiguration: " + poolConfig.toString());
                ConnectionPoolMonitor connectionPoolMonitor = new CountingConnectionPoolMonitor();
                Builder builder = new AstyanaxContext.Builder();
                builder.forCluster(getClusterName());
                builder.forKeyspace(getKeyspaceName());
                builder.withAstyanaxConfiguration(configuration);
                builder.withConnectionPoolConfiguration(poolConfig);
                // FIX: the monitor was registered twice (the second, anonymous
                // CountingConnectionPoolMonitor silently replaced the first).
                builder.withConnectionPoolMonitor(connectionPoolMonitor);
                context = builder.buildKeyspace(ThriftFamilyFactory.getInstance());
                context.start();
                keyspace = context.getEntity();
                return keyspace;
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
        }
    }

    /** Builds an Astyanax-backed operation for the given column family. */
    public Operation newOperation(String columnName, boolean isCounter)
    {
        return new AstyanaxOperation(columnName, isCounter);
    }

    @Override
    public String logConnections()
    {
        return context == null ? "" : context.getConnectionPoolMonitor().toString();
    }

    /** Shuts down the Astyanax context and clears cached state (idempotent). */
    @Override
    public void shutdown()
    {
        if (context == null)
            return;
        context.shutdown();
        context = null;
        keyspace = null;
    }
}
| 7,572 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/connections/a6x/AstyanaxOperation.java | package com.netflix.jmeter.connections.a6x;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.utils.Pair;
import com.netflix.astyanax.ColumnListMutation;
import com.netflix.astyanax.ColumnMutation;
import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.SerializerPackage;
import com.netflix.astyanax.connectionpool.OperationResult;
import com.netflix.astyanax.connectionpool.exceptions.ConnectionException;
import com.netflix.astyanax.connectionpool.exceptions.NotFoundException;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.astyanax.serializers.ByteBufferSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.util.RangeBuilder;
import com.netflix.jmeter.sampler.AbstractSampler.ResponseData;
import com.netflix.jmeter.sampler.Operation;
import com.netflix.jmeter.sampler.OperationException;
import com.netflix.jmeter.utils.SystemUtils;
/**
 * {@link Operation} implementation on top of Astyanax. Serializers are
 * injected via {@link #serlizers} and used to build the {@link ColumnFamily}
 * handle and to render responses.
 */
public class AstyanaxOperation implements Operation
{
    private AbstractSerializer valueSerializer;
    private ColumnFamily<Object, Object> cfs;
    private AbstractSerializer columnSerializer;
    private final String cfName;
    private final boolean isCounter;

    /**
     * Response wrapper that records the coordinator host and the client-side
     * latency reported by Astyanax alongside the payload.
     */
    public class AstyanaxResponseData extends ResponseData
    {
        public AstyanaxResponseData(String response, int size, OperationResult<?> result)
        {
            // BUG FIX: the original read "EXECUTED_ON + result != null ? ... : ...".
            // '+' binds tighter than '?:', so the condition was the always-true
            // (EXECUTED_ON + result) != null: the EXECUTED_ON prefix was dropped
            // and a null result caused an NPE. Parenthesized to match the
            // six-argument constructor below.
            super(response, size, EXECUTED_ON + (result != null ? result.getHost().getHostName() : ""), result != null ? result.getLatency(TimeUnit.MILLISECONDS) : 0);
        }

        public AstyanaxResponseData(String response, int size, OperationResult<?> result, Object key, Object cn, Object value)
        {
            super(response, size, EXECUTED_ON + (result != null ? result.getHost().getHostName() : ""), (result != null ? result.getLatency(TimeUnit.MILLISECONDS) : 0), key, cn, value);
        }

        public AstyanaxResponseData(String response, int size, OperationResult<?> result, Object key, Map<?, ?> kv)
        {
            // NOTE(review): unlike the other constructors this one omits the
            // EXECUTED_ON prefix -- confirm whether that is intentional.
            super(response, size, (result == null) ? "" : result.getHost().getHostName(), result != null ? result.getLatency(TimeUnit.MILLISECONDS) : 0, key, kv);
        }
    }

    AstyanaxOperation(String columnName, boolean isCounter)
    {
        this.cfName = columnName;
        this.isCounter = isCounter;
    }

    /** Injects key/column/value serializers and (re)builds the CF handle. */
    @Override
    public void serlizers(AbstractSerializer<?> keySerializer, AbstractSerializer<?> columnSerializer, AbstractSerializer<?> valueSerializer)
    {
        this.cfs = new ColumnFamily(cfName, keySerializer, columnSerializer);
        this.columnSerializer = columnSerializer;
        this.valueSerializer = valueSerializer;
    }

    /** Writes one column, or increments one counter column on a counter CF. */
    @Override
    public ResponseData put(Object key, Object colName, Object value) throws OperationException
    {
        MutationBatch m = AstyanaxConnection.instance.keyspace().prepareMutationBatch();
        if (isCounter)
            m.withRow(cfs, key).incrementCounterColumn(colName, (Long) value);
        else
            m.withRow(cfs, key).putColumn(colName, value, valueSerializer, null);
        try
        {
            OperationResult<Void> result = m.execute();
            return new AstyanaxResponseData("", 0, result, key, colName, value);
        }
        catch (ConnectionException e)
        {
            throw new OperationException(e);
        }
    }

    /**
     * Writes one column addressed by composite key/column strings, resolving
     * the raw ByteBuffers through the server-reported serializer package.
     */
    @Override
    public ResponseData putComposite(String key, String colName, ByteBuffer value) throws OperationException
    {
        try
        {
            SerializerPackage sp = AstyanaxConnection.instance.keyspace().getSerializerPackage(cfName, false);
            // work around
            ByteBuffer rowKey = sp.keyAsByteBuffer(key);
            ByteBuffer column = sp.columnAsByteBuffer(colName);
            ColumnFamily<ByteBuffer, ByteBuffer> columnFamily = new ColumnFamily(cfName, ByteBufferSerializer.get(), ByteBufferSerializer.get());
            ColumnMutation mutation = AstyanaxConnection.instance.keyspace().prepareColumnMutation(columnFamily, rowKey, column);
            OperationResult<Void> result;
            if (isCounter)
                result = mutation.incrementCounterColumn(LongSerializer.get().fromByteBuffer(value)).execute();
            else
                result = mutation.putValue(value, null).execute();
            return new AstyanaxResponseData("", 0, result, key, colName, value);
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
    }

    /** Not implemented yet. */
    @Override
    public ResponseData batchCompositeMutate(String key, Map<String, ByteBuffer> nv) throws OperationException
    {
        // TODO implement
        return null;
    }

    /** Writes all name/value pairs of {@code nv} under one row in one batch. */
    @Override
    public ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException
    {
        MutationBatch m = AstyanaxConnection.instance.keyspace().prepareMutationBatch();
        ColumnListMutation<Object> cf = m.withRow(cfs, key);
        for (Map.Entry<?, ?> entry : nv.entrySet())
        {
            if (isCounter)
                cf.incrementCounterColumn(entry.getKey(), (Long) entry.getValue());
            else
                cf.putColumn(entry.getKey(), entry.getValue(), valueSerializer, null);
        }
        try
        {
            OperationResult<Void> result = m.execute();
            return new AstyanaxResponseData("", 0, result, key, nv);
        }
        catch (ConnectionException e)
        {
            throw new OperationException(e);
        }
    }

    /** Reads one column; a missing column is reported as "Not found", not an error. */
    @Override
    public ResponseData get(Object rkey, Object colName) throws OperationException
    {
        StringBuffer response = new StringBuffer();
        int bytes = 0;
        OperationResult<Column<Object>> opResult = null;
        try
        {
            opResult = AstyanaxConnection.instance.keyspace().prepareQuery(cfs).getKey(rkey).getColumn(colName).execute();
            bytes = opResult.getResult().getRawName().capacity();
            bytes += opResult.getResult().getByteBufferValue().capacity();
            String value = SystemUtils.convertToString(valueSerializer, opResult.getResult().getByteBufferValue());
            response.append(value);
        }
        catch (NotFoundException ex)
        {
            // ignore this because nothing is available to show
            response.append("...Not found...");
        }
        catch (ConnectionException e)
        {
            throw new OperationException(e);
        }
        return new AstyanaxResponseData(response.toString(), bytes, opResult, rkey, colName, null);
    }

    /** Composite-addressed variant of {@link #get}. */
    @Override
    public ResponseData getComposite(String key, String colName) throws OperationException
    {
        StringBuffer response = new StringBuffer();
        int bytes = 0;
        OperationResult<Column<ByteBuffer>> opResult = null;
        try
        {
            SerializerPackage sp = AstyanaxConnection.instance.keyspace().getSerializerPackage(cfName, false);
            ByteBuffer bbName = sp.columnAsByteBuffer(colName);
            ByteBuffer bbKey = sp.keyAsByteBuffer(key);
            ColumnFamily<ByteBuffer, ByteBuffer> columnFamily = new ColumnFamily(cfName, ByteBufferSerializer.get(), ByteBufferSerializer.get());
            opResult = AstyanaxConnection.instance.keyspace().prepareQuery(columnFamily).getKey(bbKey).getColumn(bbName).execute();
            bytes = opResult.getResult().getByteBufferValue().capacity();
            bytes += opResult.getResult().getRawName().capacity();
            String value = SystemUtils.convertToString(valueSerializer, opResult.getResult().getByteBufferValue());
            response.append(value);
        }
        catch (NotFoundException ex)
        {
            // ignore this because nothing is available to show
            response.append("...Not found...");
        }
        catch (Exception e)
        {
            throw new OperationException(e);
        }
        return new AstyanaxResponseData(response.toString(), bytes, opResult, key, colName, null);
    }

    /**
     * Reads up to {@code count} columns of a row between {@code startColumn}
     * and {@code endColumn}, rendering each as "name:value" per line.
     */
    @Override
    public ResponseData rangeSlice(Object rKey, Object startColumn, Object endColumn, boolean reversed, int count) throws OperationException
    {
        int bytes = 0;
        OperationResult<ColumnList<Object>> opResult = null;
        StringBuffer response = new StringBuffer().append("\n");
        try
        {
            RangeBuilder rb = new RangeBuilder().setStart(startColumn, columnSerializer).setEnd(endColumn, columnSerializer).setLimit(count).setReversed(reversed);
            opResult = AstyanaxConnection.instance.keyspace().prepareQuery(cfs).getKey(rKey).withColumnRange(rb.build()).execute();
            Iterator<?> it = opResult.getResult().iterator();
            while (it.hasNext())
            {
                Column<?> col = (Column<?>) it.next();
                String key = SystemUtils.convertToString(columnSerializer, col.getRawName());
                bytes += col.getRawName().capacity();
                String value = SystemUtils.convertToString(valueSerializer, col.getByteBufferValue());
                bytes += col.getByteBufferValue().capacity();
                response.append(key).append(":").append(value).append(SystemUtils.NEW_LINE);
            }
        }
        catch (NotFoundException ex)
        {
            // ignore this because nothing is available to show
            response.append("...Not found...");
        }
        catch (ConnectionException e)
        {
            throw new OperationException(e);
        }
        return new AstyanaxResponseData(response.toString(), bytes, opResult, rKey, Pair.create(startColumn, endColumn), null);
    }

    /** Deletes one column of one row. */
    @Override
    public ResponseData delete(Object rkey, Object colName) throws OperationException
    {
        try
        {
            OperationResult<Void> opResult = AstyanaxConnection.instance.keyspace().prepareColumnMutation(cfs, rkey, colName).deleteColumn().execute();
            return new AstyanaxResponseData("", 0, opResult, rkey, colName, null);
        }
        catch (ConnectionException e)
        {
            throw new OperationException(e);
        }
    }
}
| 7,573 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/properties/CassandraProperties.java | package com.netflix.jmeter.properties;
import java.util.Properties;
import org.apache.jmeter.config.ConfigTestElement;
import org.apache.jmeter.testbeans.TestBean;
public class CassandraProperties extends ConfigTestElement implements TestBean
{
private static final long serialVersionUID = 468255622613306730L;
private static final String Keyspace = "keyspace";
private static final String ClusterName = "clusterName";
private static final String ReadConsistency = "readConsistency";
private static final String WriteConsistency = "writeConsistency";
private static final String CassandraServers = "cassandraServers";
private static final String ClientType = "clientType";
public static final String MaxConnectionsPerHost = "maxConnsPerHost";
public CassandraProperties()
{
com.netflix.jmeter.properties.Properties.instance.cassandra = this;
}
public String prefixPropertyName(String name)
{
return getPropertyAsString(CassandraProperties.ClusterName) + "." + getPropertyAsString(CassandraProperties.Keyspace) + name;
}
public String getClientType()
{
return getPropertyAsString(ClientType);
}
public void setClientType(String clientType)
{
setProperty(ClientType, clientType);
}
public String getKeyspace()
{
return getPropertyAsString(Keyspace);
}
public void setKeyspace(String keyspace)
{
setProperty(Keyspace, keyspace);
}
public String getClusterName()
{
return getPropertyAsString(ClusterName);
}
public void setClusterName(String clusterName)
{
setProperty(ClusterName, clusterName);
}
public String getReadConsistency()
{
return getPropertyAsString(ReadConsistency);
}
public void setReadConsistency(String readConsistency)
{
setProperty(ReadConsistency, readConsistency);
}
public String getWriteConsistency()
{
return getPropertyAsString(WriteConsistency);
}
public void setWriteConsistency(String writeConsistency)
{
setProperty(WriteConsistency, writeConsistency);
}
public String getCassandraServers()
{
return getPropertyAsString(CassandraServers);
}
public void setCassandraServers(String cassandraServers)
{
setProperty(CassandraServers, cassandraServers);
}
public String getMaxConnsPerHost()
{
return getPropertyAsString(MaxConnectionsPerHost);
}
public void setMaxConnsPerHost(String connections)
{
setProperty(MaxConnectionsPerHost, connections);
}
public void addProperties(Properties prop)
{
prop.put("jmeter.cluster", getClusterName());
prop.put("jmeter.keyspace", getKeyspace());
prop.put(prefixPropertyName(".astyanax.writeConsistency"), getWriteConsistency());
prop.put(prefixPropertyName(".astyanax.servers"), getCassandraServers());
prop.put(prefixPropertyName(".astyanax.readConsistency"), getReadConsistency());
prop.put(prefixPropertyName(".astyanax.maxConnsPerHost"), getMaxConnsPerHost());
}
} | 7,574 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/properties/FatclientProperties.java | package com.netflix.jmeter.properties;
import org.apache.jmeter.config.ConfigTestElement;
import org.apache.jmeter.testbeans.TestBean;
public class FatclientProperties extends ConfigTestElement implements TestBean
{
private static final long serialVersionUID = 468255622613306730L;
private static final String seed_provider = "seed_provider";
private static final String dynamic_snitch = "dynamic_snitch";
private static final String endpoint_Snitch = "endpoint_Snitch";
private static final String rpc_timeout_in_ms = "rpc_timeout_in_ms";
private static final String internode_encryption = "internode_encryption";
public FatclientProperties()
{
Properties.instance.fatclient = this;
}
public String getEndpoint_Snitch()
{
return getPropertyAsString(endpoint_Snitch);
}
public void setEndpoint_Snitch(String val)
{
setProperty(endpoint_Snitch, val);
}
public String getSeed_provider()
{
return getPropertyAsString(seed_provider);
}
public void setSeed_provider(String val)
{
setProperty(seed_provider, val);
}
public String getDynamic_snitch()
{
return getPropertyAsString(dynamic_snitch);
}
public void setDynamic_snitch(String val)
{
setProperty(dynamic_snitch, val);
}
public String getRpc_timeout_in_ms()
{
return getPropertyAsString(rpc_timeout_in_ms);
}
public void setRpc_timeout_in_ms(String val)
{
setProperty(rpc_timeout_in_ms, val);
}
public String getInternode_encryption()
{
return getPropertyAsString(internode_encryption);
}
public void setInternode_encryption(String val)
{
setProperty(internode_encryption, val);
}
} | 7,575 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/properties/SchemaProperties.java | package com.netflix.jmeter.properties;
import org.apache.jmeter.config.ConfigTestElement;
import org.apache.jmeter.testbeans.TestBean;
/**
 * JMeter config bean describing one column-family schema (comparator,
 * validators, caching, read-repair chance, ...). Each constructed instance
 * queues itself on the global {@link Properties} schema list.
 */
public class SchemaProperties extends ConfigTestElement implements TestBean {
    private static final long serialVersionUID = 468255622613306730L;

    // JMeter property-map keys (named after the cassandra-cli schema options)
    private static final String strategy_options = "strategy_options";
    private static final String comparator_type = "comparator_type";
    private static final String key_validation_class = "key_validation_class";
    private static final String default_validation_class = "default_validation_class";
    private static final String validator = "validator";
    private static final String rows_cached = "rows_cached";
    private static final String keys_cached = "keys_cached";
    private static final String row_cache_provider = "row_cache_provider";
    private static final String read_repair_chance = "read_repair_chance";
    private static final String column_family = "column_family";

    public SchemaProperties() {
        Properties.instance.addSchema(this);
    }

    public String getStrategy_options() {
        return getPropertyAsString(strategy_options);
    }

    public void setStrategy_options(String val) {
        setProperty(strategy_options, val);
    }

    public String getKey_validation_class() {
        return getPropertyAsString(key_validation_class);
    }

    public void setKey_validation_class(String val) {
        setProperty(key_validation_class, val);
    }

    public String getValidator() {
        return getPropertyAsString(validator);
    }

    public void setValidator(String val) {
        setProperty(validator, val);
    }

    public String getDefault_validation_class() {
        return getPropertyAsString(default_validation_class);
    }

    public void setDefault_validation_class(String val) {
        setProperty(default_validation_class, val);
    }

    public String getKeys_cached() {
        return getPropertyAsString(keys_cached);
    }

    public void setKeys_cached(String val) {
        setProperty(keys_cached, val);
    }

    public String getRows_cached() {
        return getPropertyAsString(rows_cached);
    }

    public void setRows_cached(String val) {
        setProperty(rows_cached, val);
    }

    public String getRow_cache_provider() {
        return getPropertyAsString(row_cache_provider);
    }

    public void setRow_cache_provider(String val) {
        setProperty(row_cache_provider, val);
    }

    public String getRead_repair_chance() {
        return getPropertyAsString(read_repair_chance);
    }

    public void setRead_repair_chance(String val) {
        setProperty(read_repair_chance, val);
    }

    public String getComparator_type() {
        return getPropertyAsString(comparator_type);
    }

    public void setComparator_type(String val) {
        setProperty(comparator_type, val);
    }

    public String getColumn_family() {
        return getPropertyAsString(column_family);
    }

    public void setColumn_family(String val) {
        setProperty(column_family, val);
    }
}
| 7,576 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/properties/Properties.java | package com.netflix.jmeter.properties;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.jmeter.connections.a6x.AstyanaxConnection;
/**
 * Global singleton holding every test-plan config bean: the cluster-level
 * Cassandra settings, the fat-client settings and the queued per-CF schema
 * definitions. Beans self-register here from their constructors.
 */
public class Properties
{
    private static final Logger logger = LoggerFactory.getLogger(Properties.class);
    public static final Properties instance = new Properties();
    public CassandraProperties cassandra;
    public FatclientProperties fatclient;
    private List<SchemaProperties> schemas = new ArrayList<SchemaProperties>();

    /** Queues a schema definition to be applied later. */
    public void addSchema(SchemaProperties newProp)
    {
        schemas.add(newProp);
        // FIX: corrected "Queing" -> "Queuing" in the log message
        logger.info("Queuing schema change for the cf: {}", newProp);
    }

    /**
     * Returns the queued schema definitions.
     * NOTE(review): exposes the internal mutable list; callers appear to only
     * iterate it -- confirm before wrapping in unmodifiableList.
     */
    public List<SchemaProperties> getSchemas()
    {
        return schemas;
    }
}
| 7,577 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/OperationException.java | package com.netflix.jmeter.sampler;
/**
 * Checked exception wrapping any failure raised while executing a Cassandra
 * operation, so samplers can report it uniformly. The original exception is
 * preserved as the cause.
 */
public class OperationException extends Exception
{
    private static final long serialVersionUID = 1L;

    public OperationException(Exception cause)
    {
        super(cause);
    }
}
| 7,578 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/GetSampler.java | package com.netflix.jmeter.sampler;
/**
 * Sampler that reads a single column (key + column name taken from the
 * sampler's configured fields) through the active connection.
 */
public class GetSampler extends AbstractSampler
{
    private static final long serialVersionUID = -2103499609822848595L;

    public ResponseData execute() throws OperationException
    {
        // reads never target counter semantics, hence isCounter = false
        Operation op = Connection.getInstance().newOperation(getColumnFamily(), false);
        setSerializers(op);
        return op.get(getKey(), getColumnName());
    }
}
| 7,579 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/CompsitePutSampler.java | package com.netflix.jmeter.sampler;
import java.nio.ByteBuffer;
/**
 * Sampler that writes one composite column.
 * NOTE(review): the class name is misspelled ("Compsite") but kept as-is,
 * since JMeter test plans reference samplers by fully-qualified class name.
 */
public class CompsitePutSampler extends AbstractSampler
{
    private static final long serialVersionUID = 6393722552275749483L;
    public static final String VALUE = "VALUE";
    public static final String IS_Batch = "IS_Batch";
    public static final String IS_Commit = "IS_Commit";

    public ResponseData execute() throws OperationException
    {
        Operation op = Connection.getInstance().newOperation(getColumnFamily(), isCounter());
        setSerializers(op);
        String key = getProperty(KEY).getStringValue();
        String column = getProperty(COLUMN_NAME).getStringValue();
        return op.putComposite(key, column, getValue());
    }

    /** Renders the configured value text to bytes via the value serializer. */
    public ByteBuffer getValue()
    {
        String text = getProperty(VALUE).getStringValue();
        return serialier(getVSerializerType()).fromString(text);
    }

    public void setValue(String text)
    {
        setProperty(VALUE, text);
    }
}
| 7,580 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/Operation.java | package com.netflix.jmeter.sampler;
import java.nio.ByteBuffer;
import java.util.Map;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.jmeter.sampler.AbstractSampler.ResponseData;
/**
 * Abstraction over a single Cassandra client request; a fresh instance is
 * created per sample by {@link Connection#newOperation}. Serializers must be
 * set (via {@link #serlizers}) before the data methods are called.
 */
public interface Operation
{
    /** Sets the key, column-name and value serializers used by subsequent calls. */
    void serlizers(AbstractSerializer<?> kser, AbstractSerializer<?> colser, AbstractSerializer<?> valser);
    /** Writes a single column value for the given row key. */
    ResponseData put(Object key, Object colName, Object value) throws OperationException;
    /** Writes all name/value pairs for the given row key in one mutation. */
    ResponseData batchMutate(Object key, Map<?, ?> nv) throws OperationException;
    /** Reads one column from the given row. */
    ResponseData get(Object rkey, Object colName) throws OperationException;
    /** Reads a slice of columns from one row, bounded and optionally reversed. */
    ResponseData rangeSlice(Object rKey, Object startColumn, Object endColumn, boolean reversed, int count) throws OperationException;
    /** Writes one composite column; the value is supplied pre-serialized. */
    ResponseData putComposite(String key, String colName, ByteBuffer vbb) throws OperationException;
    /** Writes a batch of composite columns with pre-serialized values. */
    ResponseData batchCompositeMutate(String key, Map<String, ByteBuffer> nv) throws OperationException;
    /** Reads one composite column given the raw key and column-name text. */
    ResponseData getComposite(String stringValue, String stringValue2) throws OperationException;
    /** Deletes one column from the given row. */
    ResponseData delete(Object rkey, Object colName) throws OperationException;
}
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/CompositeBatchPutSampler.java | package com.netflix.jmeter.sampler;
import java.util.Map;
import com.google.common.collect.Maps;
/**
 * JMeter sampler that writes a batch of composite columns in one mutation.
 */
public class CompositeBatchPutSampler extends AbstractSampler
{
    private static final long serialVersionUID = 6393722552275749483L;
    public static final String NAME_AND_VALUE = "NAME_AND_VALUE";
    public static final String IS_Batch = "IS_Batch";

    /**
     * Executes the batch mutation for the configured row key.
     */
    public ResponseData execute() throws OperationException
    {
        Operation ops = Connection.getInstance().newOperation(getColumnFamily(), isCounter());
        setSerializers(ops);
        Map<?, ?> nv = getNameValue();
        return ops.batchMutate(getKey(), nv);
    }

    /**
     * Parses the NAME_AND_VALUE property into a column-name -> value map.
     * Each line must be {@code name:value}; only the first ':' splits the
     * pair, so values may themselves contain ':'. A line with no ':' raises
     * ArrayIndexOutOfBoundsException (unchanged from the original behavior).
     *
     * @return map of converted column names to converted values
     */
    public Map<?, ?> getNameValue()
    {
        Map<Object, Object> return_ = Maps.newHashMap();
        String text = getProperty(NAME_AND_VALUE).getStringValue();
        for (String str : text.split("[\\r\\n]+"))
        {
            String[] cv = str.split(":", 2);
            String cName = cv[0];
            String vName = cv[1];
            return_.put(convert(cName, getCSerializerType()), convert(vName, getVSerializerType()));
        }
        return return_;
    }

    /**
     * Stores the raw name/value lines. Added for consistency with
     * {@link BatchPutSampler#setNameValue}, which this class was missing,
     * so GUI panels can populate this sampler the same way.
     */
    public void setNameValue(String text)
    {
        setProperty(NAME_AND_VALUE, text);
    }
}
| 7,582 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/PutSampler.java | package com.netflix.jmeter.sampler;
/**
 * JMeter sampler that writes a single column to Cassandra.
 */
public class PutSampler extends AbstractSampler
{
    private static final long serialVersionUID = 6393722552275749483L;
    public static final String VALUE = "VALUE";
    public static final String IS_Batch = "IS_Batch";
    public static final String IS_Commit = "IS_Commit";

    /**
     * Writes the configured column (optionally against a counter CF).
     */
    public ResponseData execute() throws OperationException
    {
        final Operation operation = Connection.getInstance().newOperation(getColumnFamily(), isCounter());
        setSerializers(operation);
        return operation.put(getKey(), getColumnName(), getValue());
    }

    /** @return the column value converted via the configured value serializer */
    public Object getValue()
    {
        return convert(getProperty(VALUE).getStringValue(), getVSerializerType());
    }

    /** Stores the raw column value to write. */
    public void setValue(String text)
    {
        setProperty(VALUE, text);
    }
}
| 7,583 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/GetRangeSliceSampler.java | package com.netflix.jmeter.sampler;
/**
 * JMeter sampler that performs a column range (slice) query on a single row.
 */
public class GetRangeSliceSampler extends AbstractSampler
{
    private static final long serialVersionUID = -8566773644299382213L;
    public static final String START_COLUMN_NAME = "START_COLUMN_NAME";
    public static final String END_COLUMN_NAME = "END_COLUMN_NAME";
    public static final String IS_REVERSE = "IS_REVERSE";
    public static final String COUNT = "COUNT";

    /**
     * Runs the slice query with the configured bounds, ordering and count.
     */
    public ResponseData execute() throws OperationException
    {
        final Operation operation = Connection.getInstance().newOperation(getColumnFamily(), false);
        setSerializers(operation);
        return operation.rangeSlice(getKey(), getStartName(), getEndName(), isReverse(), getCount());
    }

    /** Stores the raw slice start-column text. */
    public void setStartName(String text)
    {
        setProperty(START_COLUMN_NAME, text);
    }

    /** Stores the raw slice end-column text. */
    public void setEndName(String text)
    {
        setProperty(END_COLUMN_NAME, text);
    }

    /** @return the start column converted via the column serializer */
    public Object getStartName()
    {
        return convert(getProperty(START_COLUMN_NAME).getStringValue(), getCSerializerType());
    }

    /** @return the end column converted via the column serializer */
    public Object getEndName()
    {
        return convert(getProperty(END_COLUMN_NAME).getStringValue(), getCSerializerType());
    }

    /** @return true when the slice should be returned in reverse order */
    public boolean isReverse()
    {
        return getPropertyAsBoolean(IS_REVERSE);
    }

    /** Stores the reverse-order flag. */
    public void setReverse(boolean isReverse)
    {
        setProperty(IS_REVERSE, isReverse);
    }

    /** Stores the maximum number of columns to fetch. */
    public void setCount(String text)
    {
        setProperty(COUNT, text);
    }

    /** @return the maximum number of columns to fetch */
    public int getCount()
    {
        return getProperty(COUNT).getIntValue();
    }
}
| 7,584 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/BatchPutSampler.java | package com.netflix.jmeter.sampler;
import java.util.Map;
import com.google.common.collect.Maps;
/**
 * JMeter sampler that writes several columns to one row in a single mutation.
 */
public class BatchPutSampler extends AbstractSampler
{
    private static final long serialVersionUID = 6393722552275749483L;
    public static final String NAME_AND_VALUE = "NAME_AND_VALUE";
    public static final String IS_Batch = "IS_Batch";

    /**
     * Executes the batch mutation for the configured row key.
     */
    public ResponseData execute() throws OperationException
    {
        final Operation operation = Connection.getInstance().newOperation(getColumnFamily(), isCounter());
        setSerializers(operation);
        return operation.batchMutate(getKey(), getNameValue());
    }

    /**
     * Parses the NAME_AND_VALUE property (one {@code name:value} per line,
     * split on the first ':') into a converted column-name -> value map.
     */
    public Map<?, ?> getNameValue()
    {
        final Map<Object, Object> columns = Maps.newHashMap();
        final String raw = getProperty(NAME_AND_VALUE).getStringValue();
        for (final String line : raw.split("[\\r\\n]+"))
        {
            final String[] pair = line.split(":", 2);
            columns.put(convert(pair[0], getCSerializerType()), convert(pair[1], getVSerializerType()));
        }
        return columns;
    }

    /** Stores the raw name/value lines. */
    public void setNameValue(String text)
    {
        setProperty(NAME_AND_VALUE, text);
    }
}
| 7,585 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/Connection.java | package com.netflix.jmeter.sampler;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import org.apache.cassandra.db.ColumnFamilyType;
import org.apache.cassandra.thrift.TokenRange;
import org.apache.cassandra.utils.FBUtilities;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.Sets;
import com.netflix.jmeter.properties.Properties;
import com.netflix.jmeter.utils.CClient;
import com.netflix.jmeter.utils.Schema;
/**
 * Base class for the Cassandra client connection used by the samplers.
 * Parses the seed list from the test properties, schedules ring discovery,
 * creates the keyspace/schema, and lazily constructs the concrete client
 * implementation named by the "client type" property.
 */
public abstract class Connection
{
    private static final Logger logger = LoggerFactory.getLogger(Connection.class);
    // NOTE(review): appears unused in this file; kept (original spelling and
    // all) because it is part of the public surface.
    public static volatile boolean intialized = false;
    // Singleton instance. volatile is required for the double-checked locking
    // in getInstance() to publish the instance safely across threads.
    public static volatile Connection connection;
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    // Current known ring members; replaced wholesale by describeRing().
    public volatile Set<String> endpoints;
    public int port = 0;

    public Connection()
    {
        // parse the seeds from the property.
        parseSeeds();
        // schedule the describe ring.
        scheduleDescribeRing();
        // setup for your use first.
        setupKeyspace();
    }

    /**
     * This method will parse the seed property from the test case. Seeds are
     * comma-separated {@code host:port} pairs; the port of the last entry wins.
     */
    void parseSeeds()
    {
        Set<String> temp = Sets.newHashSet();
        for (String host : Properties.instance.cassandra.getCassandraServers().split(","))
        {
            String[] hp = host.split(":");
            temp.add(hp[0]);
            port = Integer.parseInt(hp[1]);
        }
        assert temp.size() > 0;
        endpoints = temp;
    }

    /**
     * Creates the keyspace/column families on the first reachable endpoint;
     * a failure on one endpoint is logged and the next is tried.
     */
    void setupKeyspace()
    {
        if (Properties.instance.getSchemas().size() == 0)
            return;
        for (String host : endpoints)
        {
            try
            {
                CClient c = CClient.getClient(host, port);
                new Schema(c).createKeyspace();
                c.socket.close();
                break;
            }
            catch (Exception unlucky)
            {
                logger.error("Error talking to the client: ", unlucky);
            }
        }
    }

    /**
     * Schedules a one-shot ring discovery when only a single seed was given.
     */
    void scheduleDescribeRing()
    {
        if (endpoints.size() > 1)
            return;
        // sleep for 2 Min
        executor.schedule(new Runnable()
        {
            public void run()
            {
                describeRing();
            }
        }, 2, TimeUnit.MINUTES);
    }

    /**
     * Replaces {@link #endpoints} with the full member list obtained from a
     * describe_ring call against one of the current endpoints.
     */
    void describeRing()
    {
        try
        {
            CClient client = CClient.getClient(endpoints.iterator().next(), port);
            client.set_keyspace(Properties.instance.cassandra.getKeyspace());
            // get the nodes in the ring.
            List<TokenRange> lt = client.describe_ring(getKeyspaceName());
            Set<String> temp = Sets.newHashSet();
            for (TokenRange range : lt)
                temp.addAll(range.endpoints);
            endpoints = temp;
            // TODO: filter out the nodes in the other region.
            client.socket.close();
        }
        catch (Exception wtf)
        {
            throw new RuntimeException(wtf);
        }
    }

    /**
     * Lazily creates the singleton connection (double-checked locking, hence
     * the volatile field) and starts a daemon thread that logs connection-pool
     * metrics once a minute.
     */
    public static Connection getInstance()
    {
        if (connection != null)
            return connection;
        synchronized (Connection.class)
        {
            if (connection != null)
                return connection;
            try
            {
                connection = FBUtilities.construct(Properties.instance.cassandra.getClientType(), "Creating Connection");
                // Log the metrics for troubleshooting.
                Thread t = new Thread()
                {
                    @Override
                    public void run()
                    {
                        while (true)
                        {
                            try
                            {
                                logger.info("ConnectionPoolMonitor: " + connection.logConnections());
                                Thread.sleep(60 * 1000);
                            }
                            catch (InterruptedException wtf)
                            {
                                // Restore the interrupt status and stop the
                                // monitor instead of swallowing the interrupt.
                                Thread.currentThread().interrupt();
                                return;
                            }
                        }
                    }
                };
                t.setDaemon(true);
                t.start();
                return connection;
            }
            catch (Exception e)
            {
                throw new RuntimeException(e);
            }
        }
    }

    /** @return the keyspace name from the test properties */
    public static String getKeyspaceName()
    {
        return Properties.instance.cassandra.getKeyspace();
    }

    /** @return the cluster name from the test properties */
    public static String getClusterName()
    {
        return Properties.instance.cassandra.getClusterName();
    }

    /** @return the column family type used when creating schemas */
    public static ColumnFamilyType getColumnFamilyType()
    {
        return ColumnFamilyType.Standard;
    }

    /** Creates a new single-use operation bound to the given column family. */
    public abstract Operation newOperation(String columnName, boolean isCounter);

    /** @return a human-readable snapshot of the connection-pool state */
    public abstract String logConnections();

    /** Releases all client resources held by this connection. */
    public abstract void shutdown();
}
| 7,586 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/AbstractSampler.java | package com.netflix.jmeter.sampler;
import java.math.BigInteger;
import java.util.Date;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import org.apache.jmeter.samplers.Entry;
import org.apache.jmeter.samplers.SampleResult;
import com.google.common.collect.Maps;
import com.netflix.astyanax.serializers.AbstractSerializer;
import com.netflix.astyanax.serializers.AsciiSerializer;
import com.netflix.astyanax.serializers.BigIntegerSerializer;
import com.netflix.astyanax.serializers.BooleanSerializer;
import com.netflix.astyanax.serializers.BytesArraySerializer;
import com.netflix.astyanax.serializers.CharSerializer;
import com.netflix.astyanax.serializers.DateSerializer;
import com.netflix.astyanax.serializers.DoubleSerializer;
import com.netflix.astyanax.serializers.FloatSerializer;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.LongSerializer;
import com.netflix.astyanax.serializers.ShortSerializer;
import com.netflix.astyanax.serializers.StringSerializer;
import com.netflix.astyanax.serializers.UUIDSerializer;
import com.netflix.jmeter.utils.Hex;
import com.netflix.jmeter.utils.SystemUtils;
/**
 * Base class for every Cassandra JMeter sampler. Stores the shared test-plan
 * properties (row key, column family, serializer names), converts property
 * text into typed values, and drives the JMeter sample() lifecycle around the
 * subclass-supplied execute().
 */
public abstract class AbstractSampler extends org.apache.jmeter.samplers.AbstractSampler
{
    private static final long serialVersionUID = -8637635942486594464L;
    public static final String KEY = "KEY";
    public static final String COLUMN_NAME = "COLUMN_NAME";
    public static final String KEY_SERIALIZER_TYPE = "KEY_SERIALIZER_TYPE";
    public static final String COLUMN_SERIALIZER_TYPE = "COLUMN_SERIALIZER_TYPE";
    public static final String VALUE_SERIALIZER_TYPE = "VALUE_SERIALIZER_TYPE";
    public static final String COLUMN_FAMILY = "COLUMN_FAMILY";
    public static final String IS_COUNTER = "IS_COUNTER";
    // Registry of serializers selectable from the GUI, keyed by simple class name.
    @SuppressWarnings("rawtypes")
    public static Map<String, AbstractSerializer> serializers = Maps.newHashMap();
    static
    {
        serializers.put("StringSerializer", StringSerializer.get());
        serializers.put("IntegerSerializer", IntegerSerializer.get());
        serializers.put("LongSerializer", LongSerializer.get());
        serializers.put("BooleanSerializer", BooleanSerializer.get());
        serializers.put("DoubleSerializer", DoubleSerializer.get());
        serializers.put("DateSerializer", DateSerializer.get());
        serializers.put("FloatSerializer", FloatSerializer.get());
        serializers.put("ShortSerializer", ShortSerializer.get());
        serializers.put("UUIDSerializer", UUIDSerializer.get());
        serializers.put("BigIntegerSerializer", BigIntegerSerializer.get());
        serializers.put("CharSerializer", CharSerializer.get());
        serializers.put("AsciiSerializer", AsciiSerializer.get());
        serializers.put("BytesArraySerializer", BytesArraySerializer.get());
    }

    /**
     * Converts property text into the Java type matching the named serializer
     * (e.g. "IntegerSerializer" -> Integer). Unrecognised names fall through
     * to the registered serializer's own fromString().
     *
     * @param text raw property text from the test plan
     * @param kSerializerType serializer name selected in the GUI
     * @return the converted value
     */
    public Object convert(String text, String kSerializerType)
    {
        if (kSerializerType.equals("StringSerializer"))
        {
            return text;
        }
        else if (kSerializerType.equals("IntegerSerializer"))
        {
            return Integer.parseInt(text);
        }
        else if (kSerializerType.equals("LongSerializer"))
        {
            return Long.parseLong(text);
        }
        else if (kSerializerType.equals("BooleanSerializer"))
        {
            // A second, unreachable copy of this branch was removed.
            return Boolean.parseBoolean(text);
        }
        else if (kSerializerType.equals("DoubleSerializer"))
        {
            return Double.parseDouble(text);
        }
        else if (kSerializerType.equals("DateSerializer"))
        {
            // Date.parse() yields a long epoch value; wrap it so callers get a
            // java.util.Date as DateSerializer expects (the original returned
            // the boxed Long itself, which the Date serializer cannot handle).
            return new Date(Date.parse(text));
        }
        else if (kSerializerType.equals("FloatSerializer"))
        {
            return Float.parseFloat(text);
        }
        else if (kSerializerType.equals("ShortSerializer"))
        {
            return Short.parseShort(text);
        }
        else if (kSerializerType.equals("UUIDSerializer"))
        {
            return UUID.fromString(text);
        }
        else if (kSerializerType.equals("BigIntegerSerializer"))
        {
            return new BigInteger(text);
        }
        else if (kSerializerType.equals("CharSerializer"))
        {
            // TODO fix it: should return a Character, not the raw text.
            return text;
        }
        else if (kSerializerType.equals("AsciiSerializer"))
        {
            return text;
        }
        else if (kSerializerType.equals("BytesArraySerializer"))
        {
            return Hex.hexToBytes(text);
        }
        return serializers.get(kSerializerType).fromString(text);
    }

    /** Stores the raw column name. */
    public void setColumnName(String text)
    {
        setProperty(COLUMN_NAME, text);
    }

    /** @return the column name converted via the column serializer */
    public Object getColumnName()
    {
        String text = getProperty(COLUMN_NAME).getStringValue();
        return convert(text, getCSerializerType());
    }

    /** Stores the key serializer name. */
    public void setKSerializerType(String text)
    {
        setProperty(KEY_SERIALIZER_TYPE, text);
    }

    /** Stores the column serializer name. */
    public void setCSerializerType(String text)
    {
        setProperty(COLUMN_SERIALIZER_TYPE, text);
    }

    /** Stores the value serializer name. */
    public void setVSerializerType(String text)
    {
        setProperty(VALUE_SERIALIZER_TYPE, text);
    }

    /** Stores the raw row key. */
    public void setKey(String text)
    {
        setProperty(KEY, text);
    }

    /** @return the row key converted via the key serializer */
    public Object getKey()
    {
        String text = getProperty(KEY).getStringValue();
        return convert(text, getKSerializerType());
    }

    /** Stores the column family name. */
    public void setColumnFamily(String text)
    {
        setProperty(COLUMN_FAMILY, text);
    }

    /** @return the column family name */
    public String getColumnFamily()
    {
        return getProperty(COLUMN_FAMILY).getStringValue();
    }

    /** @return the key serializer name */
    public String getKSerializerType()
    {
        return getProperty(KEY_SERIALIZER_TYPE).getStringValue();
    }

    /** @return the column serializer name */
    public String getCSerializerType()
    {
        return getProperty(COLUMN_SERIALIZER_TYPE).getStringValue();
    }

    /** @return the value serializer name */
    public String getVSerializerType()
    {
        return getProperty(VALUE_SERIALIZER_TYPE).getStringValue();
    }

    /** @return the registered serializer for the given name (null if unknown) */
    public static AbstractSerializer serialier(String text)
    {
        return serializers.get(text);
    }

    /** @return the names of all registered serializers, for GUI combo boxes */
    public static Set<String> getSerializerNames()
    {
        return serializers.keySet();
    }

    /**
     * JMeter entry point: runs execute(), records bytes/latency and packages
     * the request and response text into the SampleResult. Failures are
     * reported through the result rather than rethrown.
     */
    public SampleResult sample(Entry e)
    {
        SampleResult sr = new SampleResult();
        sr.setSampleLabel(getName());
        sr.sampleStart();
        sr.setDataType(SampleResult.TEXT);
        long start = sr.currentTimeInMillis();
        String message = "ERROR: UNKNOWN";
        ResponseData response = null;
        try
        {
            response = execute();
            sr.setBytes(response.size);
            message = response.responseRecived;
            sr.setSuccessful(true);
            sr.setResponseCodeOK();
            sr.setResponseHeaders(response.requestSent);
        }
        catch (Exception ex)
        {
            message = SystemUtils.getStackTrace(ex);
            sr.setSuccessful(false);
        }
        finally
        {
            sr.setResponseData(message);
            long latency = System.currentTimeMillis() - start;
            sr.sampleEnd();
            sr.setLatency(latency);
            /*
             * A6x reports the latency via thrift. the following logic will
             * figure out the connection pooling time... the following applies
             * only for A6x clients. rest will set the latency = 0
             */
            if (response != null && response.latency_in_ms != 0)
                sr.setIdleTime(latency - response.latency_in_ms);
        }
        return sr;
    }

    /** Runs the actual Cassandra operation; implemented by each sampler. */
    public abstract ResponseData execute() throws Exception;

    /**
     * Immutable bundle describing one executed request: the response text,
     * its size in bytes, a human-readable request description and (for A6x
     * clients) the client-reported latency.
     */
    public static class ResponseData
    {
        protected static final String EXECUTED_ON = "Executed on: ";
        protected static final String ROW_KEY = "Row Key: ";
        protected static final String COLUMN_NAME = "Column Name: ";
        protected static final String COLUMN_VALUE = "Column Value: ";
        public final String responseRecived;
        public final String requestSent;
        public final int size;
        public final long latency_in_ms;

        protected ResponseData(String response, int size, String requestSent, long latency)
        {
            this.responseRecived = response;
            this.size = size;
            this.requestSent = requestSent;
            this.latency_in_ms = latency;
        }

        public ResponseData(String response, int size, String request)
        {
            this(response, size, EXECUTED_ON + request, 0);
        }

        public ResponseData(String response, int size, String host, long latency, Object key, Object cn, Object value)
        {
            this(response, size, constructRequest(host, key, cn, value), latency);
        }

        public ResponseData(String response, int size, String host, long latency, Object key, Map<?, ?> kv)
        {
            this(response, size, constructRequest(host, key, kv), latency);
        }

        // Builds the request description for a single name/value pair.
        private static String constructRequest(String host, Object key, Object cn, Object value)
        {
            StringBuilder buff = new StringBuilder();
            appendHostAndRowKey(buff, host, key);
            appendKeyValue(buff, cn, value);
            return buff.toString();
        }

        // Builds the request description for a batch of name/value pairs.
        private static String constructRequest(String host, Object key, Map<?, ?> nv)
        {
            StringBuilder buff = new StringBuilder();
            appendHostAndRowKey(buff, host, key);
            for (Map.Entry<?, ?> entry : nv.entrySet())
                appendKeyValue(buff, entry.getKey(), entry.getValue());
            return buff.toString();
        }

        private static void appendHostAndRowKey(StringBuilder buff, String host, Object key)
        {
            buff.append(EXECUTED_ON).append(host).append(SystemUtils.NEW_LINE);
            buff.append(ROW_KEY).append(key).append(SystemUtils.NEW_LINE);
        }

        private static void appendKeyValue(StringBuilder buff, Object cn, Object value)
        {
            // The redundant "null == cn ? ..." ternaries were removed: both
            // values are already guaranteed non-null inside the if guards.
            if (cn != null)
                buff.append(COLUMN_NAME).append(cn).append(SystemUtils.NEW_LINE);
            if (value != null)
                buff.append(COLUMN_VALUE).append(value).append(SystemUtils.NEW_LINE);
        }
    }

    /** Installs the configured key/column/value serializers on the operation. */
    public void setSerializers(Operation ops)
    {
        AbstractSerializer<?> vser = serialier(getVSerializerType());
        AbstractSerializer<?> kser = serialier(getKSerializerType());
        AbstractSerializer<?> cser = serialier(getCSerializerType());
        ops.serlizers(kser, cser, vser);
    }

    /** Marks the sampler as writing to a counter column family. */
    public void setCounter(boolean selected)
    {
        setProperty(IS_COUNTER, selected);
    }

    /** @return true when the sampler targets a counter column family */
    public boolean isCounter()
    {
        return getPropertyAsBoolean(IS_COUNTER);
    }
}
| 7,587 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/CompositGetSampler.java | package com.netflix.jmeter.sampler;
/**
 * JMeter sampler that reads a single composite column from Cassandra.
 */
public class CompositGetSampler extends AbstractSampler
{
    private static final long serialVersionUID = -2103499609822848595L;

    /**
     * Fetches the composite column using the raw key/column-name property text.
     */
    public ResponseData execute() throws OperationException
    {
        final Operation operation = Connection.getInstance().newOperation(getColumnFamily(), false);
        setSerializers(operation);
        final String rowKey = getProperty(KEY).getStringValue();
        final String columnName = getProperty(COLUMN_NAME).getStringValue();
        return operation.getComposite(rowKey, columnName);
    }
}
| 7,588 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/sampler/DeleteSampler.java | package com.netflix.jmeter.sampler;
/**
 * JMeter sampler that deletes a single column from Cassandra.
 */
public class DeleteSampler extends AbstractSampler
{
    private static final long serialVersionUID = -2103499609822848595L;

    /**
     * Deletes the configured column from the configured row.
     */
    public ResponseData execute() throws OperationException
    {
        final Operation operation = Connection.getInstance().newOperation(getColumnFamily(), false);
        setSerializers(operation);
        return operation.delete(getKey(), getColumnName());
    }
}
| 7,589 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/CompositPut.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JCheckBox;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.CompsitePutSampler;
/**
 * GUI panel for the composite-put sampler.
 */
public class CompositPut extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    private static final String LABEL = "Cassandra Composite Put";
    private JTextField CNAME;
    private JTextField VALUE;
    private JComboBox VSERIALIZER;
    private JCheckBox IS_COUNTER;

    /** Copies the sampler's saved properties into the form fields. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        CNAME.setText(element.getPropertyAsString(CompsitePutSampler.COLUMN_NAME));
        VALUE.setText(element.getPropertyAsString(CompsitePutSampler.VALUE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(CompsitePutSampler.VALUE_SERIALIZER_TYPE));
        IS_COUNTER.setSelected(element.getPropertyAsBoolean(CompsitePutSampler.IS_COUNTER));
    }

    /** Creates a new sampler populated from the current form state. */
    public TestElement createTestElement()
    {
        CompsitePutSampler sampler = new CompsitePutSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the form state back into the sampler's properties. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (sampler instanceof CompsitePutSampler)
        {
            CompsitePutSampler gSampler = (CompsitePutSampler) sampler;
            gSampler.setVSerializerType((String) VSERIALIZER.getSelectedItem());
            gSampler.setColumnName(CNAME.getText());
            gSampler.setValue(VALUE.getText());
            gSampler.setCounter(IS_COUNTER.isSelected());
        }
    }

    /** Restores the default field values. */
    public void initFields()
    {
        CNAME.setText("${__Random(1,1000)}");
        VALUE.setText("${__Random(1,1000)}");
        // Default to a real serializer name. The previous "Value Serializer"
        // placeholder is not an entry in the combo box, so selecting it was a
        // silent no-op; sibling panels default to StringSerializer.
        VSERIALIZER.setSelectedItem("StringSerializer");
        IS_COUNTER.setSelected(false);
    }

    /** Lays out the composite-put-specific rows of the form grid. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, CNAME = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("Column Value: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, VALUE = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 6, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 6, VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
        addToPanel(mainPanel, labelConstraints, 0, 7, new JLabel("Counter: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 7, IS_COUNTER = new JCheckBox());
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,590 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/GetRangeSlice.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JCheckBox;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.GetRangeSliceSampler;
/**
 * GUI panel for the range-slice (column slice) sampler.
 */
public class GetRangeSlice extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    private static final String LABEL = "Cassandra Get Range Slice";
    private JTextField START_COLUMN_NAME;
    private JTextField END_COLUMN_NAME;
    private JTextField COUNT;
    private JCheckBox IS_REVERSE;
    private JComboBox CSERIALIZER;
    private JComboBox VSERIALIZER;

    /** Loads the saved sampler properties into the form fields. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        START_COLUMN_NAME.setText(element.getPropertyAsString(GetRangeSliceSampler.START_COLUMN_NAME));
        END_COLUMN_NAME.setText(element.getPropertyAsString(GetRangeSliceSampler.END_COLUMN_NAME));
        COUNT.setText(element.getPropertyAsString(GetRangeSliceSampler.COUNT));
        IS_REVERSE.setSelected(element.getPropertyAsBoolean(GetRangeSliceSampler.IS_REVERSE));
        CSERIALIZER.setSelectedItem(element.getPropertyAsString(GetRangeSliceSampler.COLUMN_SERIALIZER_TYPE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(GetRangeSliceSampler.VALUE_SERIALIZER_TYPE));
    }

    /** Builds a fresh sampler populated from the current form state. */
    public TestElement createTestElement()
    {
        final GetRangeSliceSampler sampler = new GetRangeSliceSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Pushes the form state into the given sampler, if it is ours. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (!(sampler instanceof GetRangeSliceSampler))
            return;
        final GetRangeSliceSampler target = (GetRangeSliceSampler) sampler;
        target.setCSerializerType((String) CSERIALIZER.getSelectedItem());
        target.setVSerializerType((String) VSERIALIZER.getSelectedItem());
        target.setStartName(START_COLUMN_NAME.getText());
        target.setEndName(END_COLUMN_NAME.getText());
        target.setCount(COUNT.getText());
        target.setReverse(IS_REVERSE.isSelected());
    }

    /** Restores the default field values. */
    public void initFields()
    {
        START_COLUMN_NAME.setText("${__Random(1,1000)}");
        END_COLUMN_NAME.setText("${__Random(1,1000)}");
        COUNT.setText("100");
        IS_REVERSE.setSelected(true);
        CSERIALIZER.setSelectedItem("StringSerializer");
        VSERIALIZER.setSelectedItem("StringSerializer");
    }

    /** Lays out the slice-specific rows of the form grid. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Start Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, START_COLUMN_NAME = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("End Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, END_COLUMN_NAME = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Count: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, COUNT = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 6, new JLabel("Reverse: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 6, IS_REVERSE = new JCheckBox());
        addToPanel(mainPanel, labelConstraints, 0, 7, new JLabel("Column Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 7, CSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
        addToPanel(mainPanel, labelConstraints, 0, 8, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 8, VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,591 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/CompositGet.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.CompositGetSampler;
/**
 * GUI panel for the composite-get sampler.
 */
public class CompositGet extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    // Made final for consistency with the other panels (it was a mutable
    // public static String); kept public in case external code reads it.
    public static final String LABEL = "Cassandra Composite Get";
    private JTextField CNAME;
    private JComboBox VSERIALIZER;

    /** Copies the sampler's saved properties into the form fields. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        CNAME.setText(element.getPropertyAsString(CompositGetSampler.COLUMN_NAME));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(CompositGetSampler.VALUE_SERIALIZER_TYPE));
    }

    /** Creates a new sampler populated from the current form state. */
    public TestElement createTestElement()
    {
        CompositGetSampler sampler = new CompositGetSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the form state back into the sampler's properties. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (sampler instanceof CompositGetSampler)
        {
            CompositGetSampler gSampler = (CompositGetSampler) sampler;
            gSampler.setVSerializerType((String) VSERIALIZER.getSelectedItem());
            gSampler.setColumnName(CNAME.getText());
        }
    }

    /** Restores the default field values. */
    public void initFields()
    {
        CNAME.setText("${__Random(1,1000)}:${__Random(1,1000)}");
        VSERIALIZER.setSelectedItem("StringSerializer");
    }

    /** Lays out the composite-get-specific rows of the form grid. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, CNAME = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,592 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/AbstractGUI.java | package com.netflix.jmeter.gui;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Component;
import java.awt.Container;
import java.awt.Cursor;
import java.awt.Font;
import java.awt.GridBagConstraints;
import java.awt.GridBagLayout;
import javax.swing.BorderFactory;
import javax.swing.JComboBox;
import javax.swing.JComponent;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import javax.swing.border.Border;
import org.apache.jmeter.samplers.gui.AbstractSamplerGui;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.Connection;
public abstract class AbstractGUI extends AbstractSamplerGui
{
private static final long serialVersionUID = -1372154378991423872L;
private static final String WIKI = "https://github.com/Netflix/CassJMeter";
private JTextField KEY;
private JComboBox KSERIALIZER;
private JTextField COLUMN_FAMILY;
    /**
     * Builds the GUI skeleton shared by all Cassandra sampler panels: title
     * and help link at the top, then a grid with the common column-family and
     * row-key fields, the subclass-specific rows (added via init), and the
     * key-serializer selector at the bottom.
     */
    public AbstractGUI()
    {
        setLayout(new BorderLayout(0, 5));
        setBorder(makeBorder());
        add(addHelpLinkToPanel(makeTitlePanel(), WIKI), BorderLayout.NORTH);
        JPanel mainPanel = new JPanel(new GridBagLayout());
        // Labels hug the end of their cell; editors stretch horizontally.
        GridBagConstraints labelConstraints = new GridBagConstraints();
        labelConstraints.anchor = GridBagConstraints.FIRST_LINE_END;
        GridBagConstraints editConstraints = new GridBagConstraints();
        editConstraints.anchor = GridBagConstraints.FIRST_LINE_START;
        editConstraints.weightx = 1.0;
        editConstraints.fill = GridBagConstraints.HORIZONTAL;
        addToPanel(mainPanel, labelConstraints, 0, 1, new JLabel("Column Family: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 1, COLUMN_FAMILY = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 2, new JLabel("Row Key: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 2, KEY = new JTextField());
        // Subclass rows occupy grid rows 3..9; the serializer row is fixed at 10.
        init(mainPanel, labelConstraints, editConstraints);
        addToPanel(mainPanel, labelConstraints, 0, 10, new JLabel("Key Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 10, KSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
        JPanel container = new JPanel(new BorderLayout());
        container.add(mainPanel, BorderLayout.NORTH);
        add(container, BorderLayout.CENTER);
    }
    /**
     * Resets the panel to default values (shared fields here, subclass fields
     * via initFields) and shuts down any existing Cassandra connection so the
     * next test run starts with fresh settings.
     */
    @Override
    public void clearGui()
    {
        super.clearGui();
        KEY.setText("${__Random(1,1000)}");
        KSERIALIZER.setSelectedItem("StringSerializer");
        COLUMN_FAMILY.setText("Standard3");
        initFields();
        if (Connection.connection != null)
        {
            // A connection already exists; tear it down so stale state is dropped.
            Connection.getInstance().shutdown();
        }
    }
    /**
     * Copies the shared sampler properties (row key, key serializer, column
     * family) into the common form fields; subclasses add their own fields
     * in their configure override.
     */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        KEY.setText(element.getPropertyAsString(AbstractSampler.KEY));
        KSERIALIZER.setSelectedItem(element.getPropertyAsString(AbstractSampler.KEY_SERIALIZER_TYPE));
        COLUMN_FAMILY.setText(element.getPropertyAsString(AbstractSampler.COLUMN_FAMILY));
    }
/**
 * Copies the shared form fields (key serializer, row key, column family)
 * back onto the test element when it is an AbstractSampler; any other
 * element type is left untouched.
 */
protected void configureTestElement(TestElement mc)
{
    super.configureTestElement(mc);
    if (!(mc instanceof AbstractSampler))
    {
        return;
    }
    AbstractSampler sampler = (AbstractSampler) mc;
    sampler.setKSerializerType((String) KSERIALIZER.getSelectedItem());
    sampler.setKey(KEY.getText());
    sampler.setColumnFamily(COLUMN_FAMILY.getText());
}
/**
 * Decorates the given title panel with a "Help on this plugin" link that
 * opens {@code helpPage} in the default browser, plus a small version label.
 *
 * @param panel    the title panel to decorate (returned unchanged when the
 *                 AWT Desktop API is unavailable, e.g. headless)
 * @param helpPage URL opened when the help link is clicked
 * @return the same panel instance, for call chaining
 */
public static Component addHelpLinkToPanel(Container panel, String helpPage)
{
    if (!java.awt.Desktop.isDesktopSupported())
        return panel;
    JLabel icon = new JLabel();
    JLabel link = new JLabel("Help on this plugin");
    link.setForeground(Color.blue);
    link.setFont(link.getFont().deriveFont(Font.PLAIN));
    link.setCursor(new Cursor(Cursor.HAND_CURSOR));
    // FIX: helpPage was previously unused — the link had a hand cursor and
    // underline but clicking it did nothing. Open the page on click; failures
    // (no browser, bad URI) are deliberately swallowed as best effort.
    final String target = helpPage;
    link.addMouseListener(new java.awt.event.MouseAdapter()
    {
        @Override
        public void mouseClicked(java.awt.event.MouseEvent e)
        {
            try
            {
                java.awt.Desktop.getDesktop().browse(new java.net.URI(target));
            }
            catch (Exception ignored)
            {
                // best effort only: help link failures must not break the GUI
            }
        }
    });
    Border border = BorderFactory.createMatteBorder(0, 0, 1, 0, java.awt.Color.blue);
    link.setBorder(border);
    // NOTE(review): hard-coded version number — presumably a placeholder;
    // confirm the intended version source before release.
    JLabel version = new JLabel("v" + 123);
    version.setFont(version.getFont().deriveFont(Font.PLAIN).deriveFont(11F));
    version.setForeground(Color.GRAY);
    // Lay out [icon][link (stretching)][version] on one row.
    JPanel panelLink = new JPanel(new GridBagLayout());
    GridBagConstraints gridBagConstraints;
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 0;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.insets = new java.awt.Insets(0, 1, 0, 0);
    panelLink.add(icon, gridBagConstraints);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 1;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.anchor = java.awt.GridBagConstraints.WEST;
    gridBagConstraints.weightx = 1.0;
    gridBagConstraints.insets = new java.awt.Insets(0, 2, 3, 0);
    panelLink.add(link, gridBagConstraints);
    gridBagConstraints = new java.awt.GridBagConstraints();
    gridBagConstraints.gridx = 2;
    gridBagConstraints.gridy = 0;
    gridBagConstraints.insets = new java.awt.Insets(0, 0, 0, 4);
    panelLink.add(version, gridBagConstraints);
    panel.add(panelLink);
    return panel;
}
/**
 * Places {@code component} at the given grid cell of {@code panel} by
 * overwriting the position of the shared constraints template before adding.
 *
 * @param panel       container laid out with a GridBagLayout
 * @param constraints reusable template; only gridx/gridy are overwritten
 * @param col         zero-based grid column
 * @param row         zero-based grid row
 * @param component   widget to insert
 */
public void addToPanel(JPanel panel, GridBagConstraints constraints, int col, int row, JComponent component)
{
    constraints.gridy = row;
    constraints.gridx = col;
    panel.add(component, constraints);
}
// JMeter shows the static label in the test-plan tree; delegate to the subclass.
@Override
public String getStaticLabel()
{
    return getLable();
}

// No resource bundle is used, so the plain label doubles as the resource key.
@Override
public String getLabelResource()
{
    return getLable();
}

// NOTE(review): "getLable" is a typo for "getLabel", but renaming it would
// break every subclass override, so the existing name is preserved.
public abstract String getLable();

// Resets the subclass-specific form fields to defaults (called from clearGui()).
public abstract void initFields();

// Lets subclasses add their own rows (grid rows 3..9) to the shared form panel.
public abstract void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints);
}
| 7,593 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/BatchPut.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JCheckBox;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextArea;
import javax.swing.border.BevelBorder;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.BatchPutSampler;
/**
 * JMeter GUI for the Cassandra batch-put sampler: a multi-line name:value
 * column list plus column/value serializer pickers and a counter-column flag,
 * layered on top of the shared {@link AbstractGUI} rows.
 */
public class BatchPut extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    public static String LABEL = "Cassandra Batch Put";

    private JTextArea NAME_AND_VALUE;
    private JComboBox CSERIALIZER;
    private JComboBox VSERIALIZER;
    private JCheckBox IS_COUNTER;

    /** Adds the batch-put-specific rows below the shared ones. */
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        NAME_AND_VALUE = new JTextArea();
        NAME_AND_VALUE.setRows(10);
        NAME_AND_VALUE.setBorder(new BevelBorder(BevelBorder.LOWERED));
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column K/V(eg: Name:Value): ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, NAME_AND_VALUE);
        CSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("Column Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, CSERIALIZER);
        VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, VSERIALIZER);
        IS_COUNTER = new JCheckBox();
        addToPanel(mainPanel, labelConstraints, 0, 6, new JLabel("Counter: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 6, IS_COUNTER);
    }

    /** Restores the default form values. */
    public void initFields()
    {
        NAME_AND_VALUE.setText("${__Random(1,1000)}:${__Random(1,1000)}\n${__Random(1,1000)}:${__Random(1,1000)}");
        // NOTE(review): "Column Serializer"/"Value Serializer" are likely not
        // entries of AbstractSampler.getSerializerNames(), in which case these
        // calls leave the selection unchanged — confirm the intended defaults.
        CSERIALIZER.setSelectedItem("Column Serializer");
        VSERIALIZER.setSelectedItem("Value Serializer");
        IS_COUNTER.setSelected(false);
    }

    /** Populates the form from a previously-saved test element. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        NAME_AND_VALUE.setText(element.getPropertyAsString(BatchPutSampler.NAME_AND_VALUE));
        CSERIALIZER.setSelectedItem(element.getPropertyAsString(BatchPutSampler.COLUMN_SERIALIZER_TYPE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(BatchPutSampler.VALUE_SERIALIZER_TYPE));
        IS_COUNTER.setSelected(element.getPropertyAsBoolean(BatchPutSampler.IS_COUNTER));
    }

    /** Creates a sampler pre-populated from the current form state. */
    public TestElement createTestElement()
    {
        BatchPutSampler sampler = new BatchPutSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the current form state onto the given sampler. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (!(sampler instanceof BatchPutSampler))
        {
            return;
        }
        BatchPutSampler target = (BatchPutSampler) sampler;
        target.setCSerializerType((String) CSERIALIZER.getSelectedItem());
        target.setVSerializerType((String) VSERIALIZER.getSelectedItem());
        target.setNameValue(NAME_AND_VALUE.getText());
        target.setCounter(IS_COUNTER.isSelected());
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,594 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/Put.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JCheckBox;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.PutSampler;
/**
 * JMeter GUI for the Cassandra put (single column insert) sampler: column
 * name/value fields, column/value serializer pickers, and a counter-column
 * flag, layered on top of the shared {@link AbstractGUI} rows.
 */
public class Put extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    private static final String LABEL = "Cassandra Put";
    private JTextField CNAME;
    private JTextField VALUE;
    private JComboBox CSERIALIZER;
    private JComboBox VSERIALIZER;
    private JCheckBox IS_COUNTER;

    /** Populates the form from a previously-saved test element. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        CNAME.setText(element.getPropertyAsString(PutSampler.COLUMN_NAME));
        VALUE.setText(element.getPropertyAsString(PutSampler.VALUE));
        CSERIALIZER.setSelectedItem(element.getPropertyAsString(PutSampler.COLUMN_SERIALIZER_TYPE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(PutSampler.VALUE_SERIALIZER_TYPE));
        IS_COUNTER.setSelected(element.getPropertyAsBoolean(PutSampler.IS_COUNTER));
    }

    /** Creates a sampler pre-populated from the current form state. */
    public TestElement createTestElement()
    {
        PutSampler sampler = new PutSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the current form state onto the given sampler. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (sampler instanceof PutSampler)
        {
            PutSampler gSampler = (PutSampler) sampler;
            gSampler.setCSerializerType((String) CSERIALIZER.getSelectedItem());
            gSampler.setVSerializerType((String) VSERIALIZER.getSelectedItem());
            gSampler.setColumnName(CNAME.getText());
            gSampler.setValue(VALUE.getText());
            gSampler.setCounter(IS_COUNTER.isSelected());
        }
    }

    /** Restores the default form values. */
    public void initFields()
    {
        CNAME.setText("${__Random(1,1000)}");
        VALUE.setText("${__Random(1,1000)}");
        // NOTE(review): these placeholder strings are likely not real entries of
        // AbstractSampler.getSerializerNames(); confirm the intended defaults.
        CSERIALIZER.setSelectedItem("Column Serializer");
        VSERIALIZER.setSelectedItem("Value Serializer");
        // FIX: the counter checkbox was never reset here (unlike
        // BatchPut.initFields()), so clearing the GUI silently kept the
        // previous counter setting.
        IS_COUNTER.setSelected(false);
    }

    /** Adds the put-specific rows below the shared ones. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, CNAME = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("Column Value: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, VALUE = new JTextField());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Column Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, CSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
        addToPanel(mainPanel, labelConstraints, 0, 6, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 6, VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray()));
        addToPanel(mainPanel, labelConstraints, 0, 7, new JLabel("Counter: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 7, IS_COUNTER = new JCheckBox());
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,595 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/Delete.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.DeleteSampler;
/**
 * JMeter GUI for the Cassandra delete sampler: a column-name field plus
 * column/value serializer pickers, layered on top of the shared
 * {@link AbstractGUI} rows.
 */
public class Delete extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    public static String LABEL = "Cassandra Delete";

    private JTextField CNAME;
    private JComboBox CSERIALIZER;
    private JComboBox VSERIALIZER;

    /** Adds the delete-specific rows below the shared ones. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        CNAME = new JTextField();
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, CNAME);
        CSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("Column Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, CSERIALIZER);
        VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, VSERIALIZER);
    }

    /** Restores the default form values. */
    public void initFields()
    {
        CNAME.setText("${__Random(1,1000)}");
        CSERIALIZER.setSelectedItem("StringSerializer");
        VSERIALIZER.setSelectedItem("StringSerializer");
    }

    /** Populates the form from a previously-saved test element. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        CNAME.setText(element.getPropertyAsString(DeleteSampler.COLUMN_NAME));
        CSERIALIZER.setSelectedItem(element.getPropertyAsString(DeleteSampler.COLUMN_SERIALIZER_TYPE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(DeleteSampler.VALUE_SERIALIZER_TYPE));
    }

    /** Creates a sampler pre-populated from the current form state. */
    public TestElement createTestElement()
    {
        DeleteSampler sampler = new DeleteSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the current form state onto the given sampler. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (!(sampler instanceof DeleteSampler))
        {
            return;
        }
        DeleteSampler target = (DeleteSampler) sampler;
        target.setCSerializerType((String) CSERIALIZER.getSelectedItem());
        target.setVSerializerType((String) VSERIALIZER.getSelectedItem());
        target.setColumnName(CNAME.getText());
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,596 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/gui/Get.java | package com.netflix.jmeter.gui;
import java.awt.GridBagConstraints;
import javax.swing.JComboBox;
import javax.swing.JLabel;
import javax.swing.JPanel;
import javax.swing.JTextField;
import org.apache.jmeter.testelement.TestElement;
import com.netflix.jmeter.sampler.AbstractSampler;
import com.netflix.jmeter.sampler.GetSampler;
/**
 * JMeter GUI for the Cassandra get (single column read) sampler: a
 * column-name field plus column/value serializer pickers, layered on top of
 * the shared {@link AbstractGUI} rows.
 */
public class Get extends AbstractGUI
{
    private static final long serialVersionUID = 3197090412869386190L;
    public static String LABEL = "Cassandra Get";

    private JTextField CNAME;
    private JComboBox CSERIALIZER;
    private JComboBox VSERIALIZER;

    /** Adds the get-specific rows below the shared ones. */
    @Override
    public void init(JPanel mainPanel, GridBagConstraints labelConstraints, GridBagConstraints editConstraints)
    {
        CNAME = new JTextField();
        addToPanel(mainPanel, labelConstraints, 0, 3, new JLabel("Column Name: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 3, CNAME);
        CSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 4, new JLabel("Column Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 4, CSERIALIZER);
        VSERIALIZER = new JComboBox(AbstractSampler.getSerializerNames().toArray());
        addToPanel(mainPanel, labelConstraints, 0, 5, new JLabel("Value Serializer: ", JLabel.RIGHT));
        addToPanel(mainPanel, editConstraints, 1, 5, VSERIALIZER);
    }

    /** Restores the default form values. */
    public void initFields()
    {
        CNAME.setText("${__Random(1,1000)}");
        CSERIALIZER.setSelectedItem("StringSerializer");
        VSERIALIZER.setSelectedItem("StringSerializer");
    }

    /** Populates the form from a previously-saved test element. */
    @Override
    public void configure(TestElement element)
    {
        super.configure(element);
        CNAME.setText(element.getPropertyAsString(GetSampler.COLUMN_NAME));
        CSERIALIZER.setSelectedItem(element.getPropertyAsString(GetSampler.COLUMN_SERIALIZER_TYPE));
        VSERIALIZER.setSelectedItem(element.getPropertyAsString(GetSampler.VALUE_SERIALIZER_TYPE));
    }

    /** Creates a sampler pre-populated from the current form state. */
    public TestElement createTestElement()
    {
        GetSampler sampler = new GetSampler();
        modifyTestElement(sampler);
        sampler.setComment("test comment");
        return sampler;
    }

    /** Writes the current form state onto the given sampler. */
    public void modifyTestElement(TestElement sampler)
    {
        super.configureTestElement(sampler);
        if (!(sampler instanceof GetSampler))
        {
            return;
        }
        GetSampler target = (GetSampler) sampler;
        target.setCSerializerType((String) CSERIALIZER.getSelectedItem());
        target.setVSerializerType((String) VSERIALIZER.getSelectedItem());
        target.setColumnName(CNAME.getText());
    }

    @Override
    public String getLable()
    {
        return LABEL;
    }
}
| 7,597 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/report/ServoSummariser.java | package com.netflix.jmeter.report;
import java.io.File;
import java.util.concurrent.TimeUnit;
import com.netflix.servo.monitor.Monitors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.servo.annotations.DataSourceType;
import com.netflix.servo.annotations.Monitor;
import com.netflix.servo.publish.BasicMetricFilter;
import com.netflix.servo.publish.CounterToRateMetricTransform;
import com.netflix.servo.publish.FileMetricObserver;
import com.netflix.servo.publish.MetricObserver;
import com.netflix.servo.publish.MonitorRegistryMetricPoller;
import com.netflix.servo.publish.PollRunnable;
import com.netflix.servo.publish.PollScheduler;
/**
 * Summariser that publishes per-label JMeter statistics through Netflix
 * Servo: each sample label is registered as a Servo monitor object whose
 * GAUGE getters expose the snapshot of the previous reporting interval.
 */
public class ServoSummariser extends AbstractSummariser
{
    private static final long serialVersionUID = 6638743483539533164L;
    private static final Logger logger = LoggerFactory.getLogger(ServoSummariser.class);
    // One-time setup guard; deliberately set even on failure so we never retry.
    private static boolean initialized = false;

    @Override
    protected void initializePlatform()
    {
        if (initialized)
            return;
        try
        {
            PollScheduler scheduler = PollScheduler.getInstance();
            scheduler.start();
            // Write metrics to files in the working directory, converting
            // counters to rates over a 2-minute window, polling every minute.
            MetricObserver fileObserver = new FileMetricObserver("stats", new File("."));
            MetricObserver transform = new CounterToRateMetricTransform(fileObserver, 2, TimeUnit.MINUTES);
            PollRunnable task = new PollRunnable(new MonitorRegistryMetricPoller(), BasicMetricFilter.MATCH_ALL, transform);
            scheduler.addPoller(task, 1, TimeUnit.MINUTES);
        }
        catch (Throwable e)
        {
            // Metrics publishing is best effort; never break the load test.
            logger.error("Epic Plugin was not intialized: ", e);
        }
        initialized = true;
    }

    @Override
    protected AbstractRunningSampleWrapper newRunningSampleWrapper(String label)
    {
        return new ServoRunningSampleWrapper(label);
    }

    /**
     * Per-label wrapper that (un)registers itself with the Servo monitor
     * registry. All getters return 0 until the first one-minute interval has
     * completed (FIX: they previously dereferenced the null {@code previous}
     * snapshot and threw NullPointerException when Servo polled early).
     */
    public static class ServoRunningSampleWrapper extends AbstractRunningSampleWrapper
    {
        // Servo monitor object name: "JMeter_<label>" with spaces replaced by
        // underscores; intentionally distinct from the parent's raw label.
        public final String name;

        public ServoRunningSampleWrapper(String name)
        {
            super(name);
            this.name = ("JMeter_" + name).replace(" ", "_");
        }

        @Monitor(name = "ErrorPercentage", type = DataSourceType.GAUGE)
        public double getErrorPercentage()
        {
            return previous == null ? 0 : previous.getErrorPercentage();
        }

        @Monitor(name = "SampleCount", type = DataSourceType.GAUGE)
        public int getCount()
        {
            return previous == null ? 0 : previous.getCount();
        }

        @Monitor(name = "Rate", type = DataSourceType.GAUGE)
        public double getRate()
        {
            return previous == null ? 0 : previous.getRate();
        }

        @Monitor(name = "Mean", type = DataSourceType.GAUGE)
        public double getMean()
        {
            return previous == null ? 0 : previous.getMean();
        }

        @Monitor(name = "Min", type = DataSourceType.GAUGE)
        public long getMin()
        {
            return previous == null ? 0 : previous.getMin();
        }

        @Monitor(name = "Max", type = DataSourceType.GAUGE)
        public long getMax()
        {
            return previous == null ? 0 : previous.getMax();
        }

        @Monitor(name = "TotalBytes", type = DataSourceType.GAUGE)
        public long getTotalBytes()
        {
            return previous == null ? 0 : previous.getTotalBytes();
        }

        @Monitor(name = "StandardDeviation", type = DataSourceType.GAUGE)
        public double getStandardDeviation()
        {
            return previous == null ? 0 : previous.getStandardDeviation();
        }

        @Monitor(name = "AvgPageBytes", type = DataSourceType.GAUGE)
        public double getAvgPageBytes()
        {
            return previous == null ? 0 : previous.getAvgPageBytes();
        }

        @Monitor(name = "BytesPerSecond", type = DataSourceType.GAUGE)
        public double getBytesPerSecond()
        {
            return previous == null ? 0 : previous.getBytesPerSecond();
        }

        @Monitor(name = "KBPerSecond", type = DataSourceType.GAUGE)
        public double getKBPerSecond()
        {
            return previous == null ? 0 : previous.getKBPerSecond();
        }

        @Override
        public void start()
        {
            // Expose every @Monitor getter on this object to the Servo pollers.
            Monitors.registerObject(name, this);
        }

        @Override
        public void shutdown()
        {
            Monitors.unregisterObject(name, this);
        }
    }
}
| 7,598 |
0 | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter | Create_ds/CassJMeter/src/main/java/com/netflix/jmeter/report/AbstractSummariser.java | package com.netflix.jmeter.report;
import java.io.Serializable;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.jmeter.engine.event.LoopIterationEvent;
import org.apache.jmeter.engine.util.NoThreadClone;
import org.apache.jmeter.samplers.Remoteable;
import org.apache.jmeter.samplers.SampleEvent;
import org.apache.jmeter.samplers.SampleListener;
import org.apache.jmeter.samplers.SampleResult;
import org.apache.jmeter.testelement.AbstractTestElement;
import org.apache.jmeter.testelement.TestListener;
import org.apache.jmeter.util.Calculator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class AbstractSummariser extends AbstractTestElement implements Serializable, SampleListener, TestListener, NoThreadClone, Remoteable
{
private static final long serialVersionUID = 3089085300897902045L;
private static final Logger logger = LoggerFactory.getLogger(AbstractSummariser.class);
private static final ConcurrentHashMap<String, AbstractRunningSampleWrapper> allTests = new ConcurrentHashMap<String, AbstractRunningSampleWrapper>();
private static final long INTERVAL = 60 * 1000; // Every Minute
private static boolean initalized = false;
public AbstractSummariser()
{
super();
initializePlatform();
}
protected abstract void initializePlatform();
protected abstract AbstractRunningSampleWrapper newRunningSampleWrapper(String label);
public static abstract class AbstractRunningSampleWrapper
{
protected volatile Calculator delta;
protected volatile Calculator previous;
protected volatile long totalUpdated = 0;
private String name;
public AbstractRunningSampleWrapper(String name)
{
this.name = name;
this.delta = new Calculator(name);
logHeader();
}
public void moveDelta()
{
previous = delta;
delta = new Calculator(name);
}
public abstract void start();
public abstract void shutdown();
public void logHeader()
{
StringBuffer buff = new StringBuffer();
buff.append("Name").append(", ");
buff.append("Count").append(", ");
buff.append("Rate").append(", ");
buff.append("Min").append(", ");
buff.append("Max").append(", ");
buff.append("Mean").append(", ");
buff.append("TotalBytes").append(", ");
buff.append("StandardDeviation").append(", ");
buff.append("ErrorPercentage").append(", ");
buff.append("AvgPageBytes").append(", ");
buff.append("BytesPerSecond").append(", ");
buff.append("KBPerSecond").append(", ");
logger.info(buff.toString());
}
public void log()
{
StringBuffer buff = new StringBuffer();
buff.append(name).append(", ");
buff.append(previous.getCount()).append(", ");
buff.append(previous.getRate()).append(", ");
buff.append(previous.getMin()).append(", ");
buff.append(previous.getMax()).append(", ");
buff.append(previous.getMean()).append(", ");
buff.append(previous.getTotalBytes()).append(", ");
buff.append(previous.getStandardDeviation()).append(", ");
buff.append(previous.getErrorPercentage()).append(", ");
buff.append(previous.getAvgPageBytes()).append(", ");
buff.append(previous.getBytesPerSecond()).append(", ");
buff.append(previous.getKBPerSecond()).append(", ");
logger.info(buff.toString());
}
}
public void sampleOccurred(SampleEvent e)
{
if (e.getResult() == null || e.getResult() == null)
return;
SampleResult s = e.getResult();
long now = System.currentTimeMillis();// in seconds
AbstractRunningSampleWrapper totals;
synchronized (allTests)
{
String label = s.getSampleLabel();
if ((totals = allTests.get(label)) == null)
{
totals = newRunningSampleWrapper(label);
totals.start();
allTests.put(label, totals);
}
}
synchronized (totals)
{
totals.delta.addSample(s);
if ((now > totals.totalUpdated + INTERVAL))
{
totals.moveDelta();
totals.totalUpdated = now;
totals.log();
}
}
}
@Override
public void sampleStarted(SampleEvent e)
{
}
@Override
public void sampleStopped(SampleEvent e)
{
}
@Override
public void testStarted()
{
testStarted("local");
}
@Override
public void testEnded()
{
testEnded("local");
}
@Override
public void testStarted(String host)
{
allTests.clear();
}
public void testEnded(String host)
{
for (AbstractRunningSampleWrapper wrapper : allTests.values())
{
wrapper.log();
wrapper.shutdown();
}
allTests.clear();
}
public void testIterationStart(LoopIterationEvent event)
{
}
} | 7,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.