index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetColumns.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetColumns;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Gremlin ResultSet implementation backing {@code DatabaseMetaData.getColumns()}.
 */
public class GremlinResultSetGetColumns extends ResultSetGetColumns implements java.sql.ResultSet {
    // Maps each getColumns() result column name to the Java class of its values.
    private static final Map<String, Class<?>> COLUMN_TYPE_MAP = new HashMap<>();

    static {
        // String-valued metadata columns.
        final String[] stringColumns = {
                "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "TYPE_NAME",
                "REMARKS", "COLUMN_DEF", "IS_NULLABLE", "SCOPE_CATALOG", "SCOPE_SCHEMA",
                "SCOPE_TABLE", "IS_AUTOINCREMENT", "IS_GENERATEDCOLUMN"};
        // Integer-valued metadata columns.
        final String[] integerColumns = {
                "DATA_TYPE", "COLUMN_SIZE", "BUFFER_LENGTH", "DECIMAL_DIGITS", "NUM_PREC_RADIX",
                "NULLABLE", "SQL_DATA_TYPE", "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH",
                "ORDINAL_POSITION", "SOURCE_DATA_TYPE"};
        for (final String columnName : stringColumns) {
            COLUMN_TYPE_MAP.put(columnName, String.class);
        }
        for (final String columnName : integerColumns) {
            COLUMN_TYPE_MAP.put(columnName, Integer.class);
        }
    }

    /**
     * GremlinResultSetGetColumns constructor, initializes super class.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public GremlinResultSetGetColumns(final Statement statement,
                                      final GremlinSchema gremlinSchema,
                                      final ResultSetInfoWithoutRows resultSetInfoWithoutRows)
            throws SQLException {
        super(statement, gremlinSchema, resultSetInfoWithoutRows);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        final List<String> columnNames = getColumns();
        final List<Class<?>> columnClasses = new ArrayList<>(columnNames.size());
        for (int i = 0; i < columnNames.size(); i++) {
            columnClasses.add(COLUMN_TYPE_MAP.get(columnNames.get(i)));
        }
        return new GremlinResultSetMetadata(columnNames, columnClasses);
    }
}
| 7,400 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetCatalogs.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetCatalogs;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Gremlin ResultSet implementation backing {@code DatabaseMetaData.getCatalogs()}.
 */
public class GremlinResultSetGetCatalogs extends ResultSetGetCatalogs {
    /**
     * Constructor for GremlinResultSetGetCatalogs.
     *
     * @param statement Statement Object.
     */
    public GremlinResultSetGetCatalogs(final Statement statement) {
        super(statement);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        // Every catalog metadata column is String-typed.
        final List<String> columnNames = getColumns();
        final List<Class<?>> columnClasses = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnClasses.add(String.class);
        }
        return new GremlinResultSetMetadata(columnNames, columnClasses);
    }
}
| 7,401 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.gremlin.GremlinTypeMapping;
import software.aws.neptune.jdbc.ResultSet;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Gremlin ResultSet class. Wraps either a row-bearing result (query results) or a
 * metadata-only result (no rows).
 */
public class GremlinResultSet extends ResultSet implements java.sql.ResultSet {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinResultSet.class);
    private final List<String> columns;
    // Null for metadata-only result sets; getValue() rejects data access in that case.
    private final List<Map<String, Object>> rows;
    private final Map<String, Class<?>> columnTypes;
    // Tracks whether the last column read was SQL NULL, per the JDBC wasNull() contract.
    private boolean wasNull = false;

    /**
     * GremlinResultSet constructor, initializes super class.
     *
     * @param statement     Statement Object.
     * @param resultSetInfo ResultSetInfoWithRows Object.
     */
    public GremlinResultSet(final java.sql.Statement statement, final ResultSetInfoWithRows resultSetInfo) {
        super(statement, resultSetInfo.getColumns(), resultSetInfo.getRows().size());
        this.columns = resultSetInfo.getColumns();
        this.rows = resultSetInfo.getRows();
        this.columnTypes = resultSetInfo.getColumnsTypes();
    }

    /**
     * GremlinResultSet constructor for results without row data, initializes super class.
     *
     * @param statement     Statement Object.
     * @param resultSetInfo ResultSetInfoWithoutRows Object.
     */
    public GremlinResultSet(final java.sql.Statement statement, final ResultSetInfoWithoutRows resultSetInfo) {
        super(statement, resultSetInfo.getColumns(), resultSetInfo.getRowCount());
        this.columns = resultSetInfo.getColumns();
        this.columnTypes = new HashMap<>();
        this.rows = null;
    }

    @Override
    protected void doClose() throws SQLException {
        // No resources held beyond the superclass; nothing to release.
    }

    @Override
    public boolean wasNull() throws SQLException {
        return wasNull;
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        final List<Class<?>> rowTypes = new ArrayList<>();
        for (final String column : columns) {
            // Columns without a recorded type default to String.
            rowTypes.add(columnTypes.getOrDefault(column, String.class));
        }
        return new GremlinResultSetMetadata(columns, rowTypes);
    }

    /**
     * Gets the value of the given column, converted to a String when the value's Java
     * type has no Gremlin-to-JDBC mapping.
     *
     * @param columnIndex the 1-based column index.
     * @return the raw value if null or of a mapped type; otherwise its String representation.
     * @throws SQLException if the result set is closed, row-less, or the index is invalid.
     */
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        final Object value = getValue(columnIndex);
        return (value == null) || GremlinTypeMapping.checkContains(value.getClass())
                ? value
                : value.toString();
    }

    private Object getValue(final int columnIndex) throws SQLException {
        verifyOpen();
        if (rows == null) {
            // Metadata-only result sets carry no row data.
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.UNSUPPORTED_RESULT_SET_TYPE);
        }
        validateRowColumn(columnIndex);
        final String colName = columns.get(columnIndex - 1);
        final Map<String, Object> row = rows.get(getRowIndex());
        final Object value = row.getOrDefault(colName, null);
        wasNull = (value == null);
        return value;
    }

    @Override
    public Object getObject(final int columnIndex, final Map<String, Class<?>> map) throws SQLException {
        LOGGER.trace("Getting column {} as an Object using provided Map.", columnIndex);
        final Object value = getValue(columnIndex);
        if (value == null) {
            // JDBC maps SQL NULL to a Java null. Previously this dereferenced
            // value.getClass() and threw NullPointerException for null column values.
            return null;
        }
        return getObject(columnIndex, map.get(GremlinTypeMapping.getJDBCType(value.getClass()).name()));
    }

    @AllArgsConstructor
    @Getter
    public static class ResultSetInfoWithRows {
        private final List<Map<String, Object>> rows;
        private final Map<String, Class<?>> columnsTypes;
        private final List<String> columns;
    }
}
| 7,402 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetSchemas.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetSchemas;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Gremlin ResultSet implementation backing {@code DatabaseMetaData.getSchemas()}.
 */
public class GremlinResultSetGetSchemas extends ResultSetGetSchemas {
    /**
     * Constructor for GremlinResultSetGetSchemas.
     *
     * @param statement Statement Object.
     */
    public GremlinResultSetGetSchemas(final Statement statement) {
        super(statement);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        // Every schema metadata column is String-typed.
        final List<String> columnNames = getColumns();
        final List<Class<?>> columnClasses = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnClasses.add(String.class);
        }
        return new GremlinResultSetMetadata(columnNames, columnClasses);
    }
}
| 7,403 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetTables.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTables;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Gremlin ResultSet implementation backing {@code DatabaseMetaData.getTables()}.
 */
public class GremlinResultSetGetTables extends ResultSetGetTables implements java.sql.ResultSet {
    // Maps each getTables() result column name to the Java class of its values.
    // All getTables() metadata columns are String-typed.
    private static final Map<String, Class<?>> COLUMN_TYPE_MAP = new HashMap<>();
    // TODO getTables() JavaDoc description has less properties listed, should this reflect that?

    static {
        // TODO AN-577 move this stuff to common.
        final String[] columnNames = {
                "TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS",
                "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "SELF_REFERENCING_COL_NAME",
                "REF_GENERATION"};
        for (final String columnName : columnNames) {
            COLUMN_TYPE_MAP.put(columnName, String.class);
        }
    }

    /**
     * GremlinResultSetGetTables constructor, initializes super class.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public GremlinResultSetGetTables(final Statement statement,
                                     final GremlinSchema gremlinSchema,
                                     final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
        super(statement, gremlinSchema, resultSetInfoWithoutRows);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        final List<String> columnNames = getColumns();
        final List<Class<?>> columnClasses = new ArrayList<>(columnNames.size());
        for (int i = 0; i < columnNames.size(); i++) {
            columnClasses.add(COLUMN_TYPE_MAP.get(columnNames.get(i)));
        }
        return new GremlinResultSetMetadata(columnNames, columnClasses);
    }
}
| 7,404 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetTypeInfo.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTypeInfo;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Gremlin ResultSet class for getTypeInfo; supplies the driver's supported type
 * information rows for {@code DatabaseMetaData.getTypeInfo()}.
 */
public class GremlinResultSetGetTypeInfo extends ResultSetGetTypeInfo {
// Shared, pre-built list of type-info rows; copied defensively per instance in the constructor.
private static final List<Map<String, Object>> TYPE_INFO = new ArrayList<>();
static {
// The order added to TYPE_INFO matters
// (the order of putInfo calls fixes the row order returned to JDBC clients — do not reorder).
putInfo(TYPE_INFO, "Boolean", Types.BIT, false, false);
putInfo(TYPE_INFO, "Byte", Types.TINYINT, false, true);
putInfo(TYPE_INFO, "Long", Types.BIGINT, false, true);
putInfo(TYPE_INFO, "Integer", Types.INTEGER, false, true);
putInfo(TYPE_INFO, "Short", Types.SMALLINT, false, true);
putInfo(TYPE_INFO, "Float", Types.REAL, false, true);
putInfo(TYPE_INFO, "Double", Types.DOUBLE, false, true);
putInfo(TYPE_INFO, "String", Types.VARCHAR, true, false);
// Non-mapped Gremlin types are surfaced as VARCHAR.
putInfo(TYPE_INFO, "byte[]", Types.VARCHAR, false, false);
putInfo(TYPE_INFO, "sql.Date", Types.DATE, false, false);
putInfo(TYPE_INFO, "util.Date", Types.DATE, false, false);
putInfo(TYPE_INFO, "Time", Types.TIME, false, false);
putInfo(TYPE_INFO, "Timestamp", Types.TIMESTAMP, false, false);
// Fills in the remaining constant-valued columns of each row (defined in the superclass).
populateConstants(TYPE_INFO);
}
/**
 * GremlinResultSetGetTypeInfo constructor, initializes super class.
 *
 * @param statement Statement Object.
 */
public GremlinResultSetGetTypeInfo(final Statement statement) {
super(statement, new ArrayList<>(TYPE_INFO));
}
}
| 7,405 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetGetTableTypes.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTableTypes;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Gremlin ResultSet implementation backing {@code DatabaseMetaData.getTableTypes()}.
 */
public class GremlinResultSetGetTableTypes extends ResultSetGetTableTypes {
    /**
     * Constructor for GremlinResultSetGetTableTypes.
     *
     * @param statement Statement Object.
     */
    public GremlinResultSetGetTableTypes(final Statement statement) {
        super(statement);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() {
        // Every table-type metadata column is String-typed.
        final List<String> columnNames = getColumns();
        final List<Class<?>> columnClasses = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnClasses.add(String.class);
        }
        return new GremlinResultSetMetadata(columnNames, columnClasses);
    }
}
| 7,406 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/resultset/GremlinResultSetMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.resultset;
import software.aws.neptune.gremlin.GremlinTypeMapping;
import software.aws.neptune.jdbc.ResultSetMetaData;
import java.sql.SQLException;
import java.util.List;
/**
 * Gremlin implementation of ResultSetMetadata. Pairs each column with the Java class
 * of its values and answers JDBC type questions via GremlinTypeMapping.
 */
public class GremlinResultSetMetadata extends ResultSetMetaData
        implements java.sql.ResultSetMetaData {
    private final List<Class<?>> columnTypes;

    /**
     * GremlinResultSetMetadata constructor.
     *
     * @param columns     List of column names.
     * @param columnTypes List of column types, parallel to {@code columns}.
     */
    public GremlinResultSetMetadata(final List<String> columns, final List<Class<?>> columnTypes) {
        super(columns);
        this.columnTypes = columnTypes;
    }

    /**
     * Get Gremlin type of a given column.
     *
     * @param column the 1-based column index.
     * @return the Java class recorded for the column's values.
     */
    protected Class<?> getColumnGremlinType(final int column) {
        // TODO: Loop rows to find common type and cache it.
        return columnTypes.get(column - 1);
    }

    @Override
    public int getColumnType(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Class<?> gremlinType = getColumnGremlinType(column);
        return GremlinTypeMapping.getJDBCType(gremlinType).getJdbcCode();
    }

    @Override
    public String getColumnTypeName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Class<?> gremlinType = getColumnGremlinType(column);
        return gremlinType.getName();
    }

    @Override
    public String getColumnClassName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Class<?> gremlinType = getColumnGremlinType(column);
        return gremlinType.getName();
    }
}
| 7,407 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/SqlMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter;
import lombok.Getter;
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlNodeList;
import org.apache.calcite.sql.SqlOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinEdgeTable;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinProperty;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinVertexTable;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import static software.aws.neptune.gremlin.adapter.util.SqlGremlinError.UNRECOGNIZED_TYPE;
/**
 * This module contains traversal and query metadata used by the adapter: the schema,
 * table/column rename maps built while walking the SQL AST, and aggregation state.
 *
 * @author Lyndon Bauto (lyndonb@bitquilltech.com)
 */
@Getter
public class SqlMetadata {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlMetadata.class);
    private final GremlinSchema gremlinSchema;
    // Maps renamed (aliased) table name -> actual table name.
    private final Map<String, String> tableRenameMap = new HashMap<>();
    // Maps renamed (aliased) column name -> actual column name.
    private final Map<String, String> columnRenameMap = new HashMap<>();
    private final Map<String, List<String>> columnOutputListMap = new HashMap<>();
    // maps the aggregated columns to type
    private final Map<String, String> aggregateTypeMap = new HashMap<>();
    private boolean isAggregate = false;
    private boolean isGrouped = false;
    private boolean doneFilters = false;

    public SqlMetadata(final GremlinSchema gremlinSchema) {
        this.gremlinSchema = gremlinSchema;
    }

    /** Recursively checks whether the node or any of its operands is an aggregate call. */
    private static boolean isAggregate(final SqlNode sqlNode) {
        if (sqlNode instanceof SqlCall) {
            final SqlCall sqlCall = (SqlCall) sqlNode;
            if (isAggregate(sqlCall.getOperator())) {
                return true;
            }
            for (final SqlNode tmpSqlNode : sqlCall.getOperandList()) {
                if (isAggregate(tmpSqlNode)) {
                    return true;
                }
            }
        }
        return false;
    }

    private static boolean isAggregate(final SqlOperator sqlOperator) {
        return sqlOperator instanceof SqlAggFunction;
    }

    /** Whether the generated traversal needs an explicit fold before projecting. */
    public boolean getIsProjectFoldRequired() {
        // Grouping invokes an implicit fold before the project that does not require additional unfolding.
        // Folding is required for aggregates that are not grouped.
        return getIsAggregate() && !getIsGrouped();
    }

    public void setIsDoneFilters(final boolean value) {
        doneFilters = value;
    }

    public boolean getIsGrouped() {
        return isGrouped;
    }

    /**
     * Checks whether the column is an edge id column (IN_ID/OUT_ID suffix) of a vertex table.
     *
     * @throws SQLException if the table does not exist.
     */
    public boolean getIsColumnEdge(final String tableName, final String columnName) throws SQLException {
        return getGremlinTable(tableName).getIsVertex() &&
                (columnName.endsWith(GremlinTableBase.IN_ID) || columnName.endsWith(GremlinTableBase.OUT_ID));
    }

    /**
     * Resolves the edge label named by an IN_ID/OUT_ID column.
     *
     * @throws SQLException if the column is not an edge id column or resolves to a vertex.
     */
    public String getColumnEdgeLabel(final String column) throws SQLException {
        // NOTE(review): the suffix check uses the renamed column while the substring below
        // uses the original `column` — confirm the two always agree in suffix/length.
        final String columnName = getRenamedColumn(column);
        final GremlinTableBase gremlinTableBase;
        if (columnName.endsWith(GremlinTableBase.IN_ID)) {
            gremlinTableBase = getGremlinTable(column.substring(0, column.length() - GremlinTableBase.IN_ID.length()));
        } else if (columnName.endsWith(GremlinTableBase.OUT_ID)) {
            gremlinTableBase = getGremlinTable(column.substring(0, column.length() - GremlinTableBase.OUT_ID.length()));
        } else {
            throw SqlGremlinError.create(SqlGremlinError.EDGE_LABEL_END_MISMATCH, GremlinTableBase.IN_ID,
                    GremlinTableBase.OUT_ID);
        }
        if (gremlinTableBase.getIsVertex()) {
            throw SqlGremlinError.create(SqlGremlinError.EDGE_EXPECTED);
        }
        return gremlinTableBase.getLabel();
    }

    /** True if any vertex has the left label as an in-edge and any vertex has the right label as an out-edge. */
    public boolean isLeftInRightOut(final String leftVertexLabel, final String rightVertexLabel) {
        for (final GremlinVertexTable leftVertexTable : gremlinSchema.getVertices()) {
            for (final GremlinVertexTable rightVertexTable : gremlinSchema.getVertices()) {
                if (leftVertexTable.hasInEdge(leftVertexLabel) && rightVertexTable.hasOutEdge(rightVertexLabel)) {
                    return true;
                }
            }
        }
        return false;
    }

    /** True if any vertex has the left label as an out-edge and any vertex has the right label as an in-edge. */
    public boolean isRightInLeftOut(final String leftVertexLabel, final String rightVertexLabel) {
        for (final GremlinVertexTable leftVertexTable : gremlinSchema.getVertices()) {
            for (final GremlinVertexTable rightVertexTable : gremlinSchema.getVertices()) {
                if (leftVertexTable.hasOutEdge(leftVertexLabel) && rightVertexTable.hasInEdge(rightVertexLabel)) {
                    return true;
                }
            }
        }
        return false;
    }

    public Set<String> getRenamedColumns() {
        return new HashSet<>(columnRenameMap.keySet());
    }

    public void setColumnOutputList(final String table, final List<String> columnOutputList) {
        columnOutputListMap.put(table, new ArrayList<>(columnOutputList));
    }

    /**
     * Resolves all renamed tables to their schema table objects.
     *
     * @throws SQLException if any renamed table does not exist in the schema.
     */
    public Set<GremlinTableBase> getTables() throws SQLException {
        final Set<GremlinTableBase> tables = new HashSet<>();
        for (final String table : tableRenameMap.values()) {
            tables.add(getGremlinTable(table));
        }
        return tables;
    }

    /**
     * Returns true if the (possibly renamed) table is a vertex table, false if it is an edge table.
     *
     * @throws SQLException if the table exists in neither set.
     */
    public boolean isVertex(final String table) throws SQLException {
        final String renamedTableName = getRenamedTable(table);
        for (final GremlinVertexTable gremlinVertexTable : gremlinSchema.getVertices()) {
            if (gremlinVertexTable.getLabel().equalsIgnoreCase(renamedTableName)) {
                return true;
            }
        }
        for (final GremlinEdgeTable gremlinEdgeTable : gremlinSchema.getEdges()) {
            if (gremlinEdgeTable.getLabel().equalsIgnoreCase(renamedTableName)) {
                return false;
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.TABLE_DOES_NOT_EXIST, renamedTableName);
    }

    /**
     * Looks up a table (vertex or edge) by its possibly-renamed name, case-insensitively.
     *
     * @throws SQLException if no table with that label exists.
     */
    public GremlinTableBase getGremlinTable(final String table) throws SQLException {
        final String renamedTableName = getRenamedTable(table);
        for (final GremlinTableBase gremlinTableBase : gremlinSchema.getAllTables()) {
            if (gremlinTableBase.getLabel().equalsIgnoreCase(renamedTableName)) {
                return gremlinTableBase;
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.TABLE_DOES_NOT_EXIST, renamedTableName);
    }

    public void addRenamedTable(final String actualName, final String renameName) {
        tableRenameMap.put(renameName, actualName);
    }

    public String getRenamedTable(final String table) {
        return tableRenameMap.getOrDefault(table, table);
    }

    public void addRenamedColumn(final String actualName, final String renameName) {
        columnRenameMap.put(renameName, actualName);
    }

    public String getRenamedColumn(final String column) {
        return columnRenameMap.getOrDefault(column, column);
    }

    public boolean aggregateTypeExists(final String column) {
        return aggregateTypeMap.containsKey(column);
    }

    /** Reverse lookup: find the rename (alias) for an actual table name, or return the input. */
    public String getRenameFromActual(final String actual) {
        final Optional<Map.Entry<String, String>>
                rename = tableRenameMap.entrySet().stream().filter(t -> t.getValue().equals(actual)).findFirst();
        if (rename.isPresent()) {
            return rename.get().getKey();
        }
        return actual;
    }

    /**
     * Resolves a column name (direct, renamed, or reverse-renamed) to its actual name in the table.
     *
     * @throws SQLException if the column cannot be resolved in the table.
     */
    public String getActualColumnName(final GremlinTableBase table, final String column) throws SQLException {
        if (table.hasColumn(column)) {
            return table.getColumn(column).getName();
        } else if (columnRenameMap.containsKey(column)) {
            return table.getColumn(getRenamedColumn(column)).getName();
        }
        // Fall back to a reverse lookup: the input may be an actual name with a registered alias.
        final Optional<String> actualName = columnRenameMap.entrySet().stream().
                filter(entry -> entry.getValue().equals(column)).map(Map.Entry::getKey).findFirst();
        return table.getColumn(actualName.orElse(column)).getName();
    }

    public boolean getTableHasColumn(final GremlinTableBase table, final String column) {
        final String actualColumnName = getRenamedColumn(column);
        return table.hasColumn(actualColumnName);
    }

    /**
     * Resolves the possibly-renamed table name to the schema's actual label.
     *
     * @throws SQLException if the table does not exist.
     */
    public String getActualTableName(final String table) throws SQLException {
        final String renamedTableName = getRenamedTable(table);
        for (final GremlinVertexTable gremlinVertexTable : gremlinSchema.getVertices()) {
            if (gremlinVertexTable.getLabel().equalsIgnoreCase(renamedTableName)) {
                return gremlinVertexTable.getLabel();
            }
        }
        for (final GremlinEdgeTable gremlinEdgeTable : gremlinSchema.getEdges()) {
            if (gremlinEdgeTable.getLabel().equalsIgnoreCase(renamedTableName)) {
                return gremlinEdgeTable.getLabel();
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.ERROR_TABLE, table);
    }

    /** Records whether any node in the select list contains an aggregate call. */
    public void checkAggregate(final SqlNodeList sqlNodeList) {
        isAggregate = sqlNodeList.getList().stream().anyMatch(SqlMetadata::isAggregate);
    }

    /** Records whether the query has a GROUP BY clause (grouped iff the node is non-null). */
    public void checkGroupByNodeIsNull(final SqlNode sqlNode) {
        isGrouped = sqlNode != null;
    }

    public boolean getIsAggregate() {
        return isAggregate;
    }

    public GremlinProperty getGremlinProperty(final String table, final String column) throws SQLException {
        final String actualColumnName = getActualColumnName(getGremlinTable(table), column);
        return getGremlinTable(table).getColumn(actualColumnName);
    }

    public void addOutputType(final String outputName, final String colType) {
        aggregateTypeMap.put(outputName, colType);
    }

    public String getOutputType(final String outputName, final String colType) {
        return aggregateTypeMap.getOrDefault(outputName, colType);
    }

    /**
     * Determines the type of a column, checking aggregate outputs first, then the
     * (possibly double-renamed) column in the output tables; defaults to "string".
     *
     * @throws SQLException if an output table does not exist in the schema.
     */
    public String getType(final String column) throws SQLException {
        final List<GremlinTableBase> gremlinTableBases = new ArrayList<>();
        for (final String table : getColumnOutputListMap().keySet()) {
            gremlinTableBases.add(getGremlinTable(table));
        }
        if (aggregateTypeExists(column)) {
            return getOutputType(column, "string");
        }
        String renamedColumn = getRenamedColumn(column);
        if (!aggregateTypeExists(renamedColumn)) {
            // Sometimes columns are double renamed.
            renamedColumn = getRenamedColumn(renamedColumn);
            for (final GremlinTableBase gremlinTableBase : gremlinTableBases) {
                if (getTableHasColumn(gremlinTableBase, renamedColumn)) {
                    return getGremlinProperty(gremlinTableBase.getLabel(), renamedColumn).getType();
                }
            }
        }
        return getOutputType(renamedColumn, "string");
    }

    /**
     * Returns a type-appropriate sentinel value for coalescing null column values.
     *
     * @throws SQLException if the column's type is unrecognized.
     */
    public Object getDefaultCoalesceValue(final String column) throws SQLException {
        final String type = getType(column);
        // Switch on the cached local instead of calling getType(column) a second time
        // (the previous code repeated the lookup redundantly).
        switch (type) {
            case "string":
                return "";
            case "boolean":
                return false;
            case "byte":
                return Byte.MAX_VALUE;
            case "short":
                return Short.MAX_VALUE;
            case "integer":
                return Integer.MAX_VALUE;
            case "long":
                return Long.MAX_VALUE;
            case "float":
                return Float.MAX_VALUE;
            case "double":
                return Double.MAX_VALUE;
            case "date":
                return Date.from(Instant.EPOCH);
        }
        throw SqlGremlinError.create(UNRECOGNIZED_TYPE, type);
    }
}
| 7,408 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/SqlConverter.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter;
import com.google.common.collect.ImmutableList;
import lombok.Getter;
import org.apache.calcite.avatica.util.Quoting;
import org.apache.calcite.config.Lex;
import org.apache.calcite.plan.ConventionTraitDef;
import org.apache.calcite.plan.RelTraitDef;
import org.apache.calcite.rel.RelCollationTraitDef;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;
import org.apache.calcite.tools.Program;
import org.apache.calcite.tools.Programs;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.GremlinSqlSelect;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This module is the entry point of the SqlGremlin conversion.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class SqlConverter {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlConverter.class);
    private static final List<RelTraitDef> TRAIT_DEFS =
            ImmutableList.of(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE);
    // MySQL lexing with double-quoted identifiers is the SQL dialect accepted by this adapter.
    private static final SqlParser.Config PARSER_CONFIG =
            SqlParser.config().withLex(Lex.MYSQL).withQuoting(Quoting.DOUBLE_QUOTE);
    private static final Program PROGRAM =
            Programs.sequence(Programs.ofRules(Programs.RULE_SET), Programs.CALC_PROGRAM);
    private final FrameworkConfig frameworkConfig;
    private final GremlinSchema gremlinSchema;

    /**
     * Constructs a SqlConverter over the given graph schema, registering it under the
     * "gremlin" name in a fresh Calcite root schema.
     *
     * @param gremlinSchema Graph schema that SQL queries are validated against.
     */
    public SqlConverter(final GremlinSchema gremlinSchema) {
        this.gremlinSchema = gremlinSchema;
        final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
        this.frameworkConfig = Frameworks.newConfigBuilder()
                .parserConfig(PARSER_CONFIG)
                .defaultSchema(rootSchema.add("gremlin", gremlinSchema))
                .traitDefs(TRAIT_DEFS)
                .programs(PROGRAM)
                .build();
    }

    /**
     * Parses and validates the SQL query and wraps the validated SELECT node.
     *
     * @param g     Traversal source the generated traversal will run against.
     * @param query SQL query text.
     * @return GremlinSqlSelect wrapping the validated query.
     * @throws SQLException If parsing/validation fails or the query is not a SELECT.
     */
    private GremlinSqlSelect getSelect(final GraphTraversalSource g, final String query) throws SQLException {
        GremlinSqlFactory.setSqlMetadata(new SqlMetadata(gremlinSchema));
        final QueryPlanner queryPlanner = new QueryPlanner(frameworkConfig);
        queryPlanner.plan(query);
        final SqlNode sqlNode = queryPlanner.getValidate();
        if (sqlNode instanceof SqlSelect) {
            return GremlinSqlFactory.createSelect((SqlSelect) sqlNode, g);
        } else {
            // Only SELECT statements are supported by this adapter.
            throw SqlGremlinError.createNotSupported(SqlGremlinError.SQL_SELECT_ONLY);
        }
    }

    /**
     * Converts the SQL query to a Gremlin traversal and executes it.
     *
     * @param g     Traversal source to execute against.
     * @param query SQL query text.
     * @return Result of executing the generated traversal.
     * @throws SQLException If conversion or execution fails.
     */
    public SqlGremlinQueryResult executeQuery(final GraphTraversalSource g, final String query) throws SQLException {
        return getSelect(g, query).executeTraversal();
    }

    // Generates (without executing) the traversal for the query.
    // Fixed: 'g' was the only non-final parameter in this class.
    private GraphTraversal<?, ?> getGraphTraversal(final GraphTraversalSource g, final String query)
            throws SQLException {
        return getSelect(g, query).generateTraversal();
    }

    /**
     * Converts the SQL query to its Gremlin-Groovy string form without executing it.
     *
     * @param g     Traversal source used to build the traversal.
     * @param query SQL query text.
     * @return Groovy string representation of the generated traversal.
     * @throws SQLException If conversion fails.
     */
    public String getStringTraversal(final GraphTraversalSource g, final String query) throws SQLException {
        return GroovyTranslator.of("g").translate(getGraphTraversal(g, query).asAdmin().getBytecode()).toString();
    }

    /** Thin wrapper around a Calcite {@link Planner} that parses and validates SQL text. */
    @Getter
    private static class QueryPlanner {
        private final Planner planner;
        // Validated SQL node from the most recent plan() call; exposed via Lombok @Getter.
        private SqlNode validate;

        QueryPlanner(final FrameworkConfig frameworkConfig) {
            this.planner = Frameworks.getPlanner(frameworkConfig);
        }

        /**
         * Parses and validates the SQL, storing the validated node.
         *
         * @param sql SQL text to parse.
         * @throws SQLException If parsing or validation fails.
         */
        public void plan(final String sql) throws SQLException {
            try {
                validate = planner.validate(planner.parse(sql));
            } catch (final Exception e) {
                throw SqlGremlinError.create(SqlGremlinError.PARSE_ERROR, e, sql);
            }
        }
    }
}
| 7,409 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/SqlTraversalEngine.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.StepDirection;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import static software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase.IN_ID;
import static software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase.OUT_ID;
/**
* Traversal engine for SQL-Gremlin. This module is responsible for generating the gremlin traversals.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class SqlTraversalEngine {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlTraversalEngine.class);

    /**
     * Generates the initial traversal for a query: g.V() for vertex tables and g.E()
     * for edge tables, filtered to the table's label.
     *
     * @param gremlinSqlIdentifiers Identifiers in [table, rename] form; exactly two expected.
     * @param sqlMetadata           Metadata used to resolve the actual table name and kind.
     * @param g                     Traversal source to start from.
     * @return Label-filtered initial traversal.
     * @throws SQLException If the identifier list does not contain exactly two entries.
     */
    public static GraphTraversal<?, ?> generateInitialSql(final List<GremlinSqlIdentifier> gremlinSqlIdentifiers,
                                                          final SqlMetadata sqlMetadata,
                                                          final GraphTraversalSource g) throws SQLException {
        if (gremlinSqlIdentifiers.size() != 2) {
            throw SqlGremlinError.create(SqlGremlinError.IDENTIFIER_SIZE_INCORRECT);
        }
        final String label = sqlMetadata.getActualTableName(gremlinSqlIdentifiers.get(0).getName(1));
        final GraphTraversal<?, ?> graphTraversal = sqlMetadata.isVertex(label) ? g.V() : g.E();
        graphTraversal.hasLabel(label);
        return graphTraversal;
    }

    /** Folds the traversal when the metadata requires a project fold (aggregate handling). */
    public static void applyAggregateFold(final SqlMetadata sqlMetadata, final GraphTraversal<?, ?> graphTraversal) {
        if (sqlMetadata.getIsProjectFoldRequired()) {
            graphTraversal.fold();
        }
    }

    /** Unfolds the traversal when the metadata requires a project fold (aggregate handling). */
    public static void applyAggregateUnfold(final SqlMetadata sqlMetadata, final GraphTraversal<?, ?> graphTraversal) {
        if (sqlMetadata.getIsProjectFoldRequired()) {
            graphTraversal.unfold();
        }
    }

    /**
     * Creates an anonymous traversal starting with unfold(), optionally stepping to the
     * adjacent vertex in the given direction.
     */
    public static GraphTraversal<?, ?> getEmptyTraversal(final StepDirection direction, final SqlMetadata sqlMetadata) {
        final GraphTraversal<?, ?> graphTraversal = __.unfold();
        applyAggregateUnfold(sqlMetadata, graphTraversal);
        switch (direction) {
            case Out:
                return graphTraversal.outV();
            case In:
                return graphTraversal.inV();
        }
        return graphTraversal;
    }

    /**
     * Appends a project() step using the rename label and records the rename in the metadata.
     *
     * @param gremlinSqlIdentifiers Identifiers in [table, rename] form; exactly two expected.
     * @param sqlMetadata           Metadata the table rename is recorded in.
     * @param graphTraversal        Traversal the project step is appended to.
     * @throws SQLException If the identifier list does not contain exactly two entries.
     */
    public static void addProjection(final List<GremlinSqlIdentifier> gremlinSqlIdentifiers,
                                     final SqlMetadata sqlMetadata,
                                     final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        if (gremlinSqlIdentifiers.size() != 2) {
            throw SqlGremlinError.create(SqlGremlinError.IDENTIFIER_SIZE_INCORRECT);
        }
        final String label = sqlMetadata.getActualTableName(gremlinSqlIdentifiers.get(0).getName(1));
        final String projectLabel = gremlinSqlIdentifiers.get(1).getName(0);
        graphTraversal.project(projectLabel);
        sqlMetadata.addRenamedTable(label, projectLabel);
    }

    /** Creates an anonymous unfold() traversal with no directional step. */
    public static GraphTraversal<?, ?> getEmptyTraversal(final SqlMetadata sqlMetadata) {
        return getEmptyTraversal(StepDirection.None, sqlMetadata);
    }

    /**
     * Applies a sub-traversal via by(); when apply is set, the sub-traversal is wrapped in
     * a coalesce() so a missing value yields NULL_VALUE instead of filtering the row out.
     */
    public static void applyTraversal(final GraphTraversal graphTraversal,
                                      final GraphTraversal subGraphTraversal,
                                      final boolean apply) {
        graphTraversal.by((apply ? __.coalesce(subGraphTraversal, __.constant(SqlGremlinQueryResult.NULL_VALUE)) : subGraphTraversal));
    }

    /** Applies a sub-traversal via by() without the null-coalescing wrapper. */
    public static void applyTraversal(final GraphTraversal graphTraversal,
                                      final GraphTraversal subGraphTraversal) {
        applyTraversal(graphTraversal, subGraphTraversal, false);
    }

    /**
     * Appends the steps for a SQL identifier (star, table.column, or bare column) to the traversal.
     *
     * @throws SQLException If the identifier cannot be resolved against the metadata.
     */
    public static void applySqlIdentifier(final GremlinSqlIdentifier sqlIdentifier,
                                          final SqlMetadata sqlMetadata,
                                          final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        if (sqlIdentifier.isStar()) {
            // SELECT * will be fixed by calcite so that it is all the underlying nodes.
            // SELECT COUNT(*) on the other hand will actually just be replaced with an empty string.
            // So we need to inject something into our traversal for this.
            graphTraversal.constant(1);
        } else if (sqlIdentifier.getNameCount() == 2) {
            // With size 2 format of identifier is 'table'.'column' => ['table', 'column']
            appendGraphTraversal(sqlIdentifier.getName(0), sqlMetadata.getRenamedColumn(sqlIdentifier.getName(1)),
                    sqlMetadata, graphTraversal);
        } else {
            // With size 1, format of identifier is 'column'.
            appendGraphTraversal(sqlMetadata.getRenamedColumn(sqlIdentifier.getName(0)), sqlMetadata, graphTraversal);
        }
    }

    /**
     * Builds an anonymous project() traversal over the renamed columns.
     * NOTE: mutates the passed list (removes its first element).
     */
    public static GraphTraversal<?, ?> applyColumnRenames(final List<String> columnsRenamed) throws SQLException {
        final String firstColumn = columnsRenamed.remove(0);
        final String[] remaining = columnsRenamed.toArray(new String[] {});
        return __.project(firstColumn, remaining);
    }

    // Selects a column value; once filters are done, missing properties map to NULL_VALUE
    // instead of filtering the traverser out.
    private static void appendColumnSelect(final SqlMetadata sqlMetadata, final String column,
                                           final GraphTraversal<?, ?> graphTraversal) {
        if (sqlMetadata.isDoneFilters()) {
            graphTraversal.choose(__.has(column), __.values(column), __.constant(SqlGremlinQueryResult.NULL_VALUE));
        } else {
            graphTraversal.has(column).values(column);
        }
    }

    // Appends the traversal steps for a 'table'.'column' identifier, handling
    // primary/foreign ID columns by traversing to the connected elements.
    private static void appendGraphTraversal(final String table, final String column,
                                             final SqlMetadata sqlMetadata,
                                             final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        final GremlinTableBase gremlinTableBase = sqlMetadata.getGremlinTable(table);
        final String columnName = sqlMetadata.getActualColumnName(gremlinTableBase, column);
        // Primary/foreign key, need to traverse appropriately.
        if (!columnName.endsWith(GremlinTableBase.ID)) {
            if (sqlMetadata.getIsAggregate()) {
                graphTraversal.has(columnName).values(columnName);
            } else {
                appendColumnSelect(sqlMetadata, columnName, graphTraversal);
            }
        } else {
            // It's this vertex/edge.
            if (columnName.toLowerCase(Locale.getDefault())
                    .startsWith(gremlinTableBase.getLabel().toLowerCase(Locale.getDefault()))) {
                graphTraversal.id();
            } else {
                if (columnName.endsWith(IN_ID)) {
                    // Vertices can have many connected edges (thus we need to fold). Edges can only connect to 1 vertex.
                    if (gremlinTableBase.getIsVertex()) {
                        graphTraversal.coalesce(__.inE().hasLabel(columnName.replace(IN_ID, "")).id().fold(),
                                __.constant(new ArrayList<>()));
                    } else {
                        graphTraversal.coalesce(__.inV().hasLabel(columnName.replace(IN_ID, "")).id(),
                                __.constant(new ArrayList<>()));
                    }
                } else if (columnName.endsWith(OUT_ID)) {
                    // Bug fix: this branch previously tested the raw 'column' while the IN_ID branch
                    // and the replace() below use the resolved 'columnName'; use 'columnName' consistently.
                    // Vertices can have many connected edges (thus we need to fold). Edges can only connect to 1 vertex.
                    if (gremlinTableBase.getIsVertex()) {
                        graphTraversal.coalesce(__.outE().hasLabel(columnName.replace(OUT_ID, "")).id().fold(),
                                __.constant(new ArrayList<>()));
                    } else {
                        graphTraversal.coalesce(__.outV().hasLabel(columnName.replace(OUT_ID, "")).id(),
                                __.constant(new ArrayList<>()));
                    }
                } else {
                    graphTraversal.constant(new ArrayList<>());
                }
            }
        }
    }

    // Appends the traversal steps for a bare column identifier; ID columns are not
    // resolvable without a table and are rejected.
    private static void appendGraphTraversal(final String columnName,
                                             final SqlMetadata sqlMetadata,
                                             final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        // Primary/foreign key, need to traverse appropriately.
        if (columnName.endsWith(GremlinTableBase.ID)) {
            throw SqlGremlinError.create(SqlGremlinError.ID_BASED_APPEND);
        }
        if (sqlMetadata.getIsAggregate()) {
            graphTraversal.has(columnName).values(columnName);
        } else {
            appendColumnSelect(sqlMetadata, columnName, graphTraversal);
        }
    }
}
| 7,410 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/SqlSchemaGrabber.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import lombok.AllArgsConstructor;
import lombok.NonNull;
import org.apache.calcite.util.Pair;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinEdgeTable;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinProperty;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinVertexTable;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
public final class SqlSchemaGrabber {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlSchemaGrabber.class);
    // Maps Java value classes sampled from the graph onto schema type names.
    private static final Map<Class<?>, String> TYPE_MAP = new HashMap<>();
    // The query-string templates below are used for logging only; the actual traversals
    // are built with the fluent API in the Callables.
    private static final String VERTEX_EDGES_LABEL_QUERY = "g.V().hasLabel('%s').%sE().label().dedup()";
    private static final String PROPERTIES_VALUE_QUERY = "g.%s().hasLabel('%s').values('%s').%s";
    private static final String PROPERTY_KEY_QUERY = "g.%s().hasLabel('%s').properties().key().dedup()";
    private static final String LABELS_QUERY = "g.%s().label().dedup()";
    private static final String IN_OUT_VERTEX_QUERY =
            "g.E().hasLabel('%s').project('in','out').by(inV().label()).by(outV().label()).dedup()";

    static {
        TYPE_MAP.put(String.class, "String");
        TYPE_MAP.put(Boolean.class, "Boolean");
        TYPE_MAP.put(Byte.class, "Byte");
        TYPE_MAP.put(Short.class, "Short");
        TYPE_MAP.put(Integer.class, "Integer");
        TYPE_MAP.put(Long.class, "Long");
        TYPE_MAP.put(Float.class, "Float");
        TYPE_MAP.put(Double.class, "Double");
        TYPE_MAP.put(Date.class, "Date");
    }

    private SqlSchemaGrabber() {
    }

    /**
     * Collects the graph schema (vertex and edge tables) by querying labels, properties,
     * and connectivity in parallel.
     *
     * @param g        Traversal source to query the graph with.
     * @param scanType Whether to sample the first value or all values when inferring types.
     * @return The collected GremlinSchema.
     * @throws SQLException If schema collection fails or is interrupted.
     */
    public static GremlinSchema getSchema(final GraphTraversalSource g, final ScanType scanType) throws SQLException {
        final ExecutorService executor = Executors.newFixedThreadPool(96,
                new ThreadFactoryBuilder().setNameFormat("RxSessionRunner-%d").setDaemon(true).build());
        try {
            final Future<List<GremlinVertexTable>> gremlinVertexTablesFuture =
                    executor.submit(new RunGremlinQueryVertices(g, executor, scanType));
            final Future<List<GremlinEdgeTable>> gremlinEdgeTablesFuture =
                    executor.submit(new RunGremlinQueryEdges(g, executor, scanType));
            return new GremlinSchema(gremlinVertexTablesFuture.get(), gremlinEdgeTablesFuture.get());
        } catch (final InterruptedException e) {
            // Restore the interrupt status so callers can observe the interruption.
            Thread.currentThread().interrupt();
            LOGGER.error("Schema collection was interrupted.", e);
            throw new SQLException("Error occurred during schema collection. '" + e.getMessage() + "'.", e);
        } catch (final ExecutionException e) {
            // Log through the logger (previously printStackTrace) and preserve the cause.
            LOGGER.error("Error occurred during schema collection.", e);
            throw new SQLException("Error occurred during schema collection. '" + e.getMessage() + "'.", e);
        } finally {
            // Always release the pool, on success and on failure alike.
            executor.shutdown();
        }
    }

    /**
     * Determines a single schema type for a set of sampled property values. Mixed numeric
     * types widen to the largest observed type; anything containing String/Date (or an
     * unknown class) falls back to String.
     */
    private static String getType(final Set<?> data) {
        final Set<String> types = new HashSet<>();
        for (final Object d : data) {
            types.add(TYPE_MAP.getOrDefault(d.getClass(), "String"));
        }
        if (types.size() == 1) {
            return types.iterator().next();
        } else if (types.size() > 1) {
            if (types.contains("String") || types.contains("Date")) {
                return "String";
            } else if (types.contains("Double")) {
                return "Double";
            } else if (types.contains("Float")) {
                return "Float";
            } else if (types.contains("Long")) {
                return "Long";
            } else if (types.contains("Integer")) {
                return "Integer";
            } else if (types.contains("Short")) {
                return "Short";
            } else if (types.contains("Byte")) {
                return "Byte";
            }
        }
        return "String";
    }

    /** How much of the graph to sample when inferring property types. */
    public enum ScanType {
        First("First"),
        All("All");

        private final String stringValue;

        ScanType(@NonNull final String stringValue) {
            this.stringValue = stringValue;
        }

        /**
         * Converts case-insensitive string to enum value.
         *
         * @param in The case-insensitive string to be converted to enum.
         * @return The enum value if string is recognized as a valid value, otherwise null.
         */
        public static ScanType fromString(@NonNull final String in) {
            for (final ScanType scheme : ScanType.values()) {
                if (scheme.stringValue.equalsIgnoreCase(in)) {
                    return scheme;
                }
            }
            return null;
        }

        @Override
        public String toString() {
            return this.stringValue;
        }
    }

    /** Collects all vertex tables: per-label properties and in/out edge labels, queried in parallel. */
    @AllArgsConstructor
    static class RunGremlinQueryVertices implements Callable<List<GremlinVertexTable>> {
        private final GraphTraversalSource g;
        private final ExecutorService service;
        private final ScanType scanType;

        @Override
        public List<GremlinVertexTable> call() throws Exception {
            final List<Future<List<GremlinProperty>>> gremlinProperties = new ArrayList<>();
            final List<Future<List<String>>> gremlinVertexInEdgeLabels = new ArrayList<>();
            final List<Future<List<String>>> gremlinVertexOutEdgeLabels = new ArrayList<>();
            final List<String> labels = service.submit(new RunGremlinQueryLabels(true, g)).get();
            for (final String label : labels) {
                gremlinProperties.add(service.submit(
                        new RunGremlinQueryPropertiesList(true, label, g, scanType, service)));
                gremlinVertexInEdgeLabels.add(service.submit(new RunGremlinQueryVertexEdges(g, label, "in")));
                gremlinVertexOutEdgeLabels.add(service.submit(new RunGremlinQueryVertexEdges(g, label, "out")));
            }
            final List<GremlinVertexTable> gremlinVertexTables = new ArrayList<>();
            for (int i = 0; i < labels.size(); i++) {
                gremlinVertexTables.add(new GremlinVertexTable(labels.get(i), gremlinProperties.get(i).get(),
                        gremlinVertexInEdgeLabels.get(i).get(), gremlinVertexOutEdgeLabels.get(i).get()));
            }
            return gremlinVertexTables;
        }
    }

    /** Collects all edge tables: per-label properties and in/out vertex label pairs, queried in parallel. */
    @AllArgsConstructor
    static class RunGremlinQueryEdges implements Callable<List<GremlinEdgeTable>> {
        private final GraphTraversalSource g;
        private final ExecutorService service;
        private final ScanType scanType;

        @Override
        public List<GremlinEdgeTable> call() throws Exception {
            final List<Future<List<GremlinProperty>>> futureTableColumns = new ArrayList<>();
            final List<Future<List<Pair<String, String>>>> inOutLabels = new ArrayList<>();
            final List<String> labels = service.submit(new RunGremlinQueryLabels(false, g)).get();
            for (final String label : labels) {
                futureTableColumns.add(service.submit(
                        new RunGremlinQueryPropertiesList(false, label, g, scanType, service)));
                inOutLabels.add(service.submit(new RunGremlinQueryInOutV(g, label)));
            }
            final List<GremlinEdgeTable> gremlinEdgeTables = new ArrayList<>();
            for (int i = 0; i < labels.size(); i++) {
                gremlinEdgeTables.add(new GremlinEdgeTable(labels.get(i), futureTableColumns.get(i).get(),
                        inOutLabels.get(i).get()));
            }
            return gremlinEdgeTables;
        }
    }

    /** Fetches the distinct labels of edges attached to a vertex label in the given direction ("in"/"out"). */
    @AllArgsConstructor
    static class RunGremlinQueryVertexEdges implements Callable<List<String>> {
        private final GraphTraversalSource g;
        private final String label;
        private final String direction;

        @Override
        public List<String> call() throws Exception {
            final String query = String.format(VERTEX_EDGES_LABEL_QUERY, label, direction);
            LOGGER.debug(String.format("Start %s%n", query));
            final List<String> labels = "in".equals(direction) ? g.V().hasLabel(label).inE().label().dedup().toList() :
                    g.V().hasLabel(label).outE().label().dedup().toList();
            LOGGER.debug(String.format("End %s%n", query));
            return labels;
        }
    }

    /** Infers the schema type of one property by sampling its values (first value or all, per strategy). */
    @AllArgsConstructor
    static class RunGremlinQueryPropertyType implements Callable<String> {
        private final boolean isVertex;
        private final String label;
        private final String property;
        private final GraphTraversalSource g;
        private final ScanType strategy;

        @Override
        public String call() {
            final String query = String.format(PROPERTIES_VALUE_QUERY, isVertex ? "V" : "E", label, property,
                    strategy.equals(ScanType.First) ? "next(1)" : "toSet()");
            LOGGER.debug(String.format("Start %s%n", query));
            final GraphTraversal<?, ?> graphTraversal = isVertex ? g.V() : g.E();
            graphTraversal.hasLabel(label).values(property);
            final HashSet<?> data =
                    new HashSet<>(strategy.equals(ScanType.First) ? graphTraversal.next(1) : graphTraversal.toList());
            LOGGER.debug(String.format("End %s%n", query));
            return getType(data);
        }
    }

    /** Fetches the distinct property keys of a label and infers each property's type in parallel. */
    @AllArgsConstructor
    static class RunGremlinQueryPropertiesList implements Callable<List<GremlinProperty>> {
        private final boolean isVertex;
        private final String label;
        private final GraphTraversalSource g;
        private final ScanType scanType;
        private final ExecutorService service;

        @Override
        public List<GremlinProperty> call() throws ExecutionException, InterruptedException {
            final String query = String.format(PROPERTY_KEY_QUERY, isVertex ? "V" : "E", label);
            LOGGER.debug(String.format("Start %s%n", query));
            final List<String> properties = isVertex ?
                    g.V().hasLabel(label).properties().key().dedup().toList() :
                    g.E().hasLabel(label).properties().key().dedup().toList();
            final List<Future<String>> propertyTypes = new ArrayList<>();
            for (final String property : properties) {
                propertyTypes.add(service
                        .submit(new RunGremlinQueryPropertyType(isVertex, label, property, g, scanType)));
            }
            final List<GremlinProperty> columns = new ArrayList<>();
            for (int i = 0; i < properties.size(); i++) {
                columns.add(new GremlinProperty(properties.get(i), propertyTypes.get(i).get().toLowerCase(Locale.getDefault())));
            }
            LOGGER.debug(String.format("End %s%n", query));
            return columns;
        }
    }

    /** Fetches the distinct vertex or edge labels of the graph. */
    @AllArgsConstructor
    static class RunGremlinQueryLabels implements Callable<List<String>> {
        private final boolean isVertex;
        private final GraphTraversalSource g;

        @Override
        public List<String> call() {
            final String query = String.format(LABELS_QUERY, isVertex ? "V" : "E");
            LOGGER.debug(String.format("Start %s%n", query));
            final List<String> labels = isVertex ? g.V().label().dedup().toList() : g.E().label().dedup().toList();
            LOGGER.debug(String.format("End %s%n", query));
            return labels;
        }
    }

    /** Fetches the distinct (inVertexLabel, outVertexLabel) pairs connected by an edge label. */
    @AllArgsConstructor
    static class RunGremlinQueryInOutV implements Callable<List<Pair<String, String>>> {
        private final GraphTraversalSource g;
        private final String label;

        @Override
        public List<Pair<String, String>> call() {
            final String query = String.format(IN_OUT_VERTEX_QUERY, label);
            LOGGER.debug(String.format("Start %s%n", query));
            final List<Map<String, Object>> result = g.E().hasLabel(label).
                    project("in", "out").
                    by(__.inV().label()).
                    by(__.outV().label()).
                    dedup().toList();
            // Bug fix: previously each pair was added once per map ENTRY (each projected map
            // has the two keys "in" and "out"), duplicating every pair. Add one pair per map.
            final List<Pair<String, String>> labels = new ArrayList<>();
            for (final Map<String, Object> map : result) {
                labels.add(new Pair<>(map.get("in").toString(), map.get("out").toString()));
            }
            LOGGER.debug(String.format("End %s%n", query));
            return labels;
        }
    }
}
| 7,411 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinTableScan.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import com.google.common.collect.ImmutableList;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelOptPlanner;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.TableScan;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeField;
import java.util.List;
/**
* Created by twilmes on 9/25/15.
* Modified by lyndonb-bq on 05/17/21.
*/
public class GremlinTableScan extends TableScan implements GremlinRel {
    /**
     * Ordinals of the underlying table's fields that this scan projects, in output order.
     * (The previous comment here described a calling convention, which is not what this
     * field holds.)
     */
    private final int[] fields;

    public GremlinTableScan(final RelOptCluster cluster, final RelTraitSet traitSet,
                            final RelOptTable table, final int[] fields) {
        super(cluster, traitSet, ImmutableList.of(), table);
        this.fields = fields.clone();
    }

    /** A table scan is a leaf node: it has no inputs, so copy simply returns this node. */
    @Override
    public RelNode copy(final RelTraitSet traitSet, final List<RelNode> inputs) {
        assert inputs.isEmpty();
        return this;
    }

    /** Builds the row type containing only the projected fields, in {@code fields} order. */
    @Override
    public RelDataType deriveRowType() {
        final List<RelDataTypeField> fieldList = table.getRowType().getFieldList();
        final RelDataTypeFactory.FieldInfoBuilder builder =
                getCluster().getTypeFactory().builder();
        for (final int field : fields) {
            builder.add(fieldList.get(field));
        }
        return builder.build();
    }

    /** Registers the enumerable-converter rule and the Gremlin rule set with the planner. */
    @Override
    public void register(final RelOptPlanner planner) {
        planner.addRule(GremlinToEnumerableConverterRule.INSTANCE);
        for (final RelOptRule rule : GremlinRules.RULES) {
            planner.addRule(rule);
        }
    }
}
| 7,412 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinFilter.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.Filter;
import org.apache.calcite.rex.RexNode;
/**
 * Gremlin implementation of Calcite's {@link Filter} relational expression.
 *
 * @author Lyndon Bauto (lyndonb@bitquilltech.com)
 * @author Adapted from implementation by twilmes (https://github.com/twilmes/sql-gremlin)
 */
public class GremlinFilter extends Filter implements GremlinRel {
    /**
     * Creates a Gremlin filter relational expression.
     *
     * @param cluster   Cluster this relational expression belongs to.
     * @param traitSet  Traits of this relational expression.
     * @param child     Input relational expression.
     * @param condition Filter condition to apply to the input rows.
     */
    public GremlinFilter(final RelOptCluster cluster, final RelTraitSet traitSet,
                         final RelNode child, final RexNode condition) {
        super(cluster, traitSet, child, condition);
    }

    /** Creates a copy of this filter with the given traits, input, and condition. */
    @Override
    public Filter copy(final RelTraitSet traitSet, final RelNode input, final RexNode condition) {
        return new GremlinFilter(getCluster(), traitSet, input, condition);
    }
}
| 7,413 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinToEnumerableConverter.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import org.apache.calcite.adapter.enumerable.EnumerableRel;
import org.apache.calcite.adapter.enumerable.EnumerableRelImplementor;
import org.apache.calcite.plan.ConventionTraitDef;
import org.apache.calcite.plan.RelOptCluster;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterImpl;
import java.util.List;
/**
* Created by twilmes on 9/25/15.
* Modified by lyndonb-bq on 05/17/21.
* Relational expression representing a scan of a table in a TinkerPop data source.
*/
public class GremlinToEnumerableConverter
        extends ConverterImpl
        implements EnumerableRel {
    /** Creates the converter over the given Gremlin-convention input expression. */
    protected GremlinToEnumerableConverter(
            final RelOptCluster cluster,
            final RelTraitSet traits,
            final RelNode input) {
        super(cluster, ConventionTraitDef.INSTANCE, traits, input);
    }

    @Override
    public RelNode copy(final RelTraitSet traitSet, final List<RelNode> inputs) {
        return new GremlinToEnumerableConverter(
                getCluster(), traitSet, sole(inputs));
    }

    /**
     * NOTE(review): intentionally returns null — this adapter does not generate enumerable
     * code here; traversal generation appears to happen outside Calcite's enumerable
     * pipeline (see the RelWalker note in GremlinRules). Confirm that callers tolerate a
     * null implementation result.
     */
    @Override
    public Result implement(final EnumerableRelImplementor implementor, final Prefer pref) {
        return null;
    }
}
| 7,414 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinToEnumerableConverterRule.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
/**
* Created by twilmes on 9/25/15.
* Modified by lyndonb-bq on 05/17/21.
*/
import org.apache.calcite.adapter.enumerable.EnumerableConvention;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterRule;
/**
* Rule to convert a relational expression from
* {@link GremlinRel#CONVENTION} to {@link EnumerableConvention}.
*/
public final class GremlinToEnumerableConverterRule extends ConverterRule {
    /** Singleton instance of this rule. */
    public static final ConverterRule INSTANCE = new GremlinToEnumerableConverterRule();

    private GremlinToEnumerableConverterRule() {
        super(RelNode.class, GremlinRel.CONVENTION, EnumerableConvention.INSTANCE,
                "GremlinToEnumerableConverterRule");
    }

    /** Wraps the Gremlin-convention node in an enumerable converter carrying the out convention. */
    @Override
    public RelNode convert(final RelNode rel) {
        final RelTraitSet convertedTraits = rel.getTraitSet().replace(getOutConvention());
        return new GremlinToEnumerableConverter(rel.getCluster(), convertedTraits, rel);
    }
}
| 7,415 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinRules.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import lombok.SneakyThrows;
import org.apache.calcite.plan.Convention;
import org.apache.calcite.plan.RelOptRule;
import org.apache.calcite.plan.RelTrait;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.convert.ConverterRule;
import org.apache.calcite.rel.logical.LogicalFilter;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
/**
* List of rules that get pushed down and converted into GremlinTraversals. Right now
* only filter is pushed down using rules. Joins are converted, but handled the by RelWalker
* utilities.
* <p>
* Created by twilmes on 11/14/15.
* Modified by lyndonb-bq on 05/17/21.
*/
class GremlinRules {
    /** Planner rules that push supported relational operators into the Gremlin convention. */
    public static final RelOptRule[] RULES = {
            GremlinFilterRule.INSTANCE
    };

    /** Base class for converter rules that target {@link GremlinRel#CONVENTION}. */
    abstract static class GremlinConverterRule extends ConverterRule {
        private final Convention out;

        GremlinConverterRule(
                final Class<? extends RelNode> clazz,
                final RelTrait in,
                final Convention out,
                final String description) {
            super(clazz, in, out, description);
            this.out = out;
        }

        protected Convention getOut() {
            return out;
        }
    }

    /** Converts a {@link LogicalFilter} into a {@link GremlinFilter} under the Gremlin convention. */
    private static final class GremlinFilterRule extends GremlinConverterRule {
        private static final GremlinFilterRule INSTANCE = new GremlinFilterRule();

        private GremlinFilterRule() {
            super(LogicalFilter.class, Convention.NONE, GremlinRel.CONVENTION, "GremlinFilterRule");
        }

        // @SneakyThrows because ConverterRule#convert does not declare the checked
        // SQLException raised when a non-filter node is handed to this rule.
        @SneakyThrows
        public RelNode convert(final RelNode rel) {
            if (!(rel instanceof LogicalFilter)) {
                throw SqlGremlinError.create(SqlGremlinError.NOT_LOGICAL_FILTER, rel.getClass().getName(),
                        LogicalFilter.class.getName());
            }
            final LogicalFilter logicalFilter = (LogicalFilter) rel;
            return new GremlinFilter(
                    rel.getCluster(),
                    logicalFilter.getTraitSet().replace(getOut()),
                    convert(logicalFilter.getInput(), getOut()),
                    logicalFilter.getCondition());
        }
    }
}
| 7,416 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinSchema.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import com.google.common.collect.ImmutableMap;
import lombok.AllArgsConstructor;
import org.apache.calcite.schema.Table;
import org.apache.calcite.schema.impl.AbstractSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinEdgeTable;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinVertexTable;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Created by twilmes on 9/22/15.
* Modified by lyndonb-bq on 05/17/21.
*/
@AllArgsConstructor
public class GremlinSchema extends AbstractSchema {
    private final List<GremlinVertexTable> vertices;
    private final List<GremlinEdgeTable> edges;

    /** Exposes every vertex and edge table to Calcite, keyed by its label. */
    @Override
    protected Map<String, Table> getTableMap() {
        final Map<String, Table> vertexTables =
                vertices.stream().collect(Collectors.toMap(GremlinTableBase::getLabel, table -> table));
        final Map<String, Table> edgeTables =
                edges.stream().collect(Collectors.toMap(GremlinTableBase::getLabel, table -> table));
        // ImmutableMap rejects duplicate keys, so a label shared between a vertex and an edge fails fast here.
        return ImmutableMap.<String, Table>builder()
                .putAll(vertexTables)
                .putAll(edgeTables)
                .build();
    }

    /** @return a defensive copy of the vertex tables. */
    public List<GremlinVertexTable> getVertices() {
        return new ArrayList<>(vertices);
    }

    /** @return a defensive copy of the edge tables. */
    public List<GremlinEdgeTable> getEdges() {
        return new ArrayList<>(edges);
    }

    /** @return a new list of every table: all vertex tables followed by all edge tables. */
    public List<GremlinTableBase> getAllTables() {
        final List<GremlinTableBase> allTables = new ArrayList<>(vertices);
        allTables.addAll(edges);
        return allTables;
    }
}
| 7,417 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/calcite/GremlinRel.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.calcite;
import org.apache.calcite.plan.Convention;
import org.apache.calcite.rel.RelNode;
/**
* Created by twilmes on 9/25/15.
* Modified by lyndonb-bq on 05/17/21.
*/
/**
 * Marker interface implemented by relational expressions that execute in the
 * Gremlin calling convention.
 */
public interface GremlinRel extends RelNode {
    /**
     * Calling convention for relational operations that occur in Gremlin.
     */
    Convention CONVENTION = new Convention.Impl("GREMLIN", GremlinRel.class);
}
| 7,418 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/gremlin/GremlinTableBase.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.gremlin;
import lombok.Getter;
import org.apache.calcite.adapter.java.AbstractQueryableTable;
import org.apache.calcite.linq4j.QueryProvider;
import org.apache.calcite.linq4j.Queryable;
import org.apache.calcite.plan.RelOptTable;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.TranslatableTable;
import org.apache.calcite.util.Pair;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinRel;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinTableScan;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@Getter
public class GremlinTableBase extends AbstractQueryableTable implements TranslatableTable {
    private final String label;
    private final Boolean isVertex;
    private final Map<String, GremlinProperty> columns;
    public static final String ID = "_ID";
    public static final String IN_ID = "_IN" + ID;
    public static final String OUT_ID = "_OUT" + ID;

    public GremlinTableBase(final String label, final Boolean isVertex,
                            final Map<String, GremlinProperty> columns) {
        super(Object[].class);
        this.label = label;
        this.isVertex = isVertex;
        this.columns = columns;
    }

    /**
     * Looks up a column on this table by name, ignoring case.
     *
     * @param column column name to look up.
     * @return the matching {@link GremlinProperty}.
     * @throws SQLException if no column with that name exists on this table.
     */
    public GremlinProperty getColumn(final String column) throws SQLException {
        for (final Map.Entry<String, GremlinProperty> entry : columns.entrySet()) {
            if (entry.getKey().equalsIgnoreCase(column)) {
                return entry.getValue();
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.COLUMN_NOT_FOUND, column, isVertex ? "vertex" : "edge", label);
    }

    /** @return true if a column with the given name (case-insensitive) exists on this table. */
    public boolean hasColumn(final String column) {
        return columns.keySet().stream().anyMatch(name -> name.equalsIgnoreCase(column));
    }

    // Queryable access is not supported; this table is only used via toRel translation.
    @Override
    public <T> Queryable<T> asQueryable(final QueryProvider queryProvider, final SchemaPlus schema, final String tableName) {
        return null;
    }

    /** Translates this table into a {@link GremlinTableScan} projecting every column in order. */
    @Override
    public RelNode toRel(final RelOptTable.ToRelContext context, final RelOptTable relOptTable) {
        final int[] fields = new int[columns.size()];
        for (int field = 0; field < fields.length; field++) {
            fields[field] = field;
        }
        return new GremlinTableScan(context.getCluster(),
                context.getCluster().traitSetOf(GremlinRel.CONVENTION), relOptTable, fields);
    }

    /** Builds the Calcite row type from this table's columns, mapping each textual type to a Java class. */
    @Override
    public RelDataType getRowType(final RelDataTypeFactory relDataTypeFactory) {
        final List<String> names = new ArrayList<>();
        final List<RelDataType> types = new ArrayList<>();
        columns.forEach((name, property) -> {
            names.add(name);
            types.add(relDataTypeFactory.createJavaType(getType(property.getType())));
        });
        return relDataTypeFactory.createStructType(Pair.zip(names, types));
    }

    // Maps the schema's textual type name to the Java class handed to Calcite.
    // Unrecognized (or null) names yield null.
    private Class<?> getType(final String className) {
        if ("string".equalsIgnoreCase(className)) {
            return String.class;
        }
        if ("integer".equalsIgnoreCase(className)) {
            return Integer.class;
        }
        if ("float".equalsIgnoreCase(className)) {
            return Float.class;
        }
        if ("byte".equalsIgnoreCase(className)) {
            return Byte.class;
        }
        if ("short".equalsIgnoreCase(className)) {
            return Short.class;
        }
        if ("double".equalsIgnoreCase(className)) {
            return Double.class;
        }
        if ("long".equalsIgnoreCase(className)) {
            return Long.class;
        }
        if ("boolean".equalsIgnoreCase(className)) {
            return Boolean.class;
        }
        if ("date".equalsIgnoreCase(className) || "long_date".equalsIgnoreCase(className)) {
            return java.sql.Date.class;
        }
        if ("timestamp".equalsIgnoreCase(className) || "long_timestamp".equalsIgnoreCase(className)) {
            return java.sql.Timestamp.class;
        }
        return null;
    }
}
| 7,419 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/gremlin/GremlinVertexTable.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.gremlin;
import lombok.Getter;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@Getter
public class GremlinVertexTable extends GremlinTableBase {
    private final List<String> inEdges;
    private final List<String> outEdges;

    public GremlinVertexTable(final String label, final List<GremlinProperty> columns, final List<String> inEdges, final List<String> outEdges) {
        super(label, true, convert(label, columns, inEdges, outEdges));
        this.inEdges = inEdges;
        this.outEdges = outEdges;
    }

    /**
     * Builds the column map for a vertex table: the declared properties plus a synthetic
     * primary-key column ("&lt;label&gt;_ID") and one foreign-key column per connected edge
     * label ("&lt;edge&gt;_IN_ID" / "&lt;edge&gt;_OUT_ID"). All synthetic columns are strings.
     */
    private static Map<String, GremlinProperty> convert(
            final String label, final List<GremlinProperty> columns,
            final List<String> inEdges, final List<String> outEdges) {
        final Map<String, GremlinProperty> columnsWithKeys =
                columns.stream().collect(Collectors.toMap(GremlinProperty::getName, column -> column));
        // Vertex label + '_ID' is the synthetic primary-key column, typed as a string.
        final GremlinProperty primaryKey = new GremlinProperty(label + ID, "string");
        columnsWithKeys.put(primaryKey.getName(), primaryKey);
        // Each connected edge label contributes a string-typed foreign-key column.
        for (final String inEdgeLabel : inEdges) {
            final GremlinProperty inForeignKey = new GremlinProperty(inEdgeLabel + IN_ID, "string");
            columnsWithKeys.put(inForeignKey.getName(), inForeignKey);
        }
        for (final String outEdgeLabel : outEdges) {
            final GremlinProperty outForeignKey = new GremlinProperty(outEdgeLabel + OUT_ID, "string");
            columnsWithKeys.put(outForeignKey.getName(), outForeignKey);
        }
        return columnsWithKeys;
    }

    /** @return true if this vertex has an incoming edge with the given label (IN_ID suffix stripped). */
    public boolean hasInEdge(final String label) {
        final String strippedLabel = label.replace(IN_ID, "");
        for (final String edge : inEdges) {
            if (edge.equalsIgnoreCase(strippedLabel)) {
                return true;
            }
        }
        return false;
    }

    /** @return true if this vertex has an outgoing edge with the given label (OUT_ID suffix stripped). */
    public boolean hasOutEdge(final String label) {
        final String strippedLabel = label.replace(OUT_ID, "");
        for (final String edge : outEdges) {
            if (edge.equalsIgnoreCase(strippedLabel)) {
                return true;
            }
        }
        return false;
    }
}
| 7,420 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/gremlin/GremlinEdgeTable.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.gremlin;
import lombok.Getter;
import org.apache.calcite.util.Pair;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Created by twilmes on 10/10/15.
* Modified by lyndonb-bq on 05/17/21.
*/
@Getter
public class GremlinEdgeTable extends GremlinTableBase {
    // Each pair is (in-vertex label, out-vertex label) this edge label can connect.
    private final List<Pair<String, String>> inOutVertexPairs;

    public GremlinEdgeTable(final String label, final List<GremlinProperty> columns,
                            final List<Pair<String, String>> inOutVertexPairs) {
        super(label, false, convert(label, columns, inOutVertexPairs));
        this.inOutVertexPairs = inOutVertexPairs;
    }

    /**
     * Builds the column map for an edge table: the declared properties plus a synthetic
     * primary-key column ("&lt;label&gt;_ID") and, for each connectable vertex pair, string-typed
     * foreign-key columns "&lt;inVertex&gt;_IN_ID" and "&lt;outVertex&gt;_OUT_ID".
     */
    private static Map<String, GremlinProperty> convert(
            final String label, final List<GremlinProperty> columns,
            final List<Pair<String, String>> inOutTablePairs) {
        final Map<String, GremlinProperty> columnsWithPKFK =
                columns.stream().collect(Collectors.toMap(GremlinProperty::getName, t -> t));
        // Edge label appended with '_ID' is the synthetic primary key, typed as a string.
        final GremlinProperty pk = new GremlinProperty(label + GremlinTableBase.ID, "string");
        columnsWithPKFK.put(pk.getName(), pk);
        // Vertex labels with the 'IN_ID'/'OUT_ID' suffix are the connected-vertex foreign keys.
        inOutTablePairs.forEach(inOutPair -> {
            final GremlinProperty inFk = new GremlinProperty(inOutPair.getKey() + GremlinTableBase.IN_ID, "string");
            final GremlinProperty outFk = new GremlinProperty(inOutPair.getValue() + GremlinTableBase.OUT_ID, "string");
            columnsWithPKFK.put(inFk.getName(), inFk);
            columnsWithPKFK.put(outFk.getName(), outFk);
        });
        return columnsWithPKFK;
    }

    /**
     * Returns whether this edge connects the given in/out vertex pair.
     * NOTE(review): this compares the stored plain vertex labels against labels with the
     * IN_ID/OUT_ID suffixes appended, so callers appear to be expected to pass bare vertex
     * labels minus the suffix — verify against call sites before changing.
     */
    public boolean isEdgeBetween(final String in, final String out) {
        for (final Pair<String, String> inOutPair : inOutVertexPairs) {
            if (inOutPair.getKey().equalsIgnoreCase(in + GremlinTableBase.IN_ID)
                    && inOutPair.getValue().equalsIgnoreCase(out + GremlinTableBase.OUT_ID)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether this edge can have an in-vertex with the given label.
     * The label may carry the synthetic IN_ID suffix; it is stripped before comparing.
     */
    public boolean hasInVertex(final String inVertexLabel) {
        final String label = inVertexLabel.replace(IN_ID, "");
        for (final Pair<String, String> pair : inOutVertexPairs) {
            if (pair.getKey().equalsIgnoreCase(label)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns whether this edge can have an out-vertex with the given label.
     * The label may carry the synthetic OUT_ID suffix; it is stripped before comparing.
     */
    public boolean hasOutVertex(final String outVertexLabel) {
        // Bug fix: previously stripped IN_ID here (copy-paste from hasInVertex), so labels
        // carrying the OUT_ID suffix were never matched.
        final String label = outVertexLabel.replace(OUT_ID, "");
        for (final Pair<String, String> pair : inOutVertexPairs) {
            if (pair.getValue().equalsIgnoreCase(label)) {
                return true;
            }
        }
        return false;
    }
}
| 7,421 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/schema/gremlin/GremlinProperty.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.schema.gremlin;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
* Created by twilmes on 10/10/15.
* Modified by lyndonb-bq on 05/17/21.
*/
/**
 * Immutable descriptor of a single Gremlin property: its name and textual type.
 */
public class GremlinProperty {
    private final String name;
    private final String type;

    public GremlinProperty(final String name, final String type) {
        this.name = name;
        this.type = type;
    }

    public String getName() {
        return name;
    }

    public String getType() {
        return type;
    }
}
| 7,422 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/GremlinSqlNode.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes;
import lombok.AllArgsConstructor;
import org.apache.calcite.sql.SqlNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
/**
* This abstract class in the GremlinSql equivalent of SqlNode.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
* @author Adapted from implementation by twilmes (https://github.com/twilmes/sql-gremlin)
*/
@AllArgsConstructor
public abstract class GremlinSqlNode {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlNode.class);
    // The Calcite SqlNode this GremlinSql node wraps.
    private final SqlNode sqlNode;
    // Query-wide metadata shared across the SQL-to-Gremlin translation.
    private final SqlMetadata sqlMetadata;
}
| 7,423 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/GremlinSqlFactory.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes;
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.sql.SqlAsOperator;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.calcite.sql.SqlBinaryOperator;
import org.apache.calcite.sql.SqlCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlJoin;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlPostfixOperator;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.calcite.sql.SqlSelect;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlAsOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlPostfixOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlPrefixOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.aggregate.GremlinSqlAggFunction;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlBinaryOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.GremlinSqlSelect;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.GremlinSqlSelectMulti;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.GremlinSqlSelectSingle;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.join.GremlinSqlJoinComparison;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
* This factory converts different types of Calcite's SqlNode/SqlOperator's to SqlGremlin equivalents.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
* @author Adapted from implementation by twilmes (https://github.com/twilmes/sql-gremlin)
*/
public class GremlinSqlFactory {
    // NOTE(review): static mutable state — the active metadata is shared process-wide,
    // which presumably limits this factory to one translation at a time; confirm callers.
    private static SqlMetadata sqlMetadata = null;

    /** Installs the metadata used by all subsequent factory calls. */
    public static void setSqlMetadata(final SqlMetadata sqlMetadata1) {
        sqlMetadata = sqlMetadata1;
    }

    /**
     * @return the active metadata.
     * @throws SQLException if {@link #setSqlMetadata} has not been called.
     */
    public static SqlMetadata getGremlinSqlMetadata() throws SQLException {
        if (sqlMetadata == null) {
            throw SqlGremlinError.create(SqlGremlinError.SCHEMA_NOT_SET);
        }
        return sqlMetadata;
    }

    /**
     * Converts a join-equality condition (a binary-operator call) into a
     * {@link GremlinSqlJoinComparison}.
     *
     * @throws SQLException if the node is not a binary-operator SqlBasicCall.
     */
    public static GremlinSqlJoinComparison createJoinEquality(final SqlNode sqlNode)
            throws SQLException {
        if (sqlNode instanceof SqlBasicCall) {
            final SqlBasicCall sqlBasicCall = (SqlBasicCall) sqlNode;
            if (sqlBasicCall.getOperator() instanceof SqlBinaryOperator) {
                return new GremlinSqlJoinComparison((SqlBasicCall) sqlNode,
                        (SqlBinaryOperator) sqlBasicCall.getOperator(), createNodeList(sqlBasicCall.getOperandList()),
                        getGremlinSqlMetadata());
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE, sqlNode.getClass().getName());
    }

    /**
     * Converts a Calcite SqlOperator plus its operands into the matching GremlinSql operator.
     *
     * @throws SQLException if the operator kind is not supported.
     */
    public static GremlinSqlOperator createOperator(final SqlOperator sqlOperator, final List<SqlNode> sqlOperands)
            throws SQLException {
        if (sqlOperator instanceof SqlAsOperator) {
            return new GremlinSqlAsOperator((SqlAsOperator) sqlOperator, createNodeList(sqlOperands),
                    getGremlinSqlMetadata());
        } else if (sqlOperator instanceof SqlAggFunction) {
            return new GremlinSqlAggFunction((SqlAggFunction) sqlOperator, createNodeList(sqlOperands),
                    getGremlinSqlMetadata());
        } else if (sqlOperator instanceof SqlBinaryOperator) {
            return new GremlinSqlBinaryOperator((SqlBinaryOperator) sqlOperator, createNodeList(sqlOperands),
                    getGremlinSqlMetadata());
        } else if (sqlOperator instanceof SqlPostfixOperator) {
            return new GremlinSqlPostfixOperator((SqlPostfixOperator) sqlOperator, createNodeList(sqlOperands),
                    getGremlinSqlMetadata());
        } else if (sqlOperator instanceof SqlPrefixOperator) {
            return new GremlinSqlPrefixOperator((SqlPrefixOperator) sqlOperator, createNodeList(sqlOperands),
                    getGremlinSqlMetadata());
        }
        throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_OPERATOR, sqlOperator.getKind().sql);
    }

    /**
     * Converts a Calcite SqlNode into the matching GremlinSql node.
     *
     * @throws SQLException if the node type is not supported.
     */
    public static GremlinSqlNode createNode(final SqlNode sqlNode) throws SQLException {
        if (sqlNode instanceof SqlBasicCall) {
            return new GremlinSqlBasicCall((SqlBasicCall) sqlNode, getGremlinSqlMetadata());
        } else if (sqlNode instanceof SqlIdentifier) {
            return new GremlinSqlIdentifier((SqlIdentifier) sqlNode, getGremlinSqlMetadata());
        } else if (sqlNode instanceof SqlLiteral) {
            return new GremlinSqlLiteral((SqlLiteral) sqlNode, getGremlinSqlMetadata());
        }
        throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE, sqlNode.getClass().getName());
    }

    /** Converts each SqlNode in the list via {@link #createNode}. */
    public static List<GremlinSqlNode> createNodeList(final List<SqlNode> sqlNodes) throws SQLException {
        final List<GremlinSqlNode> gremlinSqlNodes = new ArrayList<>();
        for (final SqlNode sqlNode : sqlNodes) {
            gremlinSqlNodes.add(createNode(sqlNode));
        }
        return gremlinSqlNodes;
    }

    /**
     * Converts a SqlNode and verifies the result is exactly of the expected GremlinSql type.
     *
     * @throws SQLException if conversion fails or the resulting type does not match.
     */
    @SuppressWarnings("unchecked")
    public static <T> T createNodeCheckType(final SqlNode sqlNode, final Class<T> clazz) throws SQLException {
        final GremlinSqlNode gremlinSqlNode = createNode(sqlNode);
        if (!gremlinSqlNode.getClass().equals(clazz)) {
            throw SqlGremlinError.create(SqlGremlinError.TYPE_MISMATCH);
        }
        return (T) gremlinSqlNode;
    }

    /**
     * Converts a SqlSelect into a single- or multi-table (join) GremlinSql select.
     *
     * @throws SQLException if the FROM clause is missing or of an unsupported type.
     */
    public static GremlinSqlSelect createSelect(final SqlSelect selectRoot, final GraphTraversalSource g)
            throws SQLException {
        if (selectRoot.getFrom() == null) {
            throw SqlGremlinError.createNotSupported(SqlGremlinError.UNSUPPORTED_LITERAL_EXPRESSION);
        } else if (selectRoot.getFrom() instanceof SqlJoin) {
            // Fix: go through getGremlinSqlMetadata() (as every other factory method does) so an
            // unset schema raises SCHEMA_NOT_SET instead of a NullPointerException downstream.
            return new GremlinSqlSelectMulti(selectRoot, (SqlJoin) selectRoot.getFrom(), getGremlinSqlMetadata(), g);
        } else if (selectRoot.getFrom() instanceof SqlBasicCall) {
            return new GremlinSqlSelectSingle(selectRoot, (SqlBasicCall) selectRoot.getFrom(),
                    getGremlinSqlMetadata(), g);
        }
        throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE_GETFROM, selectRoot.getFrom().getClass().getName());
    }

    /**
     * Recursively checks whether the given node (an identifier or any of a call's operands)
     * refers to the given table alias.
     *
     * @throws SQLException if an unsupported node type is encountered.
     */
    public static boolean isTable(final SqlNode sqlNode, final String renamedTable) throws SQLException {
        if (sqlNode instanceof SqlIdentifier) {
            return (((SqlIdentifier) sqlNode).names.get(0).equalsIgnoreCase(renamedTable));
        } else if (sqlNode instanceof SqlCall) {
            for (final SqlNode tmpSqlNode : ((SqlCall) sqlNode).getOperandList()) {
                if (isTable(tmpSqlNode, renamedTable)) {
                    return true;
                }
            }
        } else {
            throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE_ISTABLE);
        }
        return false;
    }
}
| 7,424 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operands/GremlinSqlIdentifier.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands;
import org.apache.calcite.sql.SqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
/**
* This module is a GremlinSql equivalent of Calcite's SqlIdentifier.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlIdentifier extends GremlinSqlNode {
    private final SqlIdentifier sqlIdentifier;

    public GremlinSqlIdentifier(final SqlIdentifier sqlIdentifier, final SqlMetadata sqlMetadata) {
        super(sqlIdentifier, sqlMetadata);
        this.sqlIdentifier = sqlIdentifier;
    }

    /**
     * Returns the identifier component at the given index.
     *
     * @throws SQLException if the index is out of bounds.
     */
    public String getName(final int idx) throws SQLException {
        if (idx >= sqlIdentifier.names.size()) {
            throw SqlGremlinError.create(SqlGremlinError.IDENTIFIER_INDEX_OUT_OF_BOUNDS);
        }
        return sqlIdentifier.names.get(idx);
    }

    /**
     * Returns the last identifier component (the column name).
     *
     * @throws SQLException if the identifier has no components.
     */
    public String getColumn() throws SQLException {
        final int componentCount = sqlIdentifier.names.size();
        if (componentCount == 0) {
            throw SqlGremlinError.create(SqlGremlinError.IDENTIFIER_LIST_EMPTY);
        }
        return sqlIdentifier.names.get(componentCount - 1);
    }

    /** @return the number of components in this identifier. */
    public int getNameCount() {
        return sqlIdentifier.names.size();
    }

    /** @return true if this identifier is a '*' wildcard. */
    public boolean isStar() {
        return sqlIdentifier.isStar();
    }
}
| 7,425 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select/GremlinSqlSelect.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.select;
import org.apache.calcite.sql.SqlNumericLiteral;
import org.apache.calcite.sql.SqlSelect;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
/**
* This abstract class is a GremlinSql equivalent of Calcite's SqlSelect.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public abstract class GremlinSqlSelect extends GremlinSqlNode {
private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlSelect.class);
private final GraphTraversalSource g;
private final SqlSelect sqlSelect;
private final SqlMetadata sqlMetadata;
public GremlinSqlSelect(final SqlSelect sqlSelect, final SqlMetadata sqlMetadata, final GraphTraversalSource g) {
super(sqlSelect, sqlMetadata);
this.sqlSelect = sqlSelect;
this.g = g;
this.sqlMetadata = sqlMetadata;
}
public SqlGremlinQueryResult executeTraversal() throws SQLException {
GraphTraversal<?, ?> graphTraversal = null;
try {
sqlMetadata.checkAggregate(sqlSelect.getSelectList());
sqlMetadata.checkGroupByNodeIsNull(sqlSelect.getGroup());
graphTraversal = generateTraversal();
applyDistinct(graphTraversal);
applyOffset(graphTraversal);
applyLimit(graphTraversal);
final SqlGremlinQueryResult sqlGremlinQueryResult = generateSqlGremlinQueryResult();
runTraversalExecutor(graphTraversal, sqlGremlinQueryResult);
return sqlGremlinQueryResult;
} catch (final SQLException e) {
if (graphTraversal != null) {
try {
graphTraversal.close();
} catch (final Exception ignored) {
}
}
throw e;
}
}
private SqlGremlinQueryResult generateSqlGremlinQueryResult() throws SQLException {
final List<String> columns = new ArrayList<>();
sqlMetadata.getColumnOutputListMap().forEach((key, value) -> columns.addAll(value));
return new SqlGremlinQueryResult(columns, sqlMetadata);
}
protected abstract void runTraversalExecutor(GraphTraversal<?, ?> traversal,
SqlGremlinQueryResult sqlGremlinQueryResult) throws SQLException;
public String getStringTraversal() throws SQLException {
return GroovyTranslator.of("g").translate(generateTraversal().asAdmin().getBytecode()).toString();
}
public abstract GraphTraversal<?, ?> generateTraversal() throws SQLException;
protected GraphTraversal<?, ?> applyColumnRenames(final List<GremlinSqlNode> sqlNodeList, final String table) throws SQLException {
// Determine what the names should be for renaming.
final List<String> columnsRenamed = new ArrayList<>();
for (final GremlinSqlNode gremlinSqlNode : sqlNodeList) {
if (gremlinSqlNode instanceof GremlinSqlIdentifier) {
columnsRenamed.add(((GremlinSqlIdentifier) gremlinSqlNode).getName(1));
} else if (gremlinSqlNode instanceof GremlinSqlBasicCall) {
columnsRenamed.add(((GremlinSqlBasicCall) gremlinSqlNode).getRename());
} else {
throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE_SELECTLIST, gremlinSqlNode.getClass().getName());
}
}
final List<String> renamedColumnsTemp = new ArrayList<>(columnsRenamed);
sqlMetadata.setColumnOutputList(table, columnsRenamed);
return SqlTraversalEngine.applyColumnRenames(renamedColumnsTemp);
}
protected void applyColumnRetrieval(final GraphTraversal<?, ?> graphTraversal, final String table,
final List<GremlinSqlNode> sqlNodeList, final StepDirection stepDirection)
throws SQLException {
// If there are no nodes, we should simply append a by and exit.
if (sqlNodeList.isEmpty()) {
graphTraversal.by();
return;
}
final GraphTraversal<?, ?> subGraphTraversal = applyColumnRenames(sqlNodeList, table);
for (final GremlinSqlNode gremlinSqlNode : sqlNodeList) {
if (gremlinSqlNode instanceof GremlinSqlIdentifier) {
final GraphTraversal<?, ?> subSubGraphTraversal =
SqlTraversalEngine.getEmptyTraversal(stepDirection, sqlMetadata);
SqlTraversalEngine
.applySqlIdentifier((GremlinSqlIdentifier) gremlinSqlNode, sqlMetadata, subSubGraphTraversal);
SqlTraversalEngine.applyTraversal(subGraphTraversal, subSubGraphTraversal, true);
} else if (gremlinSqlNode instanceof GremlinSqlBasicCall) {
final GraphTraversal<?, ?> subSubGraphTraversal =
SqlTraversalEngine.getEmptyTraversal(stepDirection, sqlMetadata);
((GremlinSqlBasicCall) gremlinSqlNode).generateTraversal(subSubGraphTraversal);
SqlTraversalEngine.applyTraversal(subGraphTraversal, subSubGraphTraversal, true);
} else {
throw SqlGremlinError.create(SqlGremlinError.UNKNOWN_NODE_SELECTLIST, gremlinSqlNode.getClass().getName());
}
}
SqlTraversalEngine.applyTraversal(graphTraversal, subGraphTraversal);
}
protected void applyColumnRetrieval(final GraphTraversal<?, ?> graphTraversal, final String table,
final List<GremlinSqlNode> sqlNodeList) throws SQLException {
applyColumnRetrieval(graphTraversal, table, sqlNodeList, StepDirection.None);
}
private void applyOffset(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
// TODO: AN-885 implement OFFSET
// Gremlin doesn't seem to directly support offset,
// we probably need to inject numeric literal value
// into the pagination and have it know to jump the
// first X number of results.
if (sqlSelect.getOffset() != null) {
throw SqlGremlinError.createNotSupported(SqlGremlinError.OFFSET_NOT_SUPPORTED);
}
}
private void applyLimit(final GraphTraversal<?, ?> graphTraversal) {
if (sqlSelect.getFetch() instanceof SqlNumericLiteral) {
final SqlNumericLiteral limit = (SqlNumericLiteral) sqlSelect.getFetch();
final Long limitValue = limit.getValueAs(Long.class);
graphTraversal.limit(limitValue);
}
}
private void applyDistinct(final GraphTraversal<?, ?> graphTraversal) {
if (sqlSelect.isDistinct()) {
graphTraversal.dedup();
}
}
}
| 7,426 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select/GremlinSqlSelectSingle.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.select;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.calcite.sql.SqlSelect;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.apache.tinkerpop.gremlin.structure.Column;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlAsOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlPostfixOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlBinaryOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.gremlin.adapter.results.pagination.Pagination;
import software.aws.neptune.gremlin.adapter.results.pagination.SimpleDataReader;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
 * This module is a GremlinSql equivalent of Calcite's SqlSelect for a non-JOIN operation.
 *
 * @author Lyndon Bauto (lyndonb@bitquilltech.com)
 */
public class GremlinSqlSelectSingle extends GremlinSqlSelect {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlSelectSingle.class);
    private final SqlSelect sqlSelect;
    private final SqlMetadata sqlMetadata;
    private final GraphTraversalSource g;
    private final SqlBasicCall sqlBasicCall;

    public GremlinSqlSelectSingle(final SqlSelect sqlSelect,
                                  final SqlBasicCall sqlBasicCall,
                                  final SqlMetadata sqlMetadata, final GraphTraversalSource g) {
        super(sqlSelect, sqlMetadata, g);
        this.sqlSelect = sqlSelect;
        this.sqlMetadata = sqlMetadata;
        this.g = g;
        this.sqlBasicCall = sqlBasicCall;
    }

    /**
     * Spawns a single daemon thread that pages the traversal's results into the result holder.
     *
     * @throws SQLException if more than one table's columns are present (a single-table SELECT
     *                      must output columns for exactly one table).
     */
    @Override
    protected void runTraversalExecutor(final GraphTraversal<?, ?> graphTraversal,
                                        final SqlGremlinQueryResult sqlGremlinQueryResult) throws SQLException {
        // Validate before creating the executor so we don't construct one we never use.
        final List<List<String>> columns = new ArrayList<>(sqlMetadata.getColumnOutputListMap().values());
        if (columns.size() != 1) {
            throw SqlGremlinError.create(SqlGremlinError.SINGLE_SELECT_MULTI_RETURN);
        }
        // Launch thread to continue grabbing results.
        final ExecutorService executor = Executors.newSingleThreadExecutor(
                new ThreadFactoryBuilder().setNameFormat("Data-Insert-Thread-%d").setDaemon(true).build());
        executor.execute(new Pagination(new SimpleDataReader(
                sqlMetadata.getRenameFromActual(sqlMetadata.getTables().iterator().next().getLabel()), columns.get(0)),
                graphTraversal, sqlGremlinQueryResult));
        executor.shutdown();
    }

    /**
     * Builds the traversal for a single-table SELECT: resolves the "table AS rename" FROM
     * clause, applies WHERE/GROUP BY/ORDER BY/HAVING, then appends the column projection.
     *
     * @throws SQLException for malformed FROM clauses or when metadata validation fails; the
     *                      partially built traversal is closed before rethrowing.
     */
    @Override
    public GraphTraversal<?, ?> generateTraversal() throws SQLException {
        if (sqlSelect.getSelectList() == null) {
            throw SqlGremlinError.create(SqlGremlinError.SELECT_NO_LIST);
        }
        final GremlinSqlOperator gremlinSqlOperator =
                GremlinSqlFactory.createOperator(sqlBasicCall.getOperator(), sqlBasicCall.getOperandList());
        if (!(gremlinSqlOperator instanceof GremlinSqlAsOperator)) {
            throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_FROM_FORMAT);
        }
        final List<GremlinSqlNode> gremlinSqlOperands = GremlinSqlFactory.createNodeList(sqlBasicCall.getOperandList());
        final List<GremlinSqlIdentifier> gremlinSqlIdentifiers = new ArrayList<>();
        for (final GremlinSqlNode gremlinSqlOperand : gremlinSqlOperands) {
            if (!(gremlinSqlOperand instanceof GremlinSqlIdentifier)) {
                throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_FROM_FORMAT);
            }
            gremlinSqlIdentifiers.add((GremlinSqlIdentifier) gremlinSqlOperand);
        }
        GraphTraversal<?, ?> graphTraversal = null;
        try {
            graphTraversal =
                    SqlTraversalEngine.generateInitialSql(gremlinSqlIdentifiers, sqlMetadata, g);
            final String label = sqlMetadata.getActualTableName(gremlinSqlIdentifiers.get(0).getName(1));
            // This call generates the latter parts of the traversal into a throwaway anonymous
            // traversal (__.__()) purely to populate the renamed labels in the metadata, so
            // queries like 'SELECT foo AS bar FROM baz ORDER BY bar' can resolve bar=>foo.
            generateDataRetrieval(gremlinSqlIdentifiers, __.__());
            // Generate actual traversal.
            applyWhere(graphTraversal);
            applyGroupBy(graphTraversal, label);
            applySelectValues(graphTraversal);
            applyOrderBy(graphTraversal, label);
            applyHaving(graphTraversal);
            sqlMetadata.setIsDoneFilters(true);
            generateDataRetrieval(gremlinSqlIdentifiers, graphTraversal);
            if (sqlMetadata.getRenamedColumns() == null) {
                throw SqlGremlinError.create(SqlGremlinError.COLUMN_RENAME_LIST_EMPTY);
            }
            if (sqlMetadata.getTables().size() != 1) {
                throw SqlGremlinError.create(SqlGremlinError.NO_TRAVERSAL_TABLE);
            }
            return graphTraversal;
        } catch (final SQLException e) {
            // Close the partially built traversal before propagating the failure.
            if (graphTraversal != null) {
                try {
                    graphTraversal.close();
                } catch (final Exception ignored) {
                }
            }
            throw e;
        }
    }

    /**
     * Appends the data-retrieval tail of the traversal: the column projection wrapped in a
     * choose() so that empty aggregate results still emit a row.
     */
    private void generateDataRetrieval(final List<GremlinSqlIdentifier> gremlinSqlIdentifiers,
                                       GraphTraversal<?, ?> graphTraversal) throws SQLException {
        final String projectLabel = gremlinSqlIdentifiers.get(1).getName(0);
        final GraphTraversal<?, Map<String, ?>> graphTraversalDataPath = __.__();
        SqlTraversalEngine.addProjection(gremlinSqlIdentifiers, sqlMetadata, graphTraversalDataPath);
        applyColumnRetrieval(graphTraversalDataPath, projectLabel,
                GremlinSqlFactory.createNodeList(sqlSelect.getSelectList().getList()));
        SqlTraversalEngine.applyAggregateFold(sqlMetadata, graphTraversal);
        final GraphTraversal<?, ?> graphTraversalChoosePredicate = __.unfold();
        SqlTraversalEngine.applyAggregateUnfold(sqlMetadata, graphTraversalChoosePredicate);
        graphTraversal.choose(graphTraversalChoosePredicate, graphTraversalDataPath, __.__());
    }

    /**
     * Renders the generated traversal as a Groovy "g. ..." string for debugging.
     */
    public String getStringTraversal() throws SQLException {
        return GroovyTranslator.of("g").translate(generateTraversal().asAdmin().getBytecode()).toString();
    }

    /** Collapses the grouped map to its values so only projected data continues downstream. */
    private void applySelectValues(final GraphTraversal<?, ?> graphTraversal) {
        graphTraversal.select(Column.values);
    }

    /**
     * Applies GROUP BY. Without an explicit clause, everything is still grouped under a single
     * key so aggregate steps behave; otherwise the group key is the union of the key columns.
     */
    protected void applyGroupBy(final GraphTraversal<?, ?> graphTraversal, final String table) throws SQLException {
        if ((sqlSelect.getGroup() == null) || (sqlSelect.getGroup().getList().isEmpty())) {
            // No GROUP BY clause: if aggregates are present everything must still land in a
            // single group, so group with no key.
            graphTraversal.group().unfold();
        } else {
            final List<GremlinSqlNode> gremlinSqlNodes = new ArrayList<>();
            for (final SqlNode sqlNode : sqlSelect.getGroup().getList()) {
                gremlinSqlNodes.add(GremlinSqlFactory.createNodeCheckType(sqlNode, GremlinSqlIdentifier.class));
            }
            graphTraversal.group();
            final List<GraphTraversal> byUnion = new ArrayList<>();
            for (final GremlinSqlNode gremlinSqlNode : gremlinSqlNodes) {
                final GraphTraversal graphTraversal1 = __.__();
                toAppendToByGraphTraversal(gremlinSqlNode, table, graphTraversal1);
                byUnion.add(graphTraversal1);
            }
            graphTraversal.by(__.union(byUnion.toArray(new GraphTraversal[0])).fold()).unfold();
        }
    }

    /**
     * Applies ORDER BY. Without a clause, orders by id for a deterministic result order;
     * otherwise appends one by() modulator per order key.
     */
    protected void applyOrderBy(final GraphTraversal<?, ?> graphTraversal, final String table) throws SQLException {
        graphTraversal.order();
        if (sqlSelect.getOrderList() == null || sqlSelect.getOrderList().getList().isEmpty()) {
            graphTraversal.by(__.unfold().id());
            return;
        }
        final List<GremlinSqlNode> gremlinSqlIdentifiers = new ArrayList<>();
        for (final SqlNode sqlNode : sqlSelect.getOrderList().getList()) {
            gremlinSqlIdentifiers.add(GremlinSqlFactory.createNode(sqlNode));
        }
        for (final GremlinSqlNode gremlinSqlNode : gremlinSqlIdentifiers) {
            appendByGraphTraversal(gremlinSqlNode, table, graphTraversal);
        }
    }

    /**
     * Resolves a GROUP BY key node into the supplied traversal (values() for a plain column,
     * or the node's own generated traversal for an expression).
     *
     * @throws SQLException if the key is an edge (IN_ID/OUT_ID) column, which is unsupported.
     */
    private void toAppendToByGraphTraversal(final GremlinSqlNode gremlinSqlNode, final String table,
                                            final GraphTraversal graphTraversal)
            throws SQLException {
        if (gremlinSqlNode instanceof GremlinSqlIdentifier) {
            final String column = sqlMetadata
                    .getActualColumnName(sqlMetadata.getGremlinTable(table),
                            ((GremlinSqlIdentifier) gremlinSqlNode).getColumn());
            if (column.endsWith(GremlinTableBase.IN_ID) || column.endsWith(GremlinTableBase.OUT_ID)) {
                // TODO: Grouping edges that are not the edge that the vertex are connected - needs to be implemented.
                throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_EDGES);
            } else {
                graphTraversal.values(sqlMetadata.getActualColumnName(sqlMetadata.getGremlinTable(table), column));
            }
        } else if (gremlinSqlNode instanceof GremlinSqlBasicCall) {
            final GremlinSqlBasicCall gremlinSqlBasicCall = (GremlinSqlBasicCall) gremlinSqlNode;
            gremlinSqlBasicCall.generateTraversal(graphTraversal);
        }
    }

    /**
     * Appends a single ORDER BY key as a by() modulator, coalescing missing values to the
     * column's default so rows without the property still sort. Numeric literals are treated
     * as 1-based ordinals into the select list per SQL semantics.
     *
     * @throws SQLException if the key is an edge column, an out-of-range ordinal, a non-numeric
     *                      literal, or an unsupported node type.
     */
    private void appendByGraphTraversal(final GremlinSqlNode gremlinSqlNode, final String table,
                                        final GraphTraversal graphTraversal)
            throws SQLException {
        final GraphTraversal graphTraversal1 = __.unfold();
        if (gremlinSqlNode instanceof GremlinSqlIdentifier) {
            final String column = sqlMetadata
                    .getActualColumnName(sqlMetadata.getGremlinTable(table),
                            ((GremlinSqlIdentifier) gremlinSqlNode).getColumn());
            if (column.endsWith(GremlinTableBase.IN_ID) || column.endsWith(GremlinTableBase.OUT_ID)) {
                // TODO: Grouping edges that are not the edge that the vertex are connected - needs to be implemented.
                throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_EDGES);
            } else {
                graphTraversal1.values(sqlMetadata.getActualColumnName(sqlMetadata.getGremlinTable(table), column));
            }
            graphTraversal.by(__.coalesce(graphTraversal1, __.constant(sqlMetadata.getDefaultCoalesceValue(column))));
        } else if (gremlinSqlNode instanceof GremlinSqlBasicCall) {
            final GremlinSqlBasicCall gremlinSqlBasicCall = (GremlinSqlBasicCall) gremlinSqlNode;
            gremlinSqlBasicCall.generateTraversal(graphTraversal1);
            if (gremlinSqlBasicCall.getGremlinSqlOperator() instanceof GremlinSqlPostfixOperator) {
                final GremlinSqlPostfixOperator gremlinSqlPostFixOperator =
                        (GremlinSqlPostfixOperator) gremlinSqlBasicCall.getGremlinSqlOperator();
                graphTraversal.by(__.coalesce(graphTraversal1,
                        __.constant(sqlMetadata.getDefaultCoalesceValue(gremlinSqlBasicCall.getOutputColumn()))),
                        gremlinSqlPostFixOperator.getOrder());
            } else {
                graphTraversal.by(__.coalesce(graphTraversal1,
                        __.constant(sqlMetadata.getDefaultCoalesceValue(gremlinSqlBasicCall.getOutputColumn()))));
            }
        } else if (gremlinSqlNode instanceof GremlinSqlLiteral) {
            final GremlinSqlLiteral gremlinSqlLiteral = (GremlinSqlLiteral) gremlinSqlNode;
            final List<SqlNode> sqlNodeList = sqlSelect.getSelectList().getList();
            if (gremlinSqlLiteral.getValue() instanceof Number) {
                final int ordinal = ((Number) gremlinSqlLiteral.getValue()).intValue();
                // Bug fix: the bounds check was inverted - it recursed with an out-of-range
                // index for invalid ordinals and rejected valid ones. A valid ordinal is
                // 1 <= ordinal <= select list size.
                if (ordinal >= 1 && ordinal <= sqlNodeList.size()) {
                    appendByGraphTraversal(GremlinSqlFactory.createNode(sqlNodeList.get(ordinal - 1)), table,
                            graphTraversal);
                } else {
                    throw SqlGremlinError.create(SqlGremlinError.ORDER_BY_ORDINAL_VALUE);
                }
            } else {
                throw SqlGremlinError.create(SqlGremlinError.CANNOT_ORDER_COLUMN_LITERAL);
            }
        } else {
            throw SqlGremlinError.createNotSupported(SqlGremlinError.CANNOT_ORDER_BY,
                    gremlinSqlNode.getClass().getName());
        }
    }

    /** Applies the HAVING clause as a filter. */
    protected void applyHaving(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        applySqlFilter(sqlSelect.getHaving(), graphTraversal);
    }

    /** Applies the WHERE clause as a filter. */
    protected void applyWhere(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        applySqlFilter(sqlSelect.getWhere(), graphTraversal);
    }

    /**
     * Translates a WHERE/HAVING SqlNode into traversal filter steps. Supports basic calls,
     * NOT-prefixed sub-filters, and bare boolean identifiers (treated as "column = true").
     *
     * @throws SQLException for unsupported prefix operators or node types.
     */
    void applySqlFilter(SqlNode sqlNode, GraphTraversal<?, ?> graphTraversal) throws SQLException {
        if (sqlNode == null) {
            return;
        }
        if (sqlNode instanceof SqlBasicCall) {
            // Renamed from "sqlBasicCall" to avoid shadowing the field of the same name.
            final SqlBasicCall basicCall = (SqlBasicCall) sqlNode;
            if (basicCall.getOperator() instanceof SqlPrefixOperator) {
                final SqlPrefixOperator sqlPrefixOperator = (SqlPrefixOperator) basicCall.getOperator();
                if (sqlPrefixOperator.kind.equals(SqlKind.NOT)) {
                    // NOT wraps exactly one operand; negate its sub-filter. (The redundant check
                    // of the deprecated "operands" array was removed; getOperandList() is the
                    // supported accessor for the same data.)
                    if (basicCall.getOperandList().size() == 1) {
                        final GraphTraversal<?, ?> subGraphTraversal = __.__();
                        applySqlFilter(basicCall.getOperandList().get(0), subGraphTraversal);
                        graphTraversal.not(subGraphTraversal);
                        return;
                    }
                    throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_NOT_ONLY_BOOLEAN);
                }
                throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_UNSUPPORTED_PREFIX);
            }
            GremlinSqlFactory.createNodeCheckType(sqlNode, GremlinSqlBasicCall.class)
                    .generateTraversal(graphTraversal);
            return;
        } else if (sqlNode instanceof SqlIdentifier) {
            GremlinSqlBinaryOperator.appendBooleanEquals(sqlMetadata, graphTraversal,
                    GremlinSqlFactory.createNodeCheckType(sqlNode, GremlinSqlIdentifier.class), true);
            return;
        }
        throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_BASIC_LITERALS);
    }
}
| 7,427 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select/GremlinSqlSelectMulti.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.select;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.calcite.sql.JoinConditionType;
import org.apache.calcite.sql.JoinType;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlJoin;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlOperator;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.calcite.sql.SqlSelect;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.apache.tinkerpop.gremlin.structure.Column;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlAsOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.join.GremlinSqlJoinComparison;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.gremlin.adapter.results.pagination.JoinDataReader;
import software.aws.neptune.gremlin.adapter.results.pagination.Pagination;
import software.aws.neptune.gremlin.adapter.util.SQLNotSupportedException;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Function;
import static software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlFactory.createNode;
/**
* This module is a GremlinSql equivalent of Calcite's SqlSelect for a JOIN operation.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlSelectMulti extends GremlinSqlSelect {
// Multi is a JOIN.
private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlSelectMulti.class);
private final SqlSelect sqlSelect;
private final SqlMetadata sqlMetadata;
private final GraphTraversalSource g;
private final SqlJoin sqlJoin;
    /**
     * Constructs a GremlinSqlSelectMulti for a JOIN-based SELECT.
     *
     * @param sqlSelect   the Calcite SELECT node being converted.
     * @param sqlJoin     the JOIN node from the SELECT's FROM clause.
     * @param sqlMetadata the metadata accumulated during SQL-to-Gremlin conversion.
     * @param g           the traversal source used to build the traversal.
     */
    public GremlinSqlSelectMulti(final SqlSelect sqlSelect, final SqlJoin sqlJoin,
                                 final SqlMetadata sqlMetadata, final GraphTraversalSource g) {
        super(sqlSelect, sqlMetadata, g);
        this.sqlMetadata = sqlMetadata;
        this.sqlSelect = sqlSelect;
        this.g = g;
        this.sqlJoin = sqlJoin;
    }
@Override
protected void runTraversalExecutor(final GraphTraversal<?, ?> graphTraversal,
final SqlGremlinQueryResult sqlGremlinQueryResult) throws SQLException {
// Launch thread to continue grabbing results.
final ExecutorService executor = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setNameFormat("Data-Insert-Thread-%d").setDaemon(true).build());
final Map<String, List<String>> tableColumns = sqlMetadata.getColumnOutputListMap();
if (tableColumns.keySet().size() > 2) {
throw SqlGremlinError.create(SqlGremlinError.JOIN_TABLE_COUNT);
}
executor.execute(new Pagination(new JoinDataReader(tableColumns), graphTraversal, sqlGremlinQueryResult));
executor.shutdown();
}
    /**
     * Builds the JOIN traversal: starts from the edges of the join label, filters to the two
     * joined vertex labels, applies WHERE/GROUP BY/ORDER BY/HAVING, and appends the column
     * projections for each side of the join.
     *
     * @return the generated traversal; closed internally if a later step throws.
     * @throws SQLException for unsupported join shapes (non-INNER join, non-ON condition,
     *                      vertices not connected by the join edge, mismatched edge labels).
     */
    @Override
    public GraphTraversal<?, ?> generateTraversal() throws SQLException {
        final JoinType joinType = sqlJoin.getJoinType();
        final JoinConditionType conditionType = sqlJoin.getConditionType();
        // Both sides of the join must be "<table> AS <rename>" basic calls.
        final GremlinSqlBasicCall left =
                GremlinSqlFactory.createNodeCheckType(sqlJoin.getLeft(), GremlinSqlBasicCall.class);
        final GremlinSqlBasicCall right =
                GremlinSqlFactory.createNodeCheckType(sqlJoin.getRight(), GremlinSqlBasicCall.class);
        final GremlinSqlJoinComparison gremlinSqlJoinComparison =
                GremlinSqlFactory.createJoinEquality(sqlJoin.getCondition());
        // Only "INNER JOIN ... ON ..." is supported.
        if (!joinType.name().equals(JoinType.INNER.name())) {
            throw SqlGremlinError.createNotSupported(SqlGremlinError.INNER_JOIN_ONLY);
        }
        if (!conditionType.equals(JoinConditionType.ON)) {
            throw SqlGremlinError.createNotSupported(SqlGremlinError.JOIN_ON_ONLY);
        }
        if ((left.getGremlinSqlNodes().size() != 2) || (right.getGremlinSqlNodes().size() != 2)) {
            throw SqlGremlinError.create(SqlGremlinError.LEFT_RIGHT_CONDITION_OPERANDS);
        }
        if (!(left.getGremlinSqlOperator() instanceof GremlinSqlAsOperator) ||
                !(right.getGremlinSqlOperator() instanceof GremlinSqlAsOperator)) {
            throw SqlGremlinError.create(SqlGremlinError.LEFT_RIGHT_AS_OPERATOR);
        }
        // Resolve actual/renamed table names and the join column for each side, registering
        // the renames in the metadata as we go.
        final GremlinSqlAsOperator leftAsOperator = (GremlinSqlAsOperator) left.getGremlinSqlOperator();
        final String leftTableName = leftAsOperator.getActual();
        final String leftTableRename = leftAsOperator.getRename();
        sqlMetadata.addRenamedTable(leftTableName, leftTableRename);
        final String leftColumn = gremlinSqlJoinComparison.getColumn(leftTableRename);
        final GremlinSqlAsOperator rightAsOperator = (GremlinSqlAsOperator) right.getGremlinSqlOperator();
        final String rightTableName = rightAsOperator.getActual();
        final String rightTableRename = rightAsOperator.getRename();
        sqlMetadata.addRenamedTable(rightTableName, rightTableRename);
        final String rightColumn = gremlinSqlJoinComparison.getColumn(rightTableRename);
        // Joins are only possible on edge (IN_ID/OUT_ID) columns of both vertex tables.
        if (!sqlMetadata.getIsColumnEdge(leftTableRename, leftColumn) ||
                !sqlMetadata.getIsColumnEdge(rightTableRename, rightColumn)) {
            throw SqlGremlinError.create(SqlGremlinError.JOIN_EDGELESS_VERTICES);
        }
        // Strip the IN_ID/OUT_ID suffix to recover the edge label each side refers to.
        final String edgeLabelRight =
                rightColumn.replaceAll(GremlinTableBase.IN_ID, "").replaceAll(GremlinTableBase.OUT_ID, "");
        final String edgeLabelLeft =
                leftColumn.replaceAll(GremlinTableBase.IN_ID, "").replaceAll(GremlinTableBase.OUT_ID, "");
        if (!edgeLabelRight.equals(edgeLabelLeft)) {
            throw SqlGremlinError.create(SqlGremlinError.CANNOT_JOIN_DIFFERENT_EDGES, edgeLabelLeft, edgeLabelRight);
        }
        // The two join columns must reference opposite ends (IN vs OUT) of the same edge.
        if (rightColumn.endsWith(GremlinTableBase.IN_ID)) {
            if (!leftColumn.endsWith(GremlinTableBase.OUT_ID)) {
                throw SqlGremlinError.create(SqlGremlinError.JOIN_EDGELESS_VERTICES);
            }
        } else if (rightColumn.endsWith(GremlinTableBase.OUT_ID)) {
            if (!leftColumn.endsWith(GremlinTableBase.IN_ID)) {
                throw SqlGremlinError.create(SqlGremlinError.JOIN_EDGELESS_VERTICES);
            }
        } else {
            throw SqlGremlinError.create(SqlGremlinError.JOIN_EDGELESS_VERTICES);
        }
        final String edgeLabel = sqlMetadata.getColumnEdgeLabel(leftColumn);
        // Cases to consider:
        //   1. rightLabel == leftLabel
        //   2. rightLabel != leftLabel, rightLabel->leftLabel
        //   3. rightLabel != leftLabel, leftLabel->rightLabel
        //   4. rightLabel != leftLabel, rightLabel->leftLabel, leftLabel->rightLabel
        // Case 1 & 4 are logically equivalent.
        // Determine which is in and which is out.
        final boolean leftInRightOut = sqlMetadata.isLeftInRightOut(leftColumn, rightColumn);
        final boolean rightInLeftOut = sqlMetadata.isRightInLeftOut(leftColumn, rightColumn);
        final String inVLabel;
        final String outVLabel;
        final String inVRename;
        final String outVRename;
        if (leftInRightOut && rightInLeftOut &&
                (leftTableName.replace(GremlinTableBase.IN_ID, "").replace(GremlinTableBase.OUT_ID, "")
                        .equals(rightTableName.replace(GremlinTableBase.IN_ID, "")
                                .replace(GremlinTableBase.OUT_ID, "")))) {
            // Vertices of same label connected by an edge.
            // Doesn't matter how we assign these, but renames need to be different.
            inVLabel = leftTableName;
            outVLabel = leftTableName;
            inVRename = leftTableRename;
            outVRename = rightTableRename;
        } else if (leftInRightOut) {
            // Left vertex is in, right vertex is out.
            inVLabel = leftTableName;
            outVLabel = rightTableName;
            inVRename = leftTableRename;
            outVRename = rightTableRename;
        } else if (rightInLeftOut) {
            // Right vertex is in, left vertex is out.
            inVLabel = rightTableName;
            outVLabel = leftTableName;
            inVRename = rightTableRename;
            outVRename = leftTableRename;
        } else {
            // NOTE(review): neither direction matched; labels are left empty, so the
            // hasLabel("") filters below will match nothing and silently yield an empty
            // result. Confirm whether this case should throw instead.
            inVLabel = "";
            outVLabel = "";
            inVRename = "";
            outVRename = "";
        }
        // Split the select list by which (renamed) side of the join each node belongs to.
        final List<GremlinSqlNode> gremlinSqlNodesIn = new ArrayList<>();
        final List<GremlinSqlNode> gremlinSqlNodesOut = new ArrayList<>();
        for (final SqlNode sqlNode : sqlSelect.getSelectList().getList()) {
            if (GremlinSqlFactory.isTable(sqlNode, inVRename)) {
                gremlinSqlNodesIn.add(GremlinSqlFactory.createNode(sqlNode));
            } else if (GremlinSqlFactory.isTable(sqlNode, outVRename)) {
                gremlinSqlNodesOut.add(GremlinSqlFactory.createNode(sqlNode));
            }
        }
        GraphTraversal<?, ?> graphTraversal = null;
        try {
            // Start from the join edges and keep only those between the two joined labels.
            graphTraversal = g.E().hasLabel(edgeLabel)
                    .where(__.inV().hasLabel(inVLabel))
                    .where(__.outV().hasLabel(outVLabel));
            applyWhere(graphTraversal, inVRename, outVRename);
            applyGroupBy(graphTraversal, edgeLabel, inVRename, outVRename);
            applySelectValues(graphTraversal);
            applyOrderBy(graphTraversal, edgeLabel, inVRename, outVRename);
            applyHaving(graphTraversal, inVRename, outVRename);
            SqlTraversalEngine.applyAggregateFold(sqlMetadata, graphTraversal);
            // Project one entry per side of the join, then retrieve each side's columns.
            graphTraversal.project(inVRename, outVRename);
            sqlMetadata.setIsDoneFilters(true);
            applyColumnRetrieval(graphTraversal, inVRename, gremlinSqlNodesIn, StepDirection.In);
            applyColumnRetrieval(graphTraversal, outVRename, gremlinSqlNodesOut, StepDirection.Out);
            return graphTraversal;
        } catch (final SQLException e) {
            // Close the partially built traversal before propagating the failure.
            if (graphTraversal != null) {
                try {
                    graphTraversal.close();
                } catch (final Exception ignored) {
                }
            }
            throw e;
        }
    }
    /**
     * Collapses the grouped result map to its values so only projected data continues
     * down the traversal.
     */
    private void applySelectValues(final GraphTraversal<?, ?> graphTraversal) {
        graphTraversal.select(Column.values);
    }
    // TODO: Fill in group by and place in correct position of traversal.
    /**
     * Applies the GROUP BY clause to the JOIN traversal. Without an explicit GROUP BY,
     * everything is grouped under a single key so aggregate steps still work; otherwise each
     * group key is resolved against the in- or out-vertex it belongs to.
     *
     * @throws SQLException if a group key is a non-join edge column or belongs to neither
     *                      joined table.
     */
    protected void applyGroupBy(final GraphTraversal<?, ?> graphTraversal, final String edgeLabel,
                                final String inVRename, final String outVRename) throws SQLException {
        if ((sqlSelect.getGroup() == null) || (sqlSelect.getGroup().getList().isEmpty())) {
            // No GROUP BY clause: if aggregates are present everything must still land in a
            // single group, so group with no key.
            graphTraversal.group().unfold();
        } else {
            final List<GremlinSqlIdentifier> gremlinSqlIdentifiers = new ArrayList<>();
            for (final SqlNode sqlNode : sqlSelect.getGroup().getList()) {
                gremlinSqlIdentifiers.add(GremlinSqlFactory.createNodeCheckType(sqlNode, GremlinSqlIdentifier.class));
            }
            graphTraversal.group();
            final List<GraphTraversal> byUnion = new ArrayList<>();
            for (final GremlinSqlIdentifier gremlinSqlIdentifier : gremlinSqlIdentifiers) {
                final String renamedTable = gremlinSqlIdentifier.getName(0);
                final String table = sqlMetadata.getRenamedTable(renamedTable);
                final String column = sqlMetadata
                        .getActualColumnName(sqlMetadata.getGremlinTable(table), gremlinSqlIdentifier.getName(1));
                if (column.replace(GremlinTableBase.ID, "").equalsIgnoreCase(edgeLabel)) {
                    // Grouping on the join edge itself: group by the edge id.
                    byUnion.add(__.id());
                } else if (column.endsWith(GremlinTableBase.ID)) {
                    // TODO: Grouping edges that are not the edge that the vertex are connected - needs to be implemented.
                    throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_EDGES);
                } else {
                    // Resolve the column through the vertex on the matching side of the join.
                    if (inVRename.equals(renamedTable)) {
                        byUnion.add(__.inV().hasLabel(table)
                                .values(sqlMetadata.getActualColumnName(sqlMetadata.getGremlinTable(table), column)));
                    } else if (outVRename.equals(renamedTable)) {
                        byUnion.add(__.outV().hasLabel(table)
                                .values(sqlMetadata.getActualColumnName(sqlMetadata.getGremlinTable(table), column)));
                    } else {
                        throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_TABLE, table);
                    }
                }
            }
            // The group key is the folded union of all resolved key traversals.
            graphTraversal.by(__.union(byUnion.toArray(new GraphTraversal[0])).fold()).unfold();
        }
    }
/**
 * Applies the ORDER BY clause of the SELECT statement to the traversal.
 * <p>
 * An order() step is always appended; with no ORDER BY clause the results are
 * ordered by id for a deterministic output order. Ordering on edge id columns is
 * not supported; other columns are resolved against the in/out vertex tables.
 *
 * @param graphTraversal traversal to append the ordering steps to
 * @param edgeLabel      label of the edge the join runs over
 * @param inVRename      renamed table mapped to the in-vertex side
 * @param outVRename     renamed table mapped to the out-vertex side
 * @throws SQLException if an ordering column cannot be resolved to either vertex table
 */
protected void applyOrderBy(final GraphTraversal<?, ?> graphTraversal, final String edgeLabel,
                            final String inVRename, final String outVRename) throws SQLException {
    graphTraversal.order();
    if (sqlSelect.getOrderList() == null || sqlSelect.getOrderList().getList().isEmpty()) {
        // No ORDER BY clause: order by id to keep results deterministic.
        graphTraversal.by(__.unfold().id());
        return;
    }
    final List<GremlinSqlIdentifier> gremlinSqlIdentifiers = new ArrayList<>();
    for (final SqlNode sqlNode : sqlSelect.getOrderList().getList()) {
        gremlinSqlIdentifiers.add(GremlinSqlFactory.createNodeCheckType(sqlNode, GremlinSqlIdentifier.class));
    }
    final GremlinTableBase outVTable = sqlMetadata.getGremlinTable(outVRename);
    final GremlinTableBase inVTable = sqlMetadata.getGremlinTable(inVRename);
    for (final GremlinSqlIdentifier gremlinSqlIdentifier : gremlinSqlIdentifiers) {
        final String column = gremlinSqlIdentifier.getColumn();
        if (column.endsWith(GremlinTableBase.IN_ID) || column.endsWith(GremlinTableBase.OUT_ID)) {
            // TODO: Grouping edges that are not the edge that the vertex are connected - needs to be implemented.
            throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_EDGES);
        } else {
            // Resolve the column against the in-vertex table first, then the out-vertex table.
            if (sqlMetadata.getTableHasColumn(inVTable, column)) {
                graphTraversal.by(__.unfold().inV().hasLabel(inVTable.getLabel())
                        .values(sqlMetadata.getActualColumnName(inVTable, column)));
            } else if (sqlMetadata.getTableHasColumn(outVTable, column)) {
                graphTraversal.by(__.unfold().outV().hasLabel(outVTable.getLabel())
                        .values(sqlMetadata.getActualColumnName(outVTable, column)));
            } else {
                throw SqlGremlinError.create(SqlGremlinError.CANNOT_GROUP_COLUMN, column);
            }
        }
    }
}
/**
 * Applies the HAVING clause of the SELECT statement, if present, as a filter
 * on the traversal.
 *
 * @param graphTraversal traversal to append the filter to
 * @param inVRename      renamed table mapped to the in-vertex side
 * @param outVRename     renamed table mapped to the out-vertex side
 * @throws SQLException if the HAVING expression is unsupported
 */
protected void applyHaving(final GraphTraversal<?, ?> graphTraversal,
                           final String inVRename, final String outVRename) throws SQLException {
    final SqlNode havingNode = sqlSelect.getHaving();
    if (havingNode != null) {
        applySqlFilter(havingNode, graphTraversal, inVRename, outVRename);
    }
}
/**
 * Applies the WHERE clause of the SELECT statement, if present, as a filter
 * on the traversal.
 *
 * @param graphTraversal traversal to append the filter to
 * @param inVRename      renamed table mapped to the in-vertex side
 * @param outVRename     renamed table mapped to the out-vertex side
 * @throws SQLException if the WHERE expression is unsupported
 */
protected void applyWhere(final GraphTraversal<?, ?> graphTraversal,
                          final String inVRename, final String outVRename) throws SQLException {
    final SqlNode whereNode = sqlSelect.getWhere();
    if (whereNode != null) {
        applySqlFilter(whereNode, graphTraversal, inVRename, outVRename);
    }
}
/**
 * Recursively translates a SQL filter expression (from WHERE or HAVING) into
 * Gremlin filter steps on the given traversal.
 * <p>
 * Supported forms: NOT &lt;expr&gt;, &lt;expr&gt; AND/OR &lt;expr&gt;,
 * &lt;identifier&gt; OPERATOR &lt;literal&gt;, AGG(&lt;identifier&gt;) OPERATOR &lt;literal&gt;,
 * and a bare boolean identifier. Anything else raises an unsupported-operation error.
 *
 * @param sqlNode        filter expression to translate
 * @param graphTraversal traversal to append the filter steps to
 * @param inVRename      renamed table mapped to the in-vertex side
 * @param outVRename     renamed table mapped to the out-vertex side
 * @throws SQLException if the expression shape or operator is unsupported
 */
private void applySqlFilter(final SqlNode sqlNode, final GraphTraversal<?, ?> graphTraversal,
                            final String inVRename, final String outVRename) throws SQLException {
    if (sqlNode instanceof SqlBasicCall) {
        SqlBasicCall sqlBasicCall = (SqlBasicCall) sqlNode;
        if (sqlBasicCall.getOperator() instanceof SqlPrefixOperator) {
            if (sqlBasicCall.getOperator().kind.equals(SqlKind.NOT)) {
                if (sqlBasicCall.getOperandList().size() == 1) {
                    // if operator == NOT => recursively calling applySqlFilter() and then apply NOT
                    final GraphTraversal<?, ?> subGraphTraversal = __.__();
                    applySqlFilter(sqlBasicCall.getOperandList().get(0), subGraphTraversal, inVRename, outVRename);
                    graphTraversal.not(subGraphTraversal);
                    return;
                }
                throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_NOT_ONLY_BOOLEAN);
            }
            throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_UNSUPPORTED_PREFIX);
        } else if (sqlBasicCall.getOperandList().size() == 2) {
            if (sqlBasicCall.getOperator().kind.equals(SqlKind.AND) ||
                    sqlBasicCall.getOperator().kind.equals(SqlKind.OR)) {
                // if operator == AND or OR => recursively calling applySqlFilter() and then apply AND or OR
                GraphTraversal<?, ?>[] list = new GraphTraversal[2];
                for (int i = 0; i < 2; i++) {
                    SqlNode node = sqlBasicCall.getOperandList().get(i);
                    final GraphTraversal<?, ?> subGraphTraversal = __.__();
                    applySqlFilter(node, subGraphTraversal, inVRename, outVRename);
                    list[i] = subGraphTraversal;
                }
                if (sqlBasicCall.getOperator().kind.equals(SqlKind.AND)) {
                    graphTraversal.and(list);
                } else {
                    graphTraversal.or(list);
                }
                return;
            }
            // Binary comparison: the right-hand operand must be a literal.
            GremlinSqlNode op1 = createNode(sqlBasicCall.getOperandList().get(0));
            final GremlinSqlLiteral gremlinSqlLiteral;
            try {
                gremlinSqlLiteral = GremlinSqlFactory
                        .createNodeCheckType(sqlBasicCall.getOperandList().get(1), GremlinSqlLiteral.class);
            } catch (SQLException e) {
                throw SqlGremlinError.createNotSupported(SqlGremlinError.UNSUPPORTED_BASIC_LITERALS);
            }
            // Translate the SQL comparison operator into a Gremlin predicate over the literal.
            P<Object> value = getPBySqlComparison(sqlBasicCall, gremlinSqlLiteral.getValue());
            if (op1 instanceof GremlinSqlIdentifier) {
                // if the first operand == GremlinSqlIdentifier => then a request of the form "op1 OPERATOR value"
                final GremlinSqlIdentifier gremlinSqlIdentifier = GremlinSqlFactory
                        .createNodeCheckType(sqlBasicCall.getOperandList().get(0), GremlinSqlIdentifier.class);
                generateTraversal(graphTraversal, gremlinSqlIdentifier, inVRename, outVRename, value);
            } else if (op1 instanceof GremlinSqlBasicCall) {
                // if the first operand == GremlinSqlBasicCall =>
                // then a request of the form "FUNCTION(op1) OPERATOR value"
                final GremlinSqlBasicCall gremlinSqlBasicCall = ((GremlinSqlBasicCall) op1);
                final GremlinSqlIdentifier gremlinSqlIdentifier = GremlinSqlFactory
                        .createNodeCheckType(
                                gremlinSqlBasicCall.getSqlBasicCall().getOperandList().get(0),
                                GremlinSqlIdentifier.class);
                final SqlOperator operator = gremlinSqlBasicCall.getSqlBasicCall().getOperator();
                Function<GraphTraversal<?, ?>, GraphTraversal<?, ?>> function =
                        getTraversalFunctionByOperator(operator);
                String table = gremlinSqlIdentifier.getName(0);
                // filtering by where FUNCTION(op) is value after group by
                // NOTE(review): if the table matches neither rename, no filter is applied —
                // confirm this silent fall-through is intentional.
                if (table.equals(inVRename)) {
                    graphTraversal.where(__.group().by(
                                    function.apply(__.unfold()
                                            .inV()
                                            .has(gremlinSqlIdentifier.getName(1))
                                            .values(gremlinSqlIdentifier.getName(1))))
                            .unfold().where(__.select(Column.keys).is(value)));
                } else if (table.equals(outVRename)) {
                    graphTraversal.where(__.group().by(
                                    function.apply(__.unfold()
                                            .outV()
                                            .has(gremlinSqlIdentifier.getName(1))
                                            .values(gremlinSqlIdentifier.getName(1))))
                            .unfold().where(__.select(Column.keys).is(value)));
                }
            }
        }
        return;
    } else if (sqlNode instanceof SqlIdentifier) {
        // Bare identifier: treat as a boolean column and filter where it is true.
        final GremlinSqlIdentifier gremlinSqlIdentifier = GremlinSqlFactory
                .createNodeCheckType(sqlNode, GremlinSqlIdentifier.class);
        generateTraversal(graphTraversal, gremlinSqlIdentifier, inVRename, outVRename, true);
        return;
    }
    throw SqlGremlinError.createNotSupported(SqlGremlinError.WHERE_BASIC_LITERALS);
}
/**
 * Appends a has-filter on the in- or out-vertex property named by the identifier.
 *
 * @param graphTraversal        traversal to append the filter to
 * @param gremlinSqlIdentifier  identifier of the form table.column
 * @param inVRename             renamed table mapped to the in-vertex side
 * @param outVRename            renamed table mapped to the out-vertex side
 * @param value                 value or Gremlin predicate the property must match
 * @throws SQLException if the identifier names cannot be resolved
 */
private void generateTraversal(final GraphTraversal<?, ?> graphTraversal,
                               final GremlinSqlIdentifier gremlinSqlIdentifier, final String inVRename,
                               final String outVRename, final Object value) throws SQLException {
    // filtering by the inV/outV property
    // NOTE(review): if the table matches neither rename, no filter is appended at all —
    // confirm this silent fall-through is intentional.
    String table = gremlinSqlIdentifier.getName(0);
    if (table.equals(inVRename)) {
        graphTraversal.where(__.unfold().inV().has(gremlinSqlIdentifier.getName(1), value));
    } else if (table.equals(outVRename)) {
        graphTraversal.where(__.unfold().outV().has(gremlinSqlIdentifier.getName(1), value));
    }
}
/**
 * Maps a SQL comparison operator to the equivalent Gremlin predicate over a value.
 *
 * @param sqlBasicCall call whose operator kind determines the predicate
 * @param value        comparison value wrapped by the predicate
 * @return Gremlin predicate equivalent to the SQL comparison
 * @throws SQLNotSupportedException if the operator is not a supported comparison
 */
private P<Object> getPBySqlComparison(SqlBasicCall sqlBasicCall, Object value) throws SQLNotSupportedException {
    final SqlKind comparisonKind = sqlBasicCall.getOperator().kind;
    switch (comparisonKind) {
        case EQUALS:
            return P.eq(value);
        case NOT_EQUALS:
            return P.neq(value);
        case LESS_THAN:
            return P.lt(value);
        case LESS_THAN_OR_EQUAL:
            return P.lte(value);
        case GREATER_THAN:
            return P.gt(value);
        case GREATER_THAN_OR_EQUAL:
            return P.gte(value);
        default:
            throw SqlGremlinError.createNotSupported(SqlGremlinError.UNKNOWN_OPERATOR);
    }
}
/**
 * Maps a SQL aggregate operator to the Gremlin traversal step that computes it.
 *
 * @param operator aggregate operator (COUNT, MAX, MIN, AVG, SUM)
 * @return function appending the corresponding aggregation step to a traversal
 * @throws SQLNotSupportedException if the operator is not a supported aggregate
 */
private Function<GraphTraversal<?, ?>, GraphTraversal<?, ?>> getTraversalFunctionByOperator(SqlOperator operator)
        throws SQLNotSupportedException {
    switch (operator.kind) {
        case AVG:
            return GraphTraversal::mean;
        case COUNT:
            return GraphTraversal::count;
        case MAX:
            return GraphTraversal::max;
        case MIN:
            return GraphTraversal::min;
        case SUM:
            return GraphTraversal::sum;
        default:
            throw SqlGremlinError.createNotSupported(SqlGremlinError.UNKNOWN_OPERATOR);
    }
}
}
| 7,428 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select/StepDirection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.select;
/**
* This enum just holds the direction of a given step in Gremlin.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public enum StepDirection {
    /** Step runs toward the in-vertex side of the edge. */
    In,
    /** Step runs toward the out-vertex side of the edge. */
    Out,
    /** Step has no associated direction. */
    None
}
| 7,429 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/select/join/GremlinSqlJoinComparison.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.select.join;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.calcite.sql.SqlBinaryOperator;
import org.apache.calcite.sql.SqlKind;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This module is a GremlinSql equivalent of Calcite's SqlBinaryOperator in the context of a comparison of a JOIN.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlJoinComparison {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlJoinComparison.class);
    // See SqlKind.BINARY_COMPARISON for list of aggregate functions in Calcite.
    private final SqlBasicCall sqlBasicCall;
    private final SqlBinaryOperator sqlBinaryOperator;
    private final SqlMetadata sqlMetadata;
    private final List<GremlinSqlNode> gremlinSqlNodes;

    public GremlinSqlJoinComparison(final SqlBasicCall sqlBasicCall,
                                    final SqlBinaryOperator sqlBinaryOperator,
                                    final List<GremlinSqlNode> gremlinSqlNodes,
                                    final SqlMetadata sqlMetadata) {
        this.sqlBasicCall = sqlBasicCall;
        this.sqlBinaryOperator = sqlBinaryOperator;
        this.gremlinSqlNodes = gremlinSqlNodes;
        this.sqlMetadata = sqlMetadata;
    }

    /**
     * Returns whether this join comparison uses the equality operator.
     *
     * @return true if the binary operator is '='
     */
    public boolean isEquals() {
        return sqlBinaryOperator.kind.sql.equals(SqlKind.EQUALS.sql);
    }

    /**
     * Returns the column name that the given renamed table contributes to the comparison.
     *
     * @param renamedTable renamed table to look up
     * @return column name belonging to the renamed table
     * @throws SQLException if a comparison operand is not an identifier, or no
     *                      operand belongs to the renamed table
     */
    public String getColumn(final String renamedTable) throws SQLException {
        for (final GremlinSqlNode node : gremlinSqlNodes) {
            if (!(node instanceof GremlinSqlIdentifier)) {
                throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_JOIN_NODES);
            }
            final GremlinSqlIdentifier identifier = (GremlinSqlIdentifier) node;
            if (identifier.getName(0).equals(renamedTable)) {
                return identifier.getName(1);
            }
        }
        throw SqlGremlinError.create(SqlGremlinError.NO_JOIN_COLUMN);
    }
}
| 7,430 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlAsOperator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import org.apache.calcite.sql.SqlAsOperator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This module is a GremlinSql equivalent of Calcite's SqlAsOperator.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlAsOperator extends GremlinSqlOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlAsOperator.class);
    private final SqlAsOperator sqlAsOperator;
    private final SqlMetadata sqlMetadata;
    private final List<GremlinSqlNode> operands;

    public GremlinSqlAsOperator(final SqlAsOperator sqlAsOperator, final List<GremlinSqlNode> gremlinSqlNodes,
                                final SqlMetadata sqlMetadata) {
        super(sqlAsOperator, gremlinSqlNodes, sqlMetadata);
        this.sqlAsOperator = sqlAsOperator;
        this.sqlMetadata = sqlMetadata;
        this.operands = gremlinSqlNodes;
    }

    /**
     * Registers the actual-to-alias column mapping and appends the traversal
     * steps for the aliased operand.
     *
     * @param graphTraversal traversal to append to
     * @throws SQLException if the first operand has an unexpected type
     */
    @Override
    protected void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        sqlMetadata.addRenamedColumn(getActual(), getRename());
        final GremlinSqlNode target = operands.get(0);
        if (target instanceof GremlinSqlBasicCall) {
            ((GremlinSqlBasicCall) target).generateTraversal(graphTraversal);
        } else if (!(target instanceof GremlinSqlIdentifier) && !(target instanceof GremlinSqlLiteral)) {
            throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_OPERAND);
        }
        // An identifier operand is applied directly whether or not an alias operand follows it.
        final boolean expectedArity = (operands.size() == 1) || (operands.size() == 2);
        if (expectedArity && target instanceof GremlinSqlIdentifier) {
            SqlTraversalEngine.applySqlIdentifier((GremlinSqlIdentifier) target, sqlMetadata, graphTraversal);
        }
    }

    /**
     * Returns the nameIdx-th name of the operandIdx-th operand, which must be an identifier.
     *
     * @param operandIdx index of the operand
     * @param nameIdx    index of the name within the identifier
     * @return the requested name
     * @throws SQLException if the index is out of range or the operand is not an identifier
     */
    public String getName(final int operandIdx, final int nameIdx) throws SQLException {
        if (operandIdx >= operands.size() || !(operands.get(operandIdx) instanceof GremlinSqlIdentifier)) {
            throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_OPERAND_INDEX);
        }
        return ((GremlinSqlIdentifier) operands.get(operandIdx)).getName(nameIdx);
    }

    /**
     * Returns the actual (pre-alias) name of the first operand of the AS expression.
     *
     * @return actual column or expression name
     * @throws SQLException if the operand count is not two or the operand type is unexpected
     */
    public String getActual() throws SQLException {
        if (operands.size() != 2) {
            throw SqlGremlinError.create(SqlGremlinError.OPERANDS_EXPECTED_TWO_SQL_AS);
        }
        final GremlinSqlNode actualOperand = operands.get(0);
        if (actualOperand instanceof GremlinSqlIdentifier) {
            return ((GremlinSqlIdentifier) actualOperand).getColumn();
        }
        if (actualOperand instanceof GremlinSqlBasicCall) {
            return ((GremlinSqlBasicCall) actualOperand).getActual();
        }
        if (actualOperand instanceof GremlinSqlLiteral) {
            return ((GremlinSqlLiteral) actualOperand).getValue().toString();
        }
        throw SqlGremlinError.create(SqlGremlinError.FAILED_GET_NAME_ACTUAL);
    }

    /**
     * Returns the alias (post-AS) name of the second operand of the AS expression.
     *
     * @return alias name
     * @throws SQLException if the operand count is not two or the operand type is unexpected
     */
    public String getRename() throws SQLException {
        if (operands.size() != 2) {
            throw SqlGremlinError.create(SqlGremlinError.OPERANDS_EXPECTED_TWO_SQL_AS);
        }
        final GremlinSqlNode renameOperand = operands.get(1);
        if (renameOperand instanceof GremlinSqlIdentifier) {
            return ((GremlinSqlIdentifier) renameOperand).getColumn();
        }
        if (renameOperand instanceof GremlinSqlBasicCall) {
            return ((GremlinSqlBasicCall) renameOperand).getRename();
        }
        throw SqlGremlinError.create(SqlGremlinError.FAILED_GET_NAME_RENAME);
    }
}
| 7,431 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlTraversalAppender.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import java.sql.SQLException;
import java.util.List;
/**
* Interface for traversal appending function.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public interface GremlinSqlTraversalAppender {
    /**
     * Appends traversal steps for the given operands to the supplied traversal.
     *
     * @param graphTraversal traversal to append to
     * @param operands       operands driving the appended steps
     * @throws SQLException if the operands cannot be translated
     */
    void appendTraversal(GraphTraversal<?, ?> graphTraversal, List<GremlinSqlNode> operands) throws SQLException;
}
| 7,432 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlBasicCall.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import lombok.Getter;
import org.apache.calcite.sql.SqlBasicCall;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlFactory;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.aggregate.GremlinSqlAggFunction;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlBinaryOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This module is a GremlinSql equivalent of Calcite's SqlBasicCall.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
@Getter
public class GremlinSqlBasicCall extends GremlinSqlNode {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlBasicCall.class);
    private final SqlBasicCall sqlBasicCall;
    private final GremlinSqlOperator gremlinSqlOperator;
    private final List<GremlinSqlNode> gremlinSqlNodes;

    /**
     * Wraps a Calcite SqlBasicCall, converting its operator and operands into
     * their GremlinSql equivalents via the factory.
     *
     * @param sqlBasicCall call to wrap
     * @param sqlMetadata  metadata shared across the conversion
     * @throws SQLException if the operator or an operand type is unsupported
     */
    public GremlinSqlBasicCall(final SqlBasicCall sqlBasicCall, final SqlMetadata sqlMetadata)
            throws SQLException {
        super(sqlBasicCall, sqlMetadata);
        this.sqlBasicCall = sqlBasicCall;
        gremlinSqlOperator =
                GremlinSqlFactory.createOperator(sqlBasicCall.getOperator(), sqlBasicCall.getOperandList());
        gremlinSqlNodes = GremlinSqlFactory.createNodeList(sqlBasicCall.getOperandList());
    }

    // Verifies the operand count matches the operator type (AS needs 2, aggregates need 1).
    void validate() throws SQLException {
        if (gremlinSqlOperator instanceof GremlinSqlAsOperator) {
            if (gremlinSqlNodes.size() != 2) {
                throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_NODE_GREMLINSQLBASICCALL);
            }
        } else if (gremlinSqlOperator instanceof GremlinSqlAggFunction) {
            if (gremlinSqlNodes.size() != 1) {
                throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_NODE_GREMLINSQLAGGFUNCTION);
            }
        }
    }

    /**
     * Validates this call and delegates traversal generation to its operator.
     *
     * @param graphTraversal traversal to append to
     * @throws SQLException if validation fails or generation is unsupported
     */
    public void generateTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        validate();
        gremlinSqlOperator.appendOperatorTraversal(graphTraversal);
    }

    /**
     * Returns the output (renamed) column name for this call, resolved per operator type.
     *
     * @return renamed column name
     * @throws SQLException if the rename cannot be determined for this operator
     */
    public String getRename() throws SQLException {
        if (gremlinSqlOperator instanceof GremlinSqlAsOperator) {
            return ((GremlinSqlAsOperator) gremlinSqlOperator).getRename();
        } else if (gremlinSqlOperator instanceof GremlinSqlAggFunction) {
            if (gremlinSqlNodes.size() == 1 &&
                    (gremlinSqlNodes.get(0) instanceof GremlinSqlIdentifier ||
                            gremlinSqlNodes.get(0) instanceof GremlinSqlLiteral)) {
                // returns the formatted column name for aggregations
                return ((GremlinSqlAggFunction) gremlinSqlOperator).getNewName();
            }
        } else if (gremlinSqlOperator instanceof GremlinSqlBinaryOperator) {
            return ((GremlinSqlBinaryOperator) gremlinSqlOperator).getNewName();
        } else if (gremlinSqlOperator instanceof GremlinSqlPrefixOperator) {
            return ((GremlinSqlPrefixOperator) gremlinSqlOperator).getNewName();
        } else if (gremlinSqlOperator instanceof GremlinSqlPostfixOperator) {
            return ((GremlinSqlPostfixOperator) gremlinSqlOperator).getNewName();
        }
        throw SqlGremlinError.create(SqlGremlinError.COLUMN_RENAME_UNDETERMINED);
    }

    /**
     * Returns the actual (pre-alias) column name for this call, resolved per operator type.
     * For non-AS operators this is the operator's generated name.
     *
     * @return actual column name
     * @throws SQLException if the actual name cannot be determined for this operator
     */
    public String getActual() throws SQLException {
        if (gremlinSqlOperator instanceof GremlinSqlAsOperator) {
            return ((GremlinSqlAsOperator) gremlinSqlOperator).getActual();
        } else if (gremlinSqlOperator instanceof GremlinSqlAggFunction) {
            return ((GremlinSqlAggFunction) gremlinSqlOperator).getNewName();
        } else if (gremlinSqlOperator instanceof GremlinSqlBinaryOperator) {
            return ((GremlinSqlBinaryOperator) gremlinSqlOperator).getNewName();
        } else if (gremlinSqlOperator instanceof GremlinSqlPrefixOperator) {
            return ((GremlinSqlPrefixOperator) gremlinSqlOperator).getNewName();
        } else if (gremlinSqlOperator instanceof GremlinSqlPostfixOperator) {
            return ((GremlinSqlPostfixOperator) gremlinSqlOperator).getNewName();
        }
        throw SqlGremlinError.create(SqlGremlinError.COLUMN_ACTUAL_NAME_UNDETERMINED);
    }

    /**
     * Returns the output column of a single-operand call: the identifier's column,
     * or the AS operator's actual name.
     *
     * @return output column name
     * @throws SQLException if the call does not have exactly one resolvable operand
     */
    public String getOutputColumn() throws SQLException {
        if (gremlinSqlNodes.size() != 1) {
            throw SqlGremlinError.create(SqlGremlinError.COLUMN_ACTUAL_NAME_UNDETERMINED);
        }
        if (gremlinSqlNodes.get(0) instanceof GremlinSqlIdentifier) {
            return ((GremlinSqlIdentifier) gremlinSqlNodes.get(0)).getColumn();
        } else if (gremlinSqlOperator instanceof GremlinSqlAsOperator) {
            return ((GremlinSqlAsOperator) gremlinSqlOperator).getActual();
        }
        throw SqlGremlinError.create(SqlGremlinError.COLUMN_ACTUAL_NAME_UNDETERMINED);
    }
}
| 7,433 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlPostfixOperator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlPostfixOperator;
import org.apache.tinkerpop.gremlin.process.traversal.Order;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This module is a GremlinSql equivalent of Calcite's SqlPostFixOperator.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlPostfixOperator extends GremlinSqlOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlPostfixOperator.class);
    private final SqlPostfixOperator sqlPostfixOperator;
    private final SqlMetadata sqlMetadata;
    private final List<GremlinSqlNode> operands;

    public GremlinSqlPostfixOperator(final SqlPostfixOperator sqlPostfixOperator, final List<GremlinSqlNode> gremlinSqlNodes,
                                     final SqlMetadata sqlMetadata) {
        super(sqlPostfixOperator, gremlinSqlNodes, sqlMetadata);
        this.sqlPostfixOperator = sqlPostfixOperator;
        this.sqlMetadata = sqlMetadata;
        this.operands = gremlinSqlNodes;
    }

    /**
     * Returns the display name "&lt;operand&gt; &lt;operator&gt;", e.g. "col DESC".
     *
     * @return formatted postfix-expression name
     * @throws SQLException if the operand name cannot be resolved
     */
    public String getNewName() throws SQLException {
        return String.format("%s %s", getOperandName(operands.get(0)), sqlPostfixOperator.kind.sql);
    }

    /**
     * Returns the Gremlin sort order for this postfix operator (DESC only).
     *
     * @return descending order
     * @throws SQLException if the operator is not DESC
     */
    public Order getOrder() throws SQLException {
        if (sqlPostfixOperator.kind.equals(SqlKind.DESCENDING)) {
            return Order.desc;
        }
        throw SqlGremlinError.create(SqlGremlinError.NO_ORDER, sqlPostfixOperator.kind.sql);
    }

    /**
     * Appends the traversal steps for the first operand.
     *
     * @param graphTraversal traversal to append to
     * @throws SQLException if the first operand has an unexpected type
     */
    @Override
    protected void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        final GremlinSqlNode target = operands.get(0);
        if (target instanceof GremlinSqlBasicCall) {
            ((GremlinSqlBasicCall) target).generateTraversal(graphTraversal);
        } else if (!(target instanceof GremlinSqlIdentifier) && !(target instanceof GremlinSqlLiteral)) {
            throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_OPERAND);
        }
        // An identifier operand is applied directly for both one- and two-operand calls.
        final boolean expectedArity = (operands.size() == 1) || (operands.size() == 2);
        if (expectedArity && target instanceof GremlinSqlIdentifier) {
            SqlTraversalEngine.applySqlIdentifier((GremlinSqlIdentifier) target, sqlMetadata, graphTraversal);
        }
    }
}
| 7,434 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlOperator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import lombok.AllArgsConstructor;
import org.apache.calcite.sql.SqlOperator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.List;
/**
* This abstract class is a GremlinSql equivalent of Calcite's SqlOperator.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
@AllArgsConstructor
public abstract class GremlinSqlOperator {
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlOperator.class);
    // NOTE: field order matters — @AllArgsConstructor generates the constructor
    // subclasses call positionally: (operator, operands, metadata).
    private final SqlOperator sqlOperator;
    private final List<GremlinSqlNode> sqlOperands;
    private final SqlMetadata sqlMetadata;

    // Subclasses implement operator-specific traversal appending.
    protected abstract void appendTraversal(GraphTraversal<?, ?> graphTraversal) throws SQLException;

    /**
     * Validates the operand count (must be one or two) and delegates to the
     * subclass-specific appendTraversal.
     *
     * @param graphTraversal traversal to append to
     * @throws SQLException if the operand count is invalid or appending fails
     */
    public void appendOperatorTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        final int operandCount = sqlOperands.size();
        if (operandCount > 2) {
            throw SqlGremlinError.create(SqlGremlinError.OPERANDS_MORE_THAN_TWO);
        }
        if (operandCount == 0) {
            throw SqlGremlinError.create(SqlGremlinError.OPERANDS_EMPTY);
        }
        appendTraversal(graphTraversal);
    }

    /**
     * Resolves a display name for an operand: "*" or the column for identifiers,
     * the literal value's string form, or a nested call's rename.
     *
     * @param operand operand to name
     * @return display name of the operand
     * @throws SQLException if the operand type is unsupported
     */
    protected String getOperandName(final GremlinSqlNode operand) throws SQLException {
        if (operand instanceof GremlinSqlIdentifier) {
            final GremlinSqlIdentifier identifier = (GremlinSqlIdentifier) operand;
            return identifier.isStar() ? "*" : identifier.getColumn();
        }
        if (operand instanceof GremlinSqlLiteral) {
            return ((GremlinSqlLiteral) operand).getValue().toString();
        }
        if (operand instanceof GremlinSqlBasicCall) {
            return ((GremlinSqlBasicCall) operand).getRename();
        }
        throw SqlGremlinError.createNotSupported(SqlGremlinError.UNSUPPORTED_OPERAND_TYPE, operand.getClass().getName());
    }
}
| 7,435 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/GremlinSqlPrefixOperator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlPrefixOperator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlBinaryOperator;
import java.sql.SQLException;
import java.util.List;
/**
* This module is a GremlinSql equivalent of Calcite's SqlPostFixOperator.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlPrefixOperator extends GremlinSqlOperator {
private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlPrefixOperator.class);
private final SqlPrefixOperator sqlPrefixOperator;
private final SqlMetadata sqlMetadata;
private final List<GremlinSqlNode> sqlOperands;
public GremlinSqlPrefixOperator(final SqlPrefixOperator sqlPrefixOperator,
final List<GremlinSqlNode> gremlinSqlNodes,
final SqlMetadata sqlMetadata) {
super(sqlPrefixOperator, gremlinSqlNodes, sqlMetadata);
this.sqlPrefixOperator = sqlPrefixOperator;
this.sqlMetadata = sqlMetadata;
this.sqlOperands = gremlinSqlNodes;
}
public String getNewName() throws SQLException {
return String.format("%s %s", sqlPrefixOperator.kind.sql, getOperandName(sqlOperands.get(0)));
}
public boolean isNot() {
return sqlPrefixOperator.kind.equals(SqlKind.NOT);
}
@Override
protected void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
// If we are done our filtering, have a not operator, and have a single GremlinSqlBasicCall operand,
// we can then use not(<graph_traversal>).
if (sqlMetadata.isDoneFilters() && isNot() &&
sqlOperands.size() == 1 && sqlOperands.get(0) instanceof GremlinSqlBasicCall) {
final GremlinSqlBinaryOperator pseudoGremlinSqlBinaryOperator =
new GremlinSqlBinaryOperator(sqlPrefixOperator, sqlOperands, sqlMetadata);
pseudoGremlinSqlBinaryOperator.appendTraversal(graphTraversal);
}
}
}
| 7,436 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/aggregate/GremlinSqlAggFunction.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.aggregate;
import org.apache.calcite.sql.SqlAggFunction;
import org.apache.calcite.sql.SqlKind;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlTraversalAppender;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic.GremlinSqlLiteral;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * GremlinSql equivalent of Calcite's SqlAggFunction: translates SQL aggregate calls
 * (AVG, COUNT, SUM, MIN, MAX) into the corresponding Gremlin aggregation steps.
 *
 * @author Lyndon Bauto (lyndonb@bitquilltech.com)
 */
public class GremlinSqlAggFunction extends GremlinSqlOperator {
    // See SqlKind.AGGREGATE for the full list of aggregate functions in Calcite;
    // only the kinds registered below are supported here.
    private static final Map<SqlKind, GremlinSqlTraversalAppender> AGGREGATE_APPENDERS = new HashMap<>();
    // Aggregates whose Gremlin result type differs from the input column type.
    private static final Map<SqlKind, String> AGGREGATE_TYPE_MAP = new HashMap<>();

    static {
        AGGREGATE_APPENDERS.put(SqlKind.AVG, (graphTraversal, operands) -> graphTraversal.mean());
        AGGREGATE_APPENDERS.put(SqlKind.COUNT, (graphTraversal, operands) -> graphTraversal.count());
        AGGREGATE_APPENDERS.put(SqlKind.SUM, (graphTraversal, operands) -> graphTraversal.sum());
        AGGREGATE_APPENDERS.put(SqlKind.MIN, (graphTraversal, operands) -> graphTraversal.min());
        AGGREGATE_APPENDERS.put(SqlKind.MAX, (graphTraversal, operands) -> graphTraversal.max());
        AGGREGATE_TYPE_MAP.put(SqlKind.AVG, "double");
        AGGREGATE_TYPE_MAP.put(SqlKind.COUNT, "long");
    }

    private final SqlAggFunction sqlAggFunction;
    private final SqlMetadata sqlMetadata;
    private final List<GremlinSqlNode> sqlOperands;

    public GremlinSqlAggFunction(final SqlAggFunction sqlOperator,
                                 final List<GremlinSqlNode> gremlinSqlNodes,
                                 final SqlMetadata sqlMetadata) {
        super(sqlOperator, gremlinSqlNodes, sqlMetadata);
        this.sqlAggFunction = sqlOperator;
        this.sqlMetadata = sqlMetadata;
        this.sqlOperands = gremlinSqlNodes;
    }

    @Override
    protected void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        // First resolve the aggregated operand onto the traversal, then apply the
        // aggregation step itself.
        final GremlinSqlNode firstOperand = sqlOperands.get(0);
        if (firstOperand instanceof GremlinSqlBasicCall) {
            ((GremlinSqlBasicCall) firstOperand).generateTraversal(graphTraversal);
        } else if (!(firstOperand instanceof GremlinSqlIdentifier) &&
                !(firstOperand instanceof GremlinSqlLiteral)) {
            throw SqlGremlinError.create(SqlGremlinError.UNEXPECTED_OPERAND);
        }
        if (sqlOperands.size() == 1) {
            if (firstOperand instanceof GremlinSqlIdentifier) {
                SqlTraversalEngine.applySqlIdentifier((GremlinSqlIdentifier) firstOperand, sqlMetadata,
                        graphTraversal);
            } else if (firstOperand instanceof GremlinSqlLiteral) {
                ((GremlinSqlLiteral) firstOperand).appendTraversal(graphTraversal);
            }
        }
        final GremlinSqlTraversalAppender appender = AGGREGATE_APPENDERS.get(sqlAggFunction.kind);
        if (appender == null) {
            throw SqlGremlinError.create(SqlGremlinError.AGGREGATE_NOT_SUPPORTED, sqlAggFunction.kind.sql);
        }
        appender.appendTraversal(graphTraversal, sqlOperands);
        updateOutputTypeMap();
    }

    /**
     * Aggregation columns are named in the form AGG(xxx) when no rename is specified in SQL.
     *
     * @throws SQLException If the aggregated operand is neither an identifier nor a literal.
     */
    public String getNewName() throws SQLException {
        final GremlinSqlNode operand = lastOperand();
        if (operand instanceof GremlinSqlIdentifier) {
            final GremlinSqlIdentifier identifier = (GremlinSqlIdentifier) operand;
            return String.format("%s(%s)", sqlAggFunction.kind.name(),
                    identifier.isStar() ? "*" : identifier.getColumn());
        }
        if (operand instanceof GremlinSqlLiteral) {
            return String.format("%s(%s)", sqlAggFunction.kind.name(),
                    ((GremlinSqlLiteral) operand).getValue().toString());
        }
        throw SqlGremlinError.create(SqlGremlinError.FAILED_RENAME_GREMLINSQLAGGOPERATOR);
    }

    /**
     * Returns the underlying (pre-aggregation) column name or literal value.
     *
     * @throws SQLException If the aggregated operand is neither an identifier nor a literal.
     */
    public String getActual() throws SQLException {
        final GremlinSqlNode operand = lastOperand();
        if (operand instanceof GremlinSqlIdentifier) {
            return ((GremlinSqlIdentifier) operand).getColumn();
        }
        if (operand instanceof GremlinSqlLiteral) {
            return ((GremlinSqlLiteral) operand).getValue().toString();
        }
        throw SqlGremlinError.create(SqlGremlinError.FAILED_RENAME_GREMLINSQLAGGOPERATOR);
    }

    // The aggregated operand is the last one (a rename, when present, precedes it).
    private GremlinSqlNode lastOperand() {
        return sqlOperands.get(sqlOperands.size() - 1);
    }

    // Records the aggregate's output type (when it differs from the input) and the
    // actual->display name mapping in the query metadata.
    private void updateOutputTypeMap() throws SQLException {
        final String outputType = AGGREGATE_TYPE_MAP.get(sqlAggFunction.kind);
        if (outputType != null) {
            sqlMetadata.addOutputType(getNewName(), outputType);
        }
        sqlMetadata.addRenamedColumn(getActual(), getNewName());
    }
}
| 7,437 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/logic/GremlinSqlLiteral.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import java.sql.SQLException;
/**
* This module is a GremlinSql equivalent of Calcite's SqlLiteral.
*
* @author Lyndon Bauto (lyndonb@bitquilltech.com)
*/
public class GremlinSqlLiteral extends GremlinSqlNode {
final SqlLiteral sqlLiteral;
public GremlinSqlLiteral(final SqlLiteral sqlLiteral,
final SqlMetadata sqlMetadata) {
super(sqlLiteral, sqlMetadata);
this.sqlLiteral = sqlLiteral;
}
public void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
graphTraversal.constant(getValue());
}
public Object getValue() {
return (sqlLiteral.getTypeName().equals(SqlTypeName.CHAR)) ? sqlLiteral.toValue() : sqlLiteral.getValue();
}
}
| 7,438 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/converter/ast/nodes/operator/logic/GremlinSqlBinaryOperator.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.logic;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlOperator;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import software.aws.neptune.gremlin.adapter.converter.SqlTraversalEngine;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.GremlinSqlNode;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operands.GremlinSqlIdentifier;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlBasicCall;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlPrefixOperator;
import software.aws.neptune.gremlin.adapter.converter.ast.nodes.operator.GremlinSqlTraversalAppender;
import software.aws.neptune.gremlin.adapter.util.SqlGremlinError;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
/**
 * GremlinSql equivalent of Calcite's SqlBinaryOperator: translates SQL comparison and
 * boolean operators (=, &lt;&gt;, &lt;, &lt;=, &gt;, &gt;=, AND, OR, NOT) into Gremlin
 * traversal steps. In the WHERE clause the steps act as filters; in the SELECT clause
 * (once filtering is done) they are converted into explicit true/false constants.
 *
 * @author Lyndon Bauto (lyndonb@bitquilltech.com)
 */
public class GremlinSqlBinaryOperator extends GremlinSqlOperator {
    // Alphabet used when generating random step labels (see getRandomString).
    private static final String CHARS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
    private static final Logger LOGGER = LoggerFactory.getLogger(GremlinSqlBinaryOperator.class);
    // Maps each supported SqlKind to the appender that emits its Gremlin steps.
    // NOTE(review): constant-style name on a non-static field — every instance rebuilds
    // this map; consider making it static.
    private final Map<SqlKind, GremlinSqlTraversalAppender> BINARY_APPENDERS =
            new HashMap<SqlKind, GremlinSqlTraversalAppender>() {
                {
                    put(SqlKind.EQUALS, new GremlinSqlBinaryOperatorAppenderEquals());
                    put(SqlKind.NOT_EQUALS, new GremlinSqlBinaryOperatorAppenderNotEquals());
                    put(SqlKind.GREATER_THAN, new GremlinSqlBinaryOperatorAppenderGreater());
                    put(SqlKind.GREATER_THAN_OR_EQUAL, new GremlinSqlBinaryOperatorAppenderGreaterEquals());
                    put(SqlKind.LESS_THAN, new GremlinSqlBinaryOperatorAppenderLess());
                    put(SqlKind.LESS_THAN_OR_EQUAL, new GremlinSqlBinaryOperatorAppenderLessEquals());
                    put(SqlKind.AND, new GremlinSqlBinaryOperatorAppenderAnd());
                    put(SqlKind.OR, new GremlinSqlBinaryOperatorAppenderOr());
                    put(SqlKind.NOT, new GremlinSqlBinaryOperatorAppenderNot());
                }
            };
    // Operator being translated; despite the field name this may also be the NOT prefix
    // operator (see GremlinSqlPrefixOperator.appendTraversal).
    private final SqlOperator sqlBinaryOperator;
    private final SqlMetadata sqlMetadata;
    // Two operands for binary operators, one for NOT.
    private final List<GremlinSqlNode> sqlOperands;
    public GremlinSqlBinaryOperator(final SqlOperator sqlBinaryOperator,
                                    final List<GremlinSqlNode> sqlOperands,
                                    final SqlMetadata sqlMetadata) {
        super(sqlBinaryOperator, sqlOperands, sqlMetadata);
        this.sqlBinaryOperator = sqlBinaryOperator;
        this.sqlMetadata = sqlMetadata;
        this.sqlOperands = sqlOperands;
    }
    /**
     * Appends steps comparing the value of {@code identifier} to a boolean constant.
     * Used for bare boolean columns in predicates (e.g. "WHERE flag" / "WHERE NOT flag").
     *
     * @param sqlMetadata    Metadata for the current query.
     * @param graphTraversal Traversal to append to.
     * @param identifier     Boolean column being tested.
     * @param expectedValue  Value the column must equal for the predicate to hold.
     * @throws SQLException If the identifier cannot be resolved.
     */
    public static void appendBooleanEquals(final SqlMetadata sqlMetadata, GraphTraversal<?, ?> graphTraversal,
                                           final GremlinSqlIdentifier identifier, boolean expectedValue)
            throws SQLException {
        final GraphTraversal graphTraversal1 = __.unfold();
        SqlTraversalEngine.applySqlIdentifier(identifier, sqlMetadata, graphTraversal1);
        final String randomString = getRandomString();
        // Trick: where(label, P.eq(label)) with two by() modulators compares the outputs of
        // the two by() traversals against each other rather than the label with itself.
        graphTraversal.as(randomString).where(randomString, P.eq(randomString));
        if (sqlMetadata.isDoneFilters()) {
            graphTraversal.by(graphTraversal1);
        } else {
            // Inside filters a missing property would drop the row outright, so coalesce
            // substitutes a per-column default value instead.
            graphTraversal.by(__.coalesce(graphTraversal1,
                    __.constant(sqlMetadata.getDefaultCoalesceValue(identifier.getColumn()))));
        }
        graphTraversal.by(__.unfold().constant(expectedValue));
    }
    // Generates a 10-character random label for as()/where() pairs so labels from nested
    // comparisons do not collide with each other.
    private static String getRandomString() {
        final StringBuilder salt = new StringBuilder();
        final Random rnd = new Random();
        while (salt.length() < 10) { // length of the random string.
            final int index = (int) (rnd.nextFloat() * CHARS.length());
            salt.append(CHARS.charAt(index));
        }
        return salt.toString();
    }
    /**
     * Appends this operator's Gremlin steps. Outside filters the comparison result is
     * reified to an explicit boolean constant via fold().choose(); inside filters it is
     * applied directly so that it filters traversers.
     *
     * @throws SQLException If the operator kind is not supported.
     */
    @Override
    public void appendTraversal(final GraphTraversal<?, ?> graphTraversal) throws SQLException {
        if (BINARY_APPENDERS.containsKey(sqlBinaryOperator.kind)) {
            if (sqlMetadata.isDoneFilters()) {
                // If we are outside of filters, we need this to evaluate to true/false, not just filter the result.
                final GraphTraversal<?, ?> subGraphTraversal = __.__();
                BINARY_APPENDERS.get(sqlBinaryOperator.kind).appendTraversal(subGraphTraversal, sqlOperands);
                graphTraversal.fold().choose(
                        subGraphTraversal.count().is(P.gt(0L)), __.constant(true), __.constant(false));
            } else {
                BINARY_APPENDERS.get(sqlBinaryOperator.kind).appendTraversal(graphTraversal, sqlOperands);
            }
        } else {
            // NOTE(review): this reuses the AGGREGATE_NOT_SUPPORTED message key for an
            // unsupported binary operator — UNKNOWN_OPERATOR looks like the intended key;
            // confirm against the error-messages bundle.
            throw SqlGremlinError.create(SqlGremlinError.AGGREGATE_NOT_SUPPORTED, sqlBinaryOperator.kind.sql);
        }
    }
    // Handles an operand that is itself a call: a single-identifier call is treated as a
    // bare boolean column (negated when wrapped in a NOT prefix); anything else expands
    // into its own sub-traversal.
    void handleEmbeddedGremlinSqlBasicCall(final GremlinSqlBasicCall gremlinSqlBasicCall,
                                           final GraphTraversal<?, ?> graphTraversal)
            throws SQLException {
        if (gremlinSqlBasicCall.getGremlinSqlNodes().size() == 1 &&
                gremlinSqlBasicCall.getGremlinSqlNodes().get(0) instanceof GremlinSqlIdentifier) {
            final GremlinSqlIdentifier gremlinSqlIdentifier =
                    (GremlinSqlIdentifier) gremlinSqlBasicCall.getGremlinSqlNodes().get(0);
            if (gremlinSqlBasicCall.getGremlinSqlOperator() instanceof GremlinSqlPrefixOperator) {
                GremlinSqlPrefixOperator gremlinSqlPrefixOperator =
                        (GremlinSqlPrefixOperator) gremlinSqlBasicCall.getGremlinSqlOperator();
                if (gremlinSqlPrefixOperator.isNot()) {
                    appendBooleanEquals(sqlMetadata, graphTraversal, gremlinSqlIdentifier, false);
                } else {
                    throw SqlGremlinError.createNotSupported(SqlGremlinError.ONLY_NOT_PREFIX_SUPPORTED);
                }
            } else {
                appendBooleanEquals(sqlMetadata, graphTraversal, gremlinSqlIdentifier, true);
            }
        } else {
            gremlinSqlBasicCall.generateTraversal(graphTraversal);
        }
    }
    // Builds one sub-traversal per operand for AND/OR/NOT. Each operand becomes a
    // traversal that yields a result only when its predicate holds, which is what
    // Gremlin's and()/or()/not() steps evaluate.
    private GraphTraversal<?, ?>[] getEmbeddedLogicOperators(final List<GremlinSqlNode> operands) throws SQLException {
        if (operands.size() != 2 && operands.size() != 1) {
            throw SqlGremlinError.create(SqlGremlinError.BINARY_AND_PREFIX_OPERAND_COUNT);
        }
        final GraphTraversal<?, ?>[] graphTraversals = new GraphTraversal[operands.size()];
        for (int i = 0; i < operands.size(); i++) {
            graphTraversals[i] = __.__();
            if (sqlMetadata.isDoneFilters()) {
                // If we are outside of filters, the result is grouped so we need to unfold.
                graphTraversals[i].unfold();
            }
            if (operands.get(i) instanceof GremlinSqlIdentifier) {
                // Embedded equalities are SqlBasicCall's.
                // When the equality is struck, it is a pair of a SqlIdentifier and a SqlLiteral.
                // However, boolean columns are exceptions to this, they are just left as a SqlIdentifier
                if (!(operands.get((i == 0) ? 1 : 0) instanceof GremlinSqlLiteral)) {
                    // However, inverted logic booleans are added as SqlBasicCalls with a SqlPrefixOperator.
                    appendBooleanEquals(sqlMetadata, graphTraversals[i], (GremlinSqlIdentifier) operands.get(i), true);
                } else {
                    graphTraversals[i].values(((GremlinSqlIdentifier) operands.get(i)).getColumn());
                }
            } else if (operands.get(i) instanceof GremlinSqlBasicCall) {
                handleEmbeddedGremlinSqlBasicCall((GremlinSqlBasicCall) operands.get(i), graphTraversals[i]);
            } else if (operands.get(i) instanceof GremlinSqlLiteral) {
                ((GremlinSqlLiteral) operands.get(i)).appendTraversal(graphTraversals[i]);
            }
        }
        if (sqlMetadata.isDoneFilters()) {
            // The gremlin and/or do not work on boolean logic. Instead, they evaluate whether a traversal returns
            // something. Since we are returning true/false for our comparison operators in the SELECT clause,
            // we need to filter this based on if it is true. If it is false, this will make it return an empty
            // traversal, as opposed to returning a single element of false. This will make and/or properly evaluate.
            for (int i = 0; i < operands.size(); i++) {
                graphTraversals[i].filter(__.is(P.eq(true)));
            }
        }
        return graphTraversals;
    }
    // Builds the two sub-traversals whose outputs a comparison operator compares via
    // where(...).by(t0).by(t1). Identifier and call operands are wrapped in coalesce so a
    // missing property compares against a default value instead of dropping the row.
    private GraphTraversal<?, ?>[] getTraversalEqualities(final List<GremlinSqlNode> operands)
            throws SQLException {
        if (operands.size() != 2) {
            throw SqlGremlinError.create(SqlGremlinError.BINARY_AND_PREFIX_OPERAND_COUNT);
        }
        final GraphTraversal[] graphTraversals = new GraphTraversal[2];
        for (int i = 0; i < operands.size(); i++) {
            graphTraversals[i] = __.unfold();
            if (operands.get(i) instanceof GremlinSqlIdentifier) {
                final GremlinSqlIdentifier gremlinSqlIdentifier = (GremlinSqlIdentifier) operands.get(i);
                final GraphTraversal subtraversal = __.unfold();
                SqlTraversalEngine.applySqlIdentifier(gremlinSqlIdentifier, sqlMetadata, subtraversal);
                graphTraversals[i] = __.coalesce(subtraversal,
                        __.constant(sqlMetadata.getDefaultCoalesceValue(gremlinSqlIdentifier.getColumn())));
            } else if (operands.get(i) instanceof GremlinSqlBasicCall) {
                final GremlinSqlBasicCall gremlinSqlBasicCall = ((GremlinSqlBasicCall) operands.get(i));
                gremlinSqlBasicCall.generateTraversal(graphTraversals[i]);
                graphTraversals[i] = __.coalesce(graphTraversals[i],
                        __.constant(sqlMetadata.getDefaultCoalesceValue(gremlinSqlBasicCall.getActual())));
            } else if (operands.get(i) instanceof GremlinSqlLiteral) {
                ((GremlinSqlLiteral) operands.get(i)).appendTraversal(graphTraversals[i]);
            }
        }
        return graphTraversals;
    }
    /** Returns the display name of this expression, e.g. "lhs = rhs". */
    public String getNewName() throws SQLException {
        return String.format("%s %s %s", getOperandName(sqlOperands.get(0)), sqlBinaryOperator.kind.sql,
                getOperandName(sqlOperands.get(1)));
    }
    // Appender for SQL "=": compares the two operand traversals for equality.
    public class GremlinSqlBinaryOperatorAppenderEquals implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.eq(randomString))
                    .by(graphTraversals[0]).by(graphTraversals[1]);
        }
    }
    // Appender for SQL "<>".
    public class GremlinSqlBinaryOperatorAppenderNotEquals implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.neq(randomString))
                    .by(graphTraversals[0]).by(graphTraversals[1]);
        }
    }
    // Appender for SQL ">".
    public class GremlinSqlBinaryOperatorAppenderGreater implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.gt(randomString))
                    .by(graphTraversals[0]).by(graphTraversals[1]);
        }
    }
    // Appender for SQL ">=".
    public class GremlinSqlBinaryOperatorAppenderGreaterEquals implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.gte(randomString))
                    .by(graphTraversals[0]).by(graphTraversals[1]);
        }
    }
    // Appender for SQL "<".
    public class GremlinSqlBinaryOperatorAppenderLess implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.lt(randomString)).by(graphTraversals[0])
                    .by(graphTraversals[1]);
        }
    }
    // Appender for SQL "<=".
    public class GremlinSqlBinaryOperatorAppenderLessEquals implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final String randomString = getRandomString();
            final GraphTraversal<?, ?>[] graphTraversals = getTraversalEqualities(operands);
            graphTraversal.as(randomString).where(randomString, P.lte(randomString)).by(graphTraversals[0])
                    .by(graphTraversals[1]);
        }
    }
    // Appender for SQL AND: both operand traversals must yield a result.
    public class GremlinSqlBinaryOperatorAppenderAnd implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            graphTraversal.and(getEmbeddedLogicOperators(operands));
        }
    }
    // Appender for SQL OR: at least one operand traversal must yield a result.
    public class GremlinSqlBinaryOperatorAppenderOr implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            graphTraversal.or(getEmbeddedLogicOperators(operands));
        }
    }
    // Appender for SQL NOT: the single operand traversal must yield no result.
    public class GremlinSqlBinaryOperatorAppenderNot implements GremlinSqlTraversalAppender {
        public void appendTraversal(final GraphTraversal<?, ?> graphTraversal, final List<GremlinSqlNode> operands)
                throws SQLException {
            final GraphTraversal<?, ?>[] graphTraversals = getEmbeddedLogicOperators(operands);
            // Should never happen.
            if (graphTraversals.length != 1) {
                throw SqlGremlinError.create(SqlGremlinError.BINARY_AND_PREFIX_OPERAND_COUNT);
            }
            graphTraversal.not(graphTraversals[0]);
        }
    }
}
| 7,439 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/util/SqlGremlinError.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.util;
import org.slf4j.Logger;
import java.sql.SQLException;
import java.util.Locale;
import java.util.ResourceBundle;
/**
 * Enum representing the possible error messages and lookup facilities for localization.
 * Each constant doubles as the lookup key into the "error-messages" resource bundle; the
 * bundle strings may contain format specifiers that are filled in via String.format.
 */
public enum SqlGremlinError {
    IDENTIFIER_INDEX_OUT_OF_BOUNDS,
    IDENTIFIER_LIST_EMPTY,
    OPERANDS_MORE_THAN_TWO,
    OPERANDS_EMPTY,
    IDENTIFIER_SIZE_INCORRECT,
    ID_BASED_APPEND,
    SCHEMA_NOT_SET,
    SQL_SELECT_ONLY,
    PARSE_ERROR,
    EDGE_LABEL_END_MISMATCH,
    EDGE_EXPECTED,
    TABLE_DOES_NOT_EXIST,
    UNKNOWN_NODE,
    UNKNOWN_NODE_GETFROM,
    UNKNOWN_NODE_ISTABLE,
    UNKNOWN_OPERATOR,
    TYPE_MISMATCH,
    ERROR_TABLE,
    UNEXPECTED_OPERAND,
    UNEXPECTED_OPERAND_INDEX,
    OPERANDS_EXPECTED_TWO_SQL_AS,
    FAILED_GET_NAME_ACTUAL,
    FAILED_GET_NAME_RENAME,
    UNEXPECTED_NODE_GREMLINSQLBASICCALL,
    UNEXPECTED_NODE_GREMLINSQLAGGFUNCTION,
    COLUMN_RENAME_UNDETERMINED,
    COLUMN_ACTUAL_NAME_UNDETERMINED,
    FAILED_RENAME_GREMLINSQLAGGOPERATOR,
    AGGREGATE_NOT_SUPPORTED,
    COLUMN_NOT_FOUND,
    NO_ORDER,
    ONLY_NOT_PREFIX_SUPPORTED,
    BINARY_AND_PREFIX_OPERAND_COUNT,
    UNKNOWN_NODE_SELECTLIST,
    JOIN_TABLE_COUNT,
    INNER_JOIN_ONLY,
    JOIN_ON_ONLY,
    LEFT_RIGHT_CONDITION_OPERANDS,
    LEFT_RIGHT_AS_OPERATOR,
    JOIN_EDGELESS_VERTICES,
    CANNOT_GROUP_EDGES,
    CANNOT_GROUP_TABLE,
    CANNOT_GROUP_COLUMN,
    JOIN_HAVING_UNSUPPORTED,
    JOIN_WHERE_UNSUPPORTED,
    SINGLE_SELECT_MULTI_RETURN,
    SELECT_NO_LIST,
    UNEXPECTED_FROM_FORMAT,
    COLUMN_RENAME_LIST_EMPTY,
    NO_TRAVERSAL_TABLE,
    CANNOT_ORDER_COLUMN_LITERAL,
    CANNOT_ORDER_BY,
    ORDER_BY_ORDINAL_VALUE,
    WHERE_NOT_ONLY_BOOLEAN,
    WHERE_UNSUPPORTED_PREFIX,
    WHERE_BASIC_LITERALS,
    UNEXPECTED_JOIN_NODES,
    NO_JOIN_COLUMN,
    NOT_LOGICAL_FILTER,
    OFFSET_NOT_SUPPORTED,
    UNSUPPORTED_LITERAL_EXPRESSION,
    CANNOT_JOIN_DIFFERENT_EDGES,
    UNSUPPORTED_OPERAND_TYPE,
    UNRECOGNIZED_TYPE,
    UNSUPPORTED_BASIC_LITERALS;
    // Bundle holding the localized message text, keyed by the constant names above.
    private static final ResourceBundle RESOURCE;
    static {
        // NOTE(review): this mutates the JVM-wide default locale as a side effect of class
        // loading, which can affect unrelated code (formatting, case conversion). Passing
        // the locale to getBundle("error-messages", Locale.ENGLISH) would avoid the global
        // change — confirm nothing else relies on the default-locale mutation before changing.
        Locale.setDefault(Locale.ENGLISH);
        RESOURCE = ResourceBundle.getBundle("error-messages");
    }
    /**
     * Looks up the resource bundle string corresponding to the key, and formats it with the provided
     * arguments.
     *
     * @param key Resource key for bundle provided to constructor.
     * @param formatArgs Any additional arguments to format the resource string with.
     * @return resource String, formatted with formatArgs.
     */
    public static String getMessage(final SqlGremlinError key, final Object... formatArgs) {
        return String.format(RESOURCE.getString(key.name()), formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLException.
     *
     * @param key Key for the error message.
     * @param logger Logger for logging the error message (may be null to skip logging).
     * @param cause The underlying cause for the SQLException (may be null).
     * @param isNotSupported Whether to create a SQLNotSupportedException instead of a plain SQLException.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLException create(final SqlGremlinError key, final Logger logger, final Throwable cause,
                                      final boolean isNotSupported, final Object... formatArgs) {
        final String message = getMessage(key, formatArgs);
        final SQLException exception;
        if (cause == null) {
            exception = isNotSupported ? new SQLNotSupportedException(message) : new SQLException(message);
            if (logger != null) {
                logger.error(message);
            }
        } else {
            exception = isNotSupported ? new SQLNotSupportedException(message, cause) : new SQLException(message, cause);
            if (logger != null) {
                logger.error(message, cause);
            }
        }
        return exception;
    }
    /**
     * Helper method for creating an appropriate SQLException.
     *
     * @param key Key for the error message.
     * @param logger Logger for logging the error message.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLException create(final SqlGremlinError key, final Logger logger, final Object... formatArgs) {
        return create(key, logger, null, false, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLException.
     *
     * @param key Key for the error message.
     * @param cause The underlying cause for the SQLException.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLException create(final SqlGremlinError key, final Throwable cause, final Object... formatArgs) {
        return create(key, null, cause, false, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLException.
     *
     * @param key Key for the error message.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLException create(final SqlGremlinError key, final Object... formatArgs) {
        return create(key, null, null, false, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLNotSupportedException.
     *
     * @param key Key for the error message.
     * @param logger Logger for logging the error message.
     * @param cause The underlying cause for the SQLNotSupportedException.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLNotSupportedException createNotSupported(
            final SqlGremlinError key,
            final Logger logger,
            final Throwable cause,
            final Object... formatArgs) {
        // Safe cast: create(...) with isNotSupported == true always constructs a SQLNotSupportedException.
        return (SQLNotSupportedException)create(key, logger, cause, true, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLNotSupportedException.
     *
     * @param key Key for the error message.
     * @param logger Logger for logging the error message.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLNotSupportedException createNotSupported(final SqlGremlinError key, final Logger logger,
                                                              final Object... formatArgs) {
        return createNotSupported(key, logger, null, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLNotSupportedException.
     *
     * @param key Key for the error message.
     * @param cause The underlying cause for the SQLNotSupportedException.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLNotSupportedException createNotSupported(final SqlGremlinError key, final Throwable cause,
                                                              final Object... formatArgs) {
        return createNotSupported(key, null, cause, formatArgs);
    }
    /**
     * Helper method for creating an appropriate SQLNotSupportedException.
     *
     * @param key Key for the error message.
     * @param formatArgs Any additional arguments to format the error message with.
     * @return SQLException
     */
    public static SQLNotSupportedException createNotSupported(final SqlGremlinError key, final Object... formatArgs) {
        return createNotSupported(key, null, null, formatArgs);
    }
}
| 7,440 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/util/SQLNotSupportedException.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.util;
import java.sql.SQLException;
/**
 * SQLException subclass raised for operations the driver does not support.
 */
public class SQLNotSupportedException extends SQLException {
    /**
     * Creates a SQLNotSupportedException with only a description.
     *
     * @param reason Description of the unsupported operation.
     */
    public SQLNotSupportedException(final String reason) {
        super(reason);
    }

    /**
     * Creates a SQLNotSupportedException with a description and an underlying cause.
     *
     * @param reason Description of the unsupported operation.
     * @param cause  Underlying cause of the exception.
     */
    public SQLNotSupportedException(final String reason, final Throwable cause) {
        super(reason, cause);
    }
}
| 7,441 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results/SqlGremlinQueryResult.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.results;
import lombok.Getter;
import software.aws.neptune.gremlin.adapter.converter.SqlMetadata;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
@Getter
public class SqlGremlinQueryResult implements AutoCloseable {
    public static final String EMPTY_MESSAGE = "No more results.";
    // Sentinel string that encodes SQL NULL, since widely-deployed Gremlin versions lack native null support.
    public static final String NULL_VALUE = "$%#NULL#%$";
    private final List<String> columns;
    private final List<String> columnTypes = new ArrayList<>();
    // Producer (pagination thread) pushes rows here; consumers block in getResult() until rows arrive.
    private final BlockingQueue<List<Object>> blockingQueueRows = new LinkedBlockingQueue<>();
    private SQLException paginationException = null;

    /**
     * SqlGremlinQueryResult constructor.
     *
     * @param columns Ordered result column names.
     * @param sqlMetadata Metadata used to resolve the type of each column.
     * @throws SQLException if a column's type cannot be resolved.
     */
    public SqlGremlinQueryResult(final List<String> columns, final SqlMetadata sqlMetadata) throws SQLException {
        this.columns = columns;
        for (final String column : columns) {
            columnTypes.add(sqlMetadata.getType(column));
        }
    }

    /**
     * Records a pagination failure and unblocks any consumer waiting in getResult().
     *
     * @param e Exception to surface to the consumer.
     */
    public void setPaginationException(final SQLException e) {
        paginationException = e;
        close();
    }

    /**
     * Signals end-of-results by enqueueing the EmptyResult sentinel.
     */
    @Override
    public void close() {
        blockingQueueRows.add(new EmptyResult());
    }

    /**
     * Appends rows to the result, decoding the NULL_VALUE sentinel back into Java null.
     *
     * @param rows Rows to append; each row list is modified in place during decoding.
     */
    public void addResults(final List<List<Object>> rows) {
        // This is a workaround for Gremlin null support not being in any version of Gremlin that is
        // widely supported by database vendors.
        rows.forEach(row -> row.replaceAll(col -> (col instanceof String && col.equals(NULL_VALUE) ? null : col)));
        blockingQueueRows.addAll(rows);
    }

    /**
     * Blocks until the next row (or the end-of-results sentinel) is available.
     *
     * @return Next row, or an EmptyResult instance when no more rows will arrive.
     * @throws SQLException if pagination failed on the producer side.
     */
    public List<Object> getResult() throws SQLException {
        boolean interrupted = false;
        try {
            while (true) {
                try {
                    final List<Object> result = blockingQueueRows.take();
                    // If a pagination exception occurs, an EmptyResult Object will be inserted into the
                    // BlockingQueue. The pagination exception needs to be checked before returning.
                    if (paginationException != null) {
                        throw paginationException;
                    }
                    return result;
                } catch (final InterruptedException ignored) {
                    // Keep waiting for the row, but remember the interrupt so it can be restored on exit
                    // instead of being silently swallowed.
                    interrupted = true;
                }
            }
        } finally {
            if (interrupted) {
                // Restore the interrupt status for callers further up the stack.
                Thread.currentThread().interrupt();
            }
        }
    }

    /**
     * Sentinel row type marking the end of the result stream.
     */
    public static class EmptyResult extends ArrayList<Object> {
    }
}
| 7,442 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results/pagination/GetRowFromMap.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.results.pagination;
import java.util.Map;
/**
 * Converts a single Gremlin traversal result map (table label mapped to a column/value map,
 * as produced by the data readers in this package) into a flat row of column values.
 */
interface GetRowFromMap {
    /**
     * Converts one traversal result map into a row.
     *
     * @param input Result map produced by the traversal.
     * @return Row of column values, with null for any missing table or column.
     */
    Object[] execute(Map<String, Object> input);
}
| 7,443 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results/pagination/Pagination.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.results.pagination;
import lombok.AllArgsConstructor;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
 * Runnable that drains a Gremlin traversal page by page and streams the converted rows
 * into a {@link SqlGremlinQueryResult}, so consumers can read rows while the traversal
 * is still being executed on another thread.
 */
@AllArgsConstructor
public class Pagination implements Runnable {
    private static final Logger LOGGER = LoggerFactory.getLogger(Pagination.class);
    private static final int DEFAULT_PAGE_SIZE = 1000;
    // Initialized final fields are skipped by Lombok's @AllArgsConstructor, so callers
    // only supply the three fields below.
    private final int pageSize = DEFAULT_PAGE_SIZE;
    private final GetRowFromMap getRowFromMap;
    private final GraphTraversal<?, ?> traversal;
    private final SqlGremlinQueryResult sqlGremlinQueryResult;

    /**
     * Drains the traversal in pages of {@code pageSize}, converting each result map into a row,
     * until the traversal is exhausted or an exception occurs. Always closes the traversal on exit.
     */
    @Override
    public void run() {
        try {
            LOGGER.info("Graph traversal: " +
                    GroovyTranslator.of("g").translate(traversal.asAdmin().getBytecode()));
            while (traversal.hasNext()) {
                final List<Object> rows = new ArrayList<>();
                traversal.next(pageSize).forEach(map -> {
                    // Our choose(<predicate>, <expected>, <empty>) returns an empty list.
                    // If we get that, we just want to skip over it.
                    if (map instanceof Map) {
                        rows.add(getRowFromMap.execute((Map<String, Object>) map));
                    }
                });
                convertAndInsertResult(sqlGremlinQueryResult, rows);
            }
            // If we run out of traversal data (or hit our limit), stop and signal to the result that it is done.
            sqlGremlinQueryResult.close();
        } catch (final Exception e) {
            final StringWriter sw = new StringWriter();
            final PrintWriter pw = new PrintWriter(sw);
            e.printStackTrace(pw);
            LOGGER.error("Encountered exception", e);
            // Surface the failure (with its stack trace) to the consumer blocked in getResult().
            sqlGremlinQueryResult.setPaginationException(new SQLException(e + sw.toString()));
        } finally {
            closeTraversal();
        }
    }

    /**
     * Closes the underlying traversal; failures are ignored since there is nothing more to do with it.
     */
    void closeTraversal() {
        try {
            traversal.close();
        } catch (final Exception ignored) {
        }
    }

    /**
     * Converts raw row results (Object[] rows or scalar values) into lists and inserts them
     * into the given sqlGremlinQueryResult.
     *
     * @param sqlGremlinQueryResult Destination for the converted rows.
     * @param rows Raw rows produced by the data reader; each entry is an Object[] or a scalar.
     */
    void convertAndInsertResult(final SqlGremlinQueryResult sqlGremlinQueryResult, final List<Object> rows) {
        final List<List<Object>> finalRowResult = new ArrayList<>();
        for (final Object row : rows) {
            final List<Object> convertedRow = new ArrayList<>();
            if (row instanceof Object[]) {
                convertedRow.addAll(Arrays.asList((Object[]) row));
            } else {
                convertedRow.add(row);
            }
            finalRowResult.add(convertedRow);
        }
        sqlGremlinQueryResult.addResults(finalRowResult);
    }
}
| 7,444 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results/pagination/JoinDataReader.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.results.pagination;
import org.apache.calcite.util.Pair;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * GetRowFromMap implementation for joined results: flattens a map of table label to
 * column/value map into a single row ordered by the configured (table, column) pairs.
 */
public class JoinDataReader implements GetRowFromMap {
    // Flattened (table, column) pairs defining the output row order.
    private final List<Pair<String, String>> tableColumnList = new ArrayList<>();

    /**
     * JoinDataReader constructor.
     *
     * @param tablesColumns Map of table label to its ordered column names.
     */
    public JoinDataReader(final Map<String, List<String>> tablesColumns) {
        tablesColumns.forEach((table, columns) ->
                columns.forEach(column -> tableColumnList.add(new Pair<>(table, column))));
    }

    /**
     * Flattens one joined traversal result map into a row, matching table and column names
     * case-insensitively and emitting null for any missing table or column.
     *
     * @param map Result map of table label to column/value map.
     * @return Row of values ordered by the (table, column) pairs.
     */
    @Override
    @SuppressWarnings("unchecked")
    public Object[] execute(final Map<String, Object> map) {
        final Object[] row = new Object[tableColumnList.size()];
        int i = 0;
        for (final Pair<String, String> tableColumn : tableColumnList) {
            final Optional<String> tableKey =
                    map.keySet().stream().filter(key -> key.equalsIgnoreCase(tableColumn.left)).findFirst();
            if (!tableKey.isPresent()) {
                row[i++] = null;
                continue;
            }
            // Resolve the nested table map once instead of re-fetching it for the key lookup and the read.
            final Map<String, Object> tableMap = (Map<String, Object>) map.get(tableKey.get());
            final Optional<String> columnKey = tableMap.keySet().stream()
                    .filter(key -> key.equalsIgnoreCase(tableColumn.right)).findFirst();
            row[i++] = columnKey.map(tableMap::get).orElse(null);
        }
        return row;
    }
}
| 7,445 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/adapter/results/pagination/SimpleDataReader.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.adapter.results.pagination;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * GetRowFromMap implementation for single-table results: extracts the configured columns
 * of one labeled table from a traversal result map.
 */
public class SimpleDataReader implements GetRowFromMap {
    private final String label;
    private final List<String> columnNames;

    /**
     * SimpleDataReader constructor.
     *
     * @param label Table label to read from each result map.
     * @param columnNames Ordered column names to extract.
     */
    public SimpleDataReader(final String label, final List<String> columnNames) {
        this.label = label;
        this.columnNames = columnNames;
    }

    /**
     * Extracts the configured columns for this reader's label from one traversal result map,
     * matching names case-insensitively and emitting null for any missing table or column.
     *
     * @param map Result map of table label to column/value map.
     * @return Row of column values in columnNames order.
     */
    @Override
    @SuppressWarnings("unchecked")
    public Object[] execute(final Map<String, Object> map) {
        final Object[] row = new Object[columnNames.size()];
        // The table key depends only on the label, so resolve it once instead of once per column.
        final Optional<String> tableKey =
                map.keySet().stream().filter(key -> key.equalsIgnoreCase(label)).findFirst();
        if (!tableKey.isPresent()) {
            // Table absent from this result: every column is null (Object[] defaults to null).
            return row;
        }
        final Map<String, Object> tableMap = (Map<String, Object>) map.get(tableKey.get());
        int i = 0;
        for (final String column : columnNames) {
            final Optional<String> columnKey = tableMap.keySet().stream()
                    .filter(key -> key.equalsIgnoreCase(column)).findFirst();
            row[i++] = columnKey.map(tableMap::get).orElse(null);
        }
        return row;
    }
}
| 7,446 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/sql/SqlGremlinQueryExecutor.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.sql;
import lombok.SneakyThrows;
import org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.SqlConverter;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.common.gremlindatamodel.MetadataCache;
import software.aws.neptune.gremlin.GremlinConnectionProperties;
import software.aws.neptune.gremlin.GremlinQueryExecutor;
import software.aws.neptune.gremlin.resultset.GremlinResultSetGetColumns;
import software.aws.neptune.gremlin.resultset.GremlinResultSetGetTables;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.lang.reflect.Constructor;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal;
/**
 * Implementation of QueryExecutor for SQL via Gremlin.
 */
public class SqlGremlinQueryExecutor extends GremlinQueryExecutor {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlGremlinQueryExecutor.class);
    private static final Object TRAVERSAL_LOCK = new Object();
    // Shared remote traversal source, lazily created under TRAVERSAL_LOCK.
    private static GraphTraversalSource graphTraversalSource = null;
    private SqlConverter gremlinSqlConverter = null;
    private final GremlinConnectionProperties gremlinConnectionProperties;

    /**
     * Constructor for SqlGremlinQueryExecutor.
     *
     * @param gremlinConnectionProperties GremlinConnectionProperties for connection.
     * @throws SQLException if the parent GremlinQueryExecutor cannot be constructed.
     */
    public SqlGremlinQueryExecutor(final GremlinConnectionProperties gremlinConnectionProperties) throws SQLException {
        super(gremlinConnectionProperties);
        this.gremlinConnectionProperties = gremlinConnectionProperties;
    }

    /**
     * Function to release the SqlGremlinQueryExecutor resources.
     */
    public static void close() {
        synchronized (TRAVERSAL_LOCK) {
            if (graphTraversalSource != null) {
                try {
                    graphTraversalSource.close();
                } catch (final Exception e) {
                    LOGGER.warn("Failed to close traversal source", e);
                } finally {
                    // Drop the reference even if close() throws so a stale, closed source is never reused.
                    graphTraversalSource = null;
                }
            }
        }
        GremlinQueryExecutor.close();
    }

    /**
     * Lazily creates (and caches) the shared remote graph traversal source.
     *
     * @param gremlinConnectionProperties Connection properties used to build the remote client.
     * @return the shared GraphTraversalSource.
     * @throws SQLException if the underlying client cannot be created.
     */
    private GraphTraversalSource getGraphTraversalSource(
            final GremlinConnectionProperties gremlinConnectionProperties)
            throws SQLException {
        synchronized (TRAVERSAL_LOCK) {
            if (graphTraversalSource == null) {
                graphTraversalSource =
                        traversal().withRemote(DriverRemoteConnection.using(getClient(gremlinConnectionProperties)));
            }
            // Return while holding the lock so the reference is published safely.
            return graphTraversalSource;
        }
    }

    /**
     * Lazily creates (and caches) the SQL-to-Gremlin converter, refreshing the metadata cache first.
     *
     * @param gremlinConnectionProperties Connection properties identifying the endpoint.
     * @return the cached SqlConverter.
     * @throws SQLException if the metadata cache cannot be updated.
     */
    private SqlConverter getGremlinSqlConverter(final GremlinConnectionProperties gremlinConnectionProperties)
            throws SQLException {
        MetadataCache.updateCacheIfNotUpdated(gremlinConnectionProperties);
        if (gremlinSqlConverter == null) {
            gremlinSqlConverter = new SqlConverter(MetadataCache.getGremlinSchema(
                    gremlinConnectionProperties.getContactPoint()));
        }
        return gremlinSqlConverter;
    }

    /**
     * Throws if the metadata cache is cold for the endpoint and the database cannot be reached.
     *
     * @param statement Statement whose connection is probed.
     * @param endpoint Contact point whose cache status is checked.
     * @param operation Name of the metadata operation, used in the error message.
     * @throws SQLException if the connection is not valid within the timeout.
     */
    private static void verifyConnection(final java.sql.Statement statement, final String endpoint,
                                         final String operation) throws SQLException {
        if (!MetadataCache.isMetadataCached(endpoint) && !statement.getConnection().isValid(3000)) {
            throw new SQLException("Failed to execute " + operation + ", could not connect to database.");
        }
    }

    /**
     * Function to get column metadata.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param nodes String containing nodes to get schema for.
     * @return java.sql.ResultSet Object containing columns.
     * @throws SQLException if the metadata cannot be retrieved.
     */
    @Override
    public java.sql.ResultSet executeGetColumns(final java.sql.Statement statement, final String nodes)
            throws SQLException {
        LOGGER.info("Running executeGetColumns.");
        final String endpoint = this.gremlinConnectionProperties.getContactPoint();
        verifyConnection(statement, endpoint, "getColumns");
        MetadataCache.updateCacheIfNotUpdated(gremlinConnectionProperties);
        return new GremlinResultSetGetColumns(statement, MetadataCache.getFilteredCacheNodeColumnInfos(nodes, endpoint),
                MetadataCache.getFilteredResultSetInfoWithoutRowsForColumns(nodes, endpoint));
    }

    /**
     * Function to get tables.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param tableName String table name with colon delimits.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    @Override
    public java.sql.ResultSet executeGetTables(final java.sql.Statement statement, final String tableName)
            throws SQLException {
        LOGGER.info("Running executeGetTables.");
        final String endpoint = this.gremlinConnectionProperties.getContactPoint();
        verifyConnection(statement, endpoint, "getTables");
        MetadataCache.updateCacheIfNotUpdated(gremlinConnectionProperties);
        return new GremlinResultSetGetTables(statement,
                MetadataCache.getFilteredCacheNodeColumnInfos(tableName, endpoint),
                MetadataCache.getFilteredResultSetInfoWithoutRowsForTables(tableName, endpoint));
    }

    /**
     * Function to execute query.
     *
     * @param sql Query to execute.
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    @Override
    public ResultSet executeQuery(final String sql, final Statement statement) throws SQLException {
        final Constructor<?> constructor;
        try {
            constructor = SqlGremlinResultSet.class
                    .getConstructor(java.sql.Statement.class, SqlGremlinQueryResult.class);
        } catch (final NoSuchMethodException e) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.QUERY_FAILED, e);
        }
        return runCancellableQuery(constructor, statement, sql);
    }

    /**
     * Translates and runs the SQL query against the shared traversal source.
     *
     * @param query SQL query text.
     * @return the SqlGremlinQueryResult produced by the converter.
     */
    @SneakyThrows
    @Override
    @SuppressWarnings("unchecked")
    protected <T> T runQuery(final String query) {
        return (T) getGremlinSqlConverter(gremlinConnectionProperties)
                .executeQuery(getGraphTraversalSource(gremlinConnectionProperties), query);
    }

    // TODO AN-540: Look into query cancellation.
    @Override
    protected void performCancel() {
    }
}
| 7,447 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/sql/SqlGremlinConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.sql;
import lombok.NonNull;
import software.aws.neptune.NeptuneDatabaseMetadata;
import software.aws.neptune.gremlin.GremlinConnection;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
/**
 * SQL-over-Gremlin implementation of Connection; delegates Gremlin specifics to GremlinConnection
 * and swaps in the SQL-aware query executor.
 */
public class SqlGremlinConnection extends GremlinConnection {
    /**
     * Gremlin constructor, initializes super class.
     *
     * @param connectionProperties ConnectionProperties Object.
     * @throws SQLException if the underlying Gremlin connection cannot be created.
     */
    public SqlGremlinConnection(
            final @NonNull ConnectionProperties connectionProperties)
            throws SQLException {
        super(connectionProperties);
    }

    // Releases the shared SQL-Gremlin executor resources when the connection closes.
    @Override
    public void doClose() {
        SqlGremlinQueryExecutor.close();
    }

    @Override
    public DatabaseMetaData getMetaData() {
        return new NeptuneDatabaseMetadata(this);
    }

    @Override
    public QueryExecutor getQueryExecutor() throws SQLException {
        return new SqlGremlinQueryExecutor(getGremlinConnectionProperties());
    }
}
| 7,448 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/gremlin/sql/SqlGremlinResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.gremlin.sql;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.results.SqlGremlinQueryResult;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetColumns;
import software.aws.neptune.gremlin.GremlinTypeMapping;
import software.aws.neptune.gremlin.resultset.GremlinResultSetMetadata;
import software.aws.neptune.jdbc.ResultSet;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
 * Forward-only streaming ResultSet over a SqlGremlinQueryResult; next() blocks until the
 * producer delivers a row or the end-of-results sentinel.
 */
public class SqlGremlinResultSet extends ResultSet implements java.sql.ResultSet {
    private static final Logger LOGGER = LoggerFactory.getLogger(SqlGremlinResultSet.class);
    private final List<String> columns;
    private final List<String> columnTypes;
    private final GremlinResultSetMetadata gremlinResultSetMetadata;
    private final SqlGremlinQueryResult sqlQueryResult;
    // A single row that's assigned when we use getResult() in next(); null before the first next().
    private List<Object> row;
    // Tracks whether the most recently read column was SQL NULL, per the JDBC wasNull() contract.
    private boolean wasNull = false;

    /**
     * SqlGremlinResultSet constructor, initializes super class and maps each column's Gremlin
     * type name to a Java class for the result set metadata (defaulting to String).
     *
     * @param statement Statement Object.
     * @param queryResult SqlGremlinQueryResult Object.
     */
    public SqlGremlinResultSet(final java.sql.Statement statement,
                               final SqlGremlinQueryResult queryResult) {
        // 1 for row count as placeholder.
        super(statement, queryResult.getColumns(), 1);
        this.columns = queryResult.getColumns();
        // Null until we get result by calling next.
        this.row = null;
        this.columnTypes = queryResult.getColumnTypes();
        this.sqlQueryResult = queryResult;
        final List<Class<?>> rowTypes = new ArrayList<>();
        for (final String columnType : columnTypes) {
            // Case-insensitive lookup of the Java class backing this Gremlin type name.
            final Optional<? extends Class<?>> javaClassOptional =
                    ResultSetGetColumns.GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.
                            entrySet().stream().
                            filter(d -> d.getKey().equalsIgnoreCase(columnType)).
                            map(Map.Entry::getValue).
                            findFirst();
            rowTypes.add(javaClassOptional.isPresent() ? javaClassOptional.get() : String.class);
        }
        gremlinResultSetMetadata = new GremlinResultSetMetadata(columns, rowTypes);
    }

    @Override
    protected void doClose() throws SQLException {
    }

    /**
     * Blocks until the next row is available; returns false once the end-of-results sentinel arrives.
     */
    @Override
    @SuppressWarnings("unchecked")
    public boolean next() throws SQLException {
        final Object res = sqlQueryResult.getResult();
        if (res instanceof SqlGremlinQueryResult.EmptyResult) {
            return false;
        }
        this.row = (List<Object>) res;
        return true;
    }

    @Override
    public boolean isLast() throws SQLException {
        // Forward-only streaming result: position queries are not supported.
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    public boolean isAfterLast() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    public boolean absolute(final int row) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    // TODO use fetch size for page size?
    protected int getDriverFetchSize() throws SQLException {
        return 0;
    }

    @Override
    // TODO use fetch size for page size?
    protected void setDriverFetchSize(final int rows) {
    }

    @Override
    public boolean wasNull() throws SQLException {
        return wasNull;
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        return gremlinResultSetMetadata;
    }

    /**
     * Returns the raw value when its type maps to a JDBC type; otherwise its String form.
     */
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        final Object value = getValue(columnIndex);
        return (value == null) || GremlinTypeMapping.checkContains(value.getClass())
                ? value
                : value.toString();
    }

    private Object getValue(final int columnIndex) throws SQLException {
        verifyOpen();
        // Grab value in row using column index (note: 1 based indexing of JDBC hence -1).
        final Object value = row.get(columnIndex - 1);
        wasNull = (value == null);
        return value;
    }

    @Override
    public Object getObject(final int columnIndex, final Map<String, Class<?>> map) throws SQLException {
        LOGGER.trace("Getting column {} as an Object using provided Map.", columnIndex);
        final Object value = getValue(columnIndex);
        if (value == null) {
            // SQL NULL maps to Java null regardless of the type map; also avoids NPE on value.getClass().
            return null;
        }
        return getObject(columnIndex, map.get(GremlinTypeMapping.getJDBCType(value.getClass()).name()));
    }
}
| 7,449 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import lombok.Getter;
import lombok.NonNull;
import software.aws.neptune.NeptuneDatabaseMetadata;
import software.aws.neptune.jdbc.Connection;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import java.sql.DatabaseMetaData;
import java.sql.SQLException;
/**
 * SPARQL implementation of Connection; validates and holds SPARQL-specific connection properties.
 */
public class SparqlConnection extends Connection implements java.sql.Connection {
    @Getter
    private final SparqlConnectionProperties sparqlConnectionProperties;

    /**
     * Sparql constructor, initializes super class.
     *
     * @param connectionProperties ConnectionProperties Object.
     * @throws SQLException if the SPARQL connection properties cannot be constructed.
     */
    public SparqlConnection(@NonNull final ConnectionProperties connectionProperties) throws SQLException {
        super(connectionProperties);
        this.sparqlConnectionProperties = new SparqlConnectionProperties(getConnectionProperties());
    }

    // Releases the shared SPARQL query executor resources when the connection closes.
    @Override
    protected void doClose() {
        SparqlQueryExecutor.close();
    }

    @Override
    public DatabaseMetaData getMetaData() throws SQLException {
        return new NeptuneDatabaseMetadata(this);
    }

    @Override
    public QueryExecutor getQueryExecutor() throws SQLException {
        return new SparqlQueryExecutor(getSparqlConnectionProperties());
    }
}
| 7,450 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlPooledConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import software.aws.neptune.jdbc.PooledConnection;
import java.sql.SQLException;
/**
 * Sparql implementation of PooledConnection.
 */
public class SparqlPooledConnection extends PooledConnection implements javax.sql.PooledConnection {
    /**
     * SparqlPooledConnection constructor, initializes super class.
     *
     * @param connection Connection Object.
     */
    public SparqlPooledConnection(final java.sql.Connection connection) {
        super(connection);
    }

    /**
     * Returns a new logical connection built from default SPARQL connection properties.
     * NOTE(review): the wrapped connection's properties are not reused here — confirm intended.
     *
     * @return a new SparqlConnection.
     * @throws SQLException if the connection cannot be created.
     */
    public java.sql.Connection getConnection() throws SQLException {
        return new SparqlConnection(new SparqlConnectionProperties());
    }
}
| 7,451 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlConnectionProperties.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import com.google.common.collect.ImmutableSet;
import lombok.NonNull;
import org.apache.http.client.HttpClient;
import org.apache.http.protocol.HttpContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import java.net.URI;
import java.net.URISyntaxException;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
 * Connection properties for the Neptune SPARQL driver. Holds the SPARQL-specific
 * configuration keys (endpoint, port, dataset, query endpoint, accept headers,
 * HTTP client/context) on top of the parsing/validation behavior inherited from
 * {@link ConnectionProperties}.
 */
public class SparqlConnectionProperties extends ConnectionProperties {
    // currently this requires the full url with "http://" or "https://"
    // e.g. enter "https://your-neptune-endpoint"
    public static final String ENDPOINT_KEY = "endpointURL";
    public static final String PORT_KEY = "port";
    // Dataset path which is optional depending on server (e.g. Neptune vs Fuseki)
    public static final String DATASET_KEY = "dataset";
    public static final String DESTINATION_KEY = "destination";
    // The query endpoints for sparql database
    // as a read-only driver we only support the query endpoint
    public static final String QUERY_ENDPOINT_KEY = "queryEndpoint";
    // RDFConnection builder has default header: "application/sparql-results+json, application/sparql-results+xml;q=0.9,
    // text/tab-separated-values;q=0.7, text/csv;q=0.5, application/json;q=0.2, application/xml;q=0.2, */*;q=0.1"
    public static final String ACCEPT_HEADER_QUERY_KEY = "acceptHeaderQuery";
    public static final String ACCEPT_HEADER_ASK_QUERY_KEY = "acceptHeaderAskQuery";
    public static final String ACCEPT_HEADER_SELECT_QUERY_KEY = "acceptHeaderSelectQuery";
    public static final String PARSE_CHECK_SPARQL_KEY = "parseCheckSparql";
    public static final String ACCEPT_HEADER_DATASET_KEY = "acceptHeaderDataset";
    public static final String HTTP_CLIENT_KEY = "httpClient";
    public static final String HTTP_CONTEXT_KEY = "httpContext";
    public static final int DEFAULT_PORT = 8182; // Neptune default port
    // Because RDFConnection builder does not include all the Neptune supported media-types in its default header, we
    // are adding them into DEFAULT_PROPERTIES_MAP. These also include the media-types supported by Jena
    // QueryExecution, the query engine we use.
    public static final String NEPTUNE_ACCEPTED_HEADERS =
            "application/rdf+xml, application/n-triples, text/turtle, text/plain, application/n-quads, " +
                    "text/x-nquads, text/turtle, application/trig, text/n3, application/ld+json, application/trix, " +
                    "application/x-binary-rdf, application/sparql-results+json, application/sparql-results+xml;q=0.9, " +
                    "text/tab-separated-values;q=0.7, text/csv;q=0.5, application/json;q=0.2, application/xml;q=0.2, " +
                    "*/*;q=0.1";
    public static final Map<String, Object> DEFAULT_PROPERTIES_MAP = new HashMap<>();
    private static final Map<String, ConnectionProperties.PropertyConverter<?>> PROPERTY_CONVERTER_MAP =
            new HashMap<>();
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlConnectionProperties.class);
    // All property names this driver recognizes; anything else is rejected by isSupportedProperty().
    private static final Set<String> SUPPORTED_PROPERTIES_SET = ImmutableSet.<String>builder()
            .add(ENDPOINT_KEY)
            .add(PORT_KEY)
            .add(DATASET_KEY)
            .add(DESTINATION_KEY)
            .add(QUERY_ENDPOINT_KEY)
            .add(ACCEPT_HEADER_ASK_QUERY_KEY)
            .add(ACCEPT_HEADER_DATASET_KEY)
            .add(ACCEPT_HEADER_QUERY_KEY)
            .add(ACCEPT_HEADER_SELECT_QUERY_KEY)
            .add(PARSE_CHECK_SPARQL_KEY)
            .add(HTTP_CLIENT_KEY)
            .add(HTTP_CONTEXT_KEY)
            .build();

    // property converter parses on the in-coming connection string
    // NOTE: HTTP_CLIENT_KEY and HTTP_CONTEXT_KEY have no string converter since they
    // hold object values set programmatically rather than parsed from the URL.
    static {
        PROPERTY_CONVERTER_MAP.put(ENDPOINT_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(PORT_KEY, ConnectionProperties::toUnsigned);
        PROPERTY_CONVERTER_MAP.put(DATASET_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(DESTINATION_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(QUERY_ENDPOINT_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(PARSE_CHECK_SPARQL_KEY, ConnectionProperties::toBoolean);
        PROPERTY_CONVERTER_MAP.put(ACCEPT_HEADER_ASK_QUERY_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(ACCEPT_HEADER_DATASET_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(ACCEPT_HEADER_QUERY_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(ACCEPT_HEADER_SELECT_QUERY_KEY, (key, value) -> value);
    }

    // Defaults applied when the user does not supply a value.
    static {
        DEFAULT_PROPERTIES_MAP.put(PORT_KEY, DEFAULT_PORT);
        DEFAULT_PROPERTIES_MAP.put(ENDPOINT_KEY, "");
        DEFAULT_PROPERTIES_MAP.put(DATASET_KEY, "");
        DEFAULT_PROPERTIES_MAP.put(QUERY_ENDPOINT_KEY, "");
        DEFAULT_PROPERTIES_MAP.put(DESTINATION_KEY, "");
        DEFAULT_PROPERTIES_MAP.put(ACCEPT_HEADER_QUERY_KEY, NEPTUNE_ACCEPTED_HEADERS);
    }

    /**
     * SparqlConnectionProperties constructor using only default values.
     *
     * @throws SQLException if the default properties fail validation.
     */
    public SparqlConnectionProperties() throws SQLException {
        super(new Properties(), DEFAULT_PROPERTIES_MAP, PROPERTY_CONVERTER_MAP);
    }

    /**
     * SparqlConnectionProperties constructor.
     *
     * @param properties Properties to examine and extract key details from.
     * @throws SQLException if any supplied property value is invalid.
     */
    public SparqlConnectionProperties(final Properties properties) throws SQLException {
        super(properties, DEFAULT_PROPERTIES_MAP, PROPERTY_CONVERTER_MAP);
    }

    /**
     * Converts a string property value into an {@link AuthScheme}.
     *
     * @param key   The name of the property being converted (used in error messages).
     * @param value The string value to convert.
     * @return The parsed {@link AuthScheme}, or the default scheme for blank input.
     * @throws SQLException if the value does not name a valid auth scheme.
     */
    protected static AuthScheme toAuthScheme(@NonNull final String key, @NonNull final String value)
            throws SQLException {
        if (isWhitespace(value)) {
            return DEFAULT_AUTH_SCHEME;
        }
        // Parse once rather than calling fromString() twice (null check + return).
        final AuthScheme authScheme = AuthScheme.fromString(value);
        if (authScheme == null) {
            throw invalidConnectionPropertyError(key, value);
        }
        return authScheme;
    }

    /**
     * Extracts the host name from the configured endpoint URL.
     *
     * @return The host portion of the endpoint.
     * @throws SQLException if the endpoint is not a valid URI.
     */
    @Override
    public String getHostname() throws SQLException {
        try {
            return (new URI(getEndpoint())).getHost();
        } catch (final URISyntaxException e) {
            throw new SQLException(e);
        }
    }

    protected boolean isEncryptionEnabled() {
        // Neptune only supports https when using SPARQL.
        return true;
    }

    @Override
    public void sshTunnelOverride(final int port) throws SQLException {
        // When an SSH tunnel is in use, traffic goes to the local tunnel port instead.
        setPort(port);
    }

    /**
     * Gets the connection endpoint.
     *
     * @return The connection endpoint.
     */
    public String getEndpoint() {
        return getProperty(ENDPOINT_KEY);
    }

    /**
     * Sets the connection endpoint.
     *
     * @param endpoint The connection endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setEndpoint(@NonNull final String endpoint) throws SQLException {
        setProperty(ENDPOINT_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ENDPOINT_KEY).convert(ENDPOINT_KEY, endpoint));
    }

    /**
     * Gets the port that the Sparql Servers will be listening on.
     *
     * @return The port.
     */
    @Override
    public int getPort() {
        return (int) get(PORT_KEY);
    }

    /**
     * Sets the port that the Sparql Servers will be listening on.
     *
     * @param port The port.
     * @throws SQLException if the port is negative.
     */
    public void setPort(final int port) throws SQLException {
        if (port < 0) {
            throw invalidConnectionPropertyError(PORT_KEY, port);
        }
        put(PORT_KEY, port);
    }

    /**
     * Gets the dataset path for the connection string.
     *
     * @return The dataset path for the connection string.
     */
    public String getDataset() {
        return getProperty(DATASET_KEY);
    }

    /**
     * Sets the dataset path for the connection string.
     *
     * @param dataset The dataset path for the connection string.
     * @throws SQLException if value is invalid.
     */
    public void setDataset(@NonNull final String dataset) throws SQLException {
        setProperty(DATASET_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(DATASET_KEY).convert(DATASET_KEY, dataset));
    }

    /**
     * Gets the RDF connection destination.
     *
     * @return The RDF connection destination.
     */
    public String getDestination() {
        return getProperty(DESTINATION_KEY);
    }

    /**
     * Sets the RDF connection destination. Normally derived from endpoint, port and
     * dataset inside {@link #validateProperties()}.
     *
     * @param destination The RDF connection destination.
     * @throws SQLException if value is invalid.
     */
    public void setDestination(@NonNull final String destination) throws SQLException {
        put(DESTINATION_KEY, destination);
    }

    /**
     * Gets the query endpoint.
     *
     * @return The query endpoint for sparql query.
     */
    public String getQueryEndpoint() {
        return getProperty(QUERY_ENDPOINT_KEY);
    }

    /**
     * Sets the query endpoint.
     *
     * @param queryEndpoint The query endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setQueryEndpoint(@NonNull final String queryEndpoint) throws SQLException {
        setProperty(QUERY_ENDPOINT_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(QUERY_ENDPOINT_KEY).convert(QUERY_ENDPOINT_KEY, queryEndpoint));
    }

    /**
     * Gets the HTTP accept:header used when making a SPARQL Protocol ASK query.
     *
     * @return The HTTP accept:header.
     */
    public String getAcceptHeaderAskQuery() {
        return getProperty(ACCEPT_HEADER_ASK_QUERY_KEY);
    }

    /**
     * Sets the HTTP accept:header used when making a SPARQL Protocol ASK query.
     *
     * @param acceptHeaderAskQuery The HTTP accept:header.
     * @throws SQLException if value is invalid.
     */
    public void setAcceptHeaderAskQuery(@NonNull final String acceptHeaderAskQuery) throws SQLException {
        setProperty(ACCEPT_HEADER_ASK_QUERY_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ACCEPT_HEADER_ASK_QUERY_KEY)
                        .convert(ACCEPT_HEADER_ASK_QUERY_KEY,
                                acceptHeaderAskQuery));
    }

    /**
     * Gets the HTTP accept:header used to fetch RDF dataset using HTTP GET.
     *
     * @return The HTTP accept:header.
     */
    public String getAcceptHeaderDataset() {
        return getProperty(ACCEPT_HEADER_DATASET_KEY);
    }

    /**
     * Sets the HTTP accept:header used to fetch RDF dataset using HTTP GET.
     *
     * @param acceptHeaderDataset The HTTP accept:header.
     * @throws SQLException if value is invalid.
     */
    public void setAcceptHeaderDataset(@NonNull final String acceptHeaderDataset) throws SQLException {
        setProperty(ACCEPT_HEADER_DATASET_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ACCEPT_HEADER_DATASET_KEY).convert(ACCEPT_HEADER_DATASET_KEY,
                        acceptHeaderDataset));
    }

    /**
     * Gets the HTTP accept:header used when making SPARQL Protocol query if no query specific setting is available.
     *
     * @return The HTTP accept:header.
     */
    public String getAcceptHeaderQuery() {
        return getProperty(ACCEPT_HEADER_QUERY_KEY);
    }

    /**
     * Sets the HTTP accept:header used when making SPARQL Protocol query if no query specific setting is available.
     *
     * @param acceptHeaderQuery The HTTP accept:header.
     * @throws SQLException if value is invalid.
     */
    public void setAcceptHeaderQuery(@NonNull final String acceptHeaderQuery) throws SQLException {
        setProperty(ACCEPT_HEADER_QUERY_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ACCEPT_HEADER_QUERY_KEY).convert(ACCEPT_HEADER_QUERY_KEY,
                        acceptHeaderQuery));
    }

    /**
     * Gets the HTTP accept:header used when making SPARQL Protocol SELECT query if no query specific setting is available.
     *
     * @return The HTTP accept:header.
     */
    public String getAcceptHeaderSelectQuery() {
        return getProperty(ACCEPT_HEADER_SELECT_QUERY_KEY);
    }

    /**
     * Sets the HTTP accept:header used when making SPARQL Protocol SELECT query if no query specific setting is available.
     *
     * @param acceptHeaderSelectQuery The HTTP accept:header.
     * @throws SQLException if value is invalid.
     */
    public void setAcceptHeaderSelectQuery(@NonNull final String acceptHeaderSelectQuery) throws SQLException {
        setProperty(ACCEPT_HEADER_SELECT_QUERY_KEY,
                (String) PROPERTY_CONVERTER_MAP.get(ACCEPT_HEADER_SELECT_QUERY_KEY)
                        .convert(ACCEPT_HEADER_SELECT_QUERY_KEY,
                                acceptHeaderSelectQuery));
    }

    /**
     * Gets the HttpClient for the connection to be built.
     *
     * @return The HttpClient, or {@code null} if none was set.
     */
    public HttpClient getHttpClient() {
        return (HttpClient) get(HTTP_CLIENT_KEY);
    }

    /**
     * Sets the HttpClient for the connection to be built.
     *
     * @param httpClient The HTTP client.
     * @throws SQLException if value is invalid.
     */
    public void setHttpClient(@NonNull final HttpClient httpClient) throws SQLException {
        put(HTTP_CLIENT_KEY, httpClient);
    }

    /**
     * Gets the HttpContext for the connection to be built.
     *
     * @return The HttpContext, or {@code null} if none was set.
     */
    public HttpContext getHttpContext() {
        return (HttpContext) get(HTTP_CONTEXT_KEY);
    }

    /**
     * Sets the HttpContext for the connection to be built.
     *
     * @param httpContext The HTTP context.
     * @throws SQLException if value is invalid.
     */
    public void setHttpContext(@NonNull final HttpContext httpContext) throws SQLException {
        put(HTTP_CONTEXT_KEY, httpContext);
    }

    /**
     * Gets the flag for whether to check SPARQL queries are provided as a string.
     *
     * @return The parse-check flag.
     */
    public boolean getParseCheckSparql() {
        return (boolean) get(PARSE_CHECK_SPARQL_KEY);
    }

    /**
     * Sets the flag for whether to check SPARQL queries are provided as a string.
     *
     * @param parseCheckSparql The flag.
     * @throws SQLException if value is invalid.
     */
    public void setParseCheckSparql(final boolean parseCheckSparql) throws SQLException {
        put(PARSE_CHECK_SPARQL_KEY, parseCheckSparql);
    }

    /**
     * Validate the supported properties and derive the RDF destination from the
     * endpoint, port and (optional) dataset.
     *
     * @throws SQLException if required properties are missing or conflicting.
     */
    @Override
    protected void validateProperties() throws SQLException {
        if (AuthScheme.IAMSigV4.equals(getAuthScheme())) {
            // If IAMSigV4 is specified, we need the region provided to us.
            validateServiceRegionEnvVariable();
            // Throw exception if both IAM AUTH and HTTP_CLIENT_KEY are given,
            // because the signing client would be replaced by the custom one.
            if (getHttpClient() != null) {
                throw invalidConnectionPropertyValueError(AUTH_SCHEME_KEY, "IAMSigV4 does not support custom" +
                        "HttpClient input. Set AuthScheme to None to pass in custom HttpClient.");
            }
        }
        if ("".equals(getEndpoint()) || getPort() < 0) {
            throw missingConnectionPropertyError(
                    String.format("The '%s' and '%s' fields must be provided", ENDPOINT_KEY, PORT_KEY));
        }
        // Destination is "<endpoint>:<port>" with "/<dataset>" appended when a dataset is set.
        String destination = String.format("%s:%d", getEndpoint(), getPort());
        if (!"".equals(getDataset())) {
            destination = String.format("%s/%s", destination, getDataset());
        }
        setDestination(destination);
    }

    /**
     * Check if the property is supported by the driver.
     *
     * @param name The name of the property.
     * @return {@code true} if property is supported; {@code false} otherwise.
     */
    @Override
    public boolean isSupportedProperty(final String name) {
        return SUPPORTED_PROPERTIES_SET.contains(name);
    }
}
| 7,452 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlDataSource.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import lombok.NonNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.NeptuneDriver;
import software.aws.neptune.jdbc.DataSource;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.SqlError;
import javax.sql.PooledConnection;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
 * {@link javax.sql.DataSource} and {@link javax.sql.ConnectionPoolDataSource}
 * implementation for the SPARQL driver. All configuration is delegated to an
 * internal {@link SparqlConnectionProperties} instance.
 */
public class SparqlDataSource extends DataSource
        implements javax.sql.DataSource, javax.sql.ConnectionPoolDataSource {
    public static final String SPARQL_PREFIX = NeptuneDriver.CONN_STRING_PREFIX + "sparql://";
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlDataSource.class);
    private final SparqlConnectionProperties connectionProperties;

    /**
     * SparqlDataSource constructor, initializes super class.
     *
     * @throws SQLException if the default connection properties fail validation.
     */
    SparqlDataSource() throws SQLException {
        super();
        this.connectionProperties = new SparqlConnectionProperties();
    }

    @Override
    public Connection getConnection() throws SQLException {
        return DriverManager.getConnection(SPARQL_PREFIX, connectionProperties);
    }

    @Override
    public Connection getConnection(final String username, final String password) throws SQLException {
        // Username/password authentication is not supported by this driver.
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    public PooledConnection getPooledConnection() throws SQLException {
        return new SparqlPooledConnection(getConnection());
    }

    @Override
    public PooledConnection getPooledConnection(final String user, final String password) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    /**
     * Gets the timeout for opening a connection.
     *
     * @return the connection timeout in seconds, per the JDBC contract.
     */
    @Override
    public int getLoginTimeout() throws SQLException {
        // Fix: javax.sql.DataSource#getLoginTimeout is specified in seconds, but the
        // underlying property stores milliseconds; convert instead of returning
        // the raw millisecond value.
        return connectionProperties.getConnectionTimeoutMillis() / 1000;
    }

    /**
     * Sets the timeout for opening a connection.
     *
     * @param seconds The connection timeout in seconds.
     * @throws SQLException if timeout is negative.
     */
    @Override
    public void setLoginTimeout(final int seconds) throws SQLException {
        // Fix: convert the JDBC seconds value into the milliseconds the property expects.
        connectionProperties.setConnectionTimeoutMillis(seconds * 1000);
    }

    /**
     * Gets the application name.
     *
     * @return The application name.
     */
    public String getApplicationName() {
        return connectionProperties.getApplicationName();
    }

    /**
     * Sets the application name.
     *
     * @param applicationName The application name.
     * @throws SQLException if value is invalid.
     */
    public void setApplicationName(final String applicationName) throws SQLException {
        connectionProperties.setApplicationName(applicationName);
    }

    /**
     * Gets the connection endpoint.
     *
     * @return The connection endpoint.
     */
    public String getEndpoint() {
        return connectionProperties.getEndpoint();
    }

    /**
     * Sets the connection endpoint.
     *
     * @param endpoint The connection endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setEndpoint(final String endpoint) throws SQLException {
        connectionProperties.setEndpoint(endpoint);
    }

    /**
     * Gets the RDF connection destination.
     *
     * @return The RDF connection destination.
     */
    public String getDestination() {
        return connectionProperties.getDestination();
    }

    /**
     * Sets the RDF connection destination.
     *
     * @param destination The RDF connection destination.
     * @throws SQLException if value is invalid.
     */
    public void setDestination(@NonNull final String destination) throws SQLException {
        connectionProperties.setDestination(destination);
    }

    /**
     * Gets the query endpoint.
     *
     * @return The query endpoint for sparql query.
     */
    public String getQueryEndpoint() {
        return connectionProperties.getQueryEndpoint();
    }

    /**
     * Sets the query endpoint.
     *
     * @param queryEndpoint The query endpoint.
     * @throws SQLException if value is invalid.
     */
    public void setQueryEndpoint(@NonNull final String queryEndpoint) throws SQLException {
        connectionProperties.setQueryEndpoint(queryEndpoint);
    }

    /**
     * Gets the port.
     *
     * @return The port.
     */
    public int getPort() {
        return connectionProperties.getPort();
    }

    /**
     * Sets the port.
     *
     * @param port The port.
     * @throws SQLException if value is invalid.
     */
    public void setPort(final int port) throws SQLException {
        connectionProperties.setPort(port);
    }

    /**
     * Gets the dataset.
     *
     * @return The dataset.
     */
    public String getDataset() {
        return connectionProperties.getDataset();
    }

    /**
     * Sets the dataset.
     *
     * @param dataset The dataset.
     * @throws SQLException if value is invalid.
     */
    public void setDataset(final String dataset) throws SQLException {
        connectionProperties.setDataset(dataset);
    }

    /**
     * Gets the connection timeout in milliseconds.
     *
     * @return The connection timeout in milliseconds.
     */
    public int getConnectionTimeoutMillis() {
        return connectionProperties.getConnectionTimeoutMillis();
    }

    /**
     * Sets the connection timeout in milliseconds.
     *
     * @param timeoutMillis The connection timeout in milliseconds.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionTimeoutMillis(final int timeoutMillis) throws SQLException {
        connectionProperties.setConnectionTimeoutMillis(timeoutMillis);
    }

    /**
     * Gets the connection retry count.
     *
     * @return The connection retry count.
     */
    public int getConnectionRetryCount() {
        return connectionProperties.getConnectionRetryCount();
    }

    /**
     * Sets the connection retry count.
     *
     * @param retryCount The connection retry count.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionRetryCount(final int retryCount) throws SQLException {
        connectionProperties.setConnectionRetryCount(retryCount);
    }

    /**
     * Gets the authentication scheme.
     *
     * @return The authentication scheme.
     */
    public AuthScheme getAuthScheme() {
        return connectionProperties.getAuthScheme();
    }

    /**
     * Sets the authentication scheme.
     *
     * @param authScheme The authentication scheme.
     * @throws SQLException if value is invalid.
     */
    public void setAuthScheme(final AuthScheme authScheme) throws SQLException {
        connectionProperties.setAuthScheme(authScheme);
    }
}
| 7,453 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlTypeMapping.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import org.apache.jena.datatypes.xsd.XSDDatatype;
import org.apache.jena.datatypes.xsd.XSDDateTime;
import org.apache.jena.graph.impl.LiteralLabel;
import org.apache.jena.rdf.model.Literal;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.JdbcType;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.Date;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Map;
/**
 * Static mapping tables between SPARQL/XSD literal datatypes, Java classes and
 * JDBC types, plus converters for the date/time literal values returned by Jena.
 * All maps are populated once in static initializers; lookups fall back to
 * String/VARCHAR for unknown datatypes.
 */
public class SparqlTypeMapping {
    // Java class -> JDBC type (used when a value arrives already converted to a Java object).
    public static final Map<Class<?>, JdbcType> SPARQL_JAVA_TO_JDBC_TYPE_MAP = new HashMap<>();
    // XSD literal datatype -> Java class the literal value maps to.
    public static final Map<XSDDatatype, Class<?>> SPARQL_LITERAL_TO_JAVA_TYPE_MAP = new HashMap<>();
    // XSD literal datatype -> JDBC type reported in result-set metadata.
    public static final Map<XSDDatatype, JdbcType> SPARQL_LITERAL_TO_JDBC_TYPE_MAP = new HashMap<>();
    // XSD literal datatype -> converter that turns the raw literal into its Java value.
    public static final Map<XSDDatatype, SparqlTypeMapping.Converter<?>> SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP =
            new HashMap<>();
    public static final Converter<Timestamp> DATE_TIME_CONVERTER = new DateTimeConverter();
    public static final Converter<ZonedDateTime> DATE_TIME_STAMP_CONVERTER = new DateTimeStampConverter();
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlTypeMapping.class);
    static {
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDdecimal, java.math.BigDecimal.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDinteger, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDnonPositiveInteger, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDnonNegativeInteger, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDpositiveInteger, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDnegativeInteger, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDbyte, Byte.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDunsignedByte, Integer.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDdouble, Double.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDfloat, Float.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDlong, Long.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDunsignedShort, Integer.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDunsignedInt, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDunsignedLong, java.math.BigInteger.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDint, Integer.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDshort, Short.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDboolean, Boolean.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDdate, java.sql.Date.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDtime, java.sql.Time.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDdateTime, java.sql.Timestamp.class);
        // NOTE(review): this entry says Timestamp, but DATE_TIME_STAMP_CONVERTER below
        // produces a ZonedDateTime for XSDdateTimeStamp — confirm which is intended.
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDdateTimeStamp, java.sql.Timestamp.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDduration, String.class);
        SPARQL_LITERAL_TO_JAVA_TYPE_MAP.put(XSDDatatype.XSDstring, String.class);
        // Converters for datatypes whose lexical form must be parsed into Java date/time types.
        SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP
                .put(XSDDatatype.XSDtime, (Object value) -> Time.valueOf(getStringValueBasedOnType(value)));
        SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP
                .put(XSDDatatype.XSDdate, (Object value) -> Date.valueOf(getStringValueBasedOnType(value)));
        SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.put(XSDDatatype.XSDdateTime, DATE_TIME_CONVERTER);
        SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.put(XSDDatatype.XSDdateTimeStamp, DATE_TIME_STAMP_CONVERTER);
        SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.put(XSDDatatype.XSDduration, SparqlTypeMapping::getStringValueBasedOnType);
        // NOTE: Gregorian date types are not supported currently due to incompatibility with java and JDBC datatype,
        // currently returning as String
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDdecimal, JdbcType.DECIMAL);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDinteger, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDnonPositiveInteger, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDnonNegativeInteger, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDpositiveInteger, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDnegativeInteger, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDbyte, JdbcType.TINYINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDunsignedByte, JdbcType.INTEGER);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDdouble, JdbcType.DOUBLE);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDfloat, JdbcType.REAL);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDlong, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDunsignedShort, JdbcType.INTEGER);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDunsignedInt, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDunsignedLong, JdbcType.BIGINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDint, JdbcType.INTEGER);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDshort, JdbcType.SMALLINT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDboolean, JdbcType.BIT);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDdate, JdbcType.DATE);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDtime, JdbcType.TIME);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDdateTime, JdbcType.TIMESTAMP);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDdateTimeStamp, JdbcType.TIMESTAMP);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDduration, JdbcType.VARCHAR);
        SPARQL_LITERAL_TO_JDBC_TYPE_MAP.put(XSDDatatype.XSDstring, JdbcType.VARCHAR);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(String.class, JdbcType.VARCHAR);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Boolean.class, JdbcType.BIT);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(byte[].class, JdbcType.VARCHAR);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Byte.class, JdbcType.TINYINT);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Short.class, JdbcType.SMALLINT);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Integer.class, JdbcType.INTEGER);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Long.class, JdbcType.BIGINT);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Float.class, JdbcType.REAL);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Double.class, JdbcType.DOUBLE);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(java.util.Date.class, JdbcType.DATE);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(java.sql.Date.class, JdbcType.DATE);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Time.class, JdbcType.TIME);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(Timestamp.class, JdbcType.TIMESTAMP);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(ZonedDateTime.class, JdbcType.TIMESTAMP);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(java.math.BigInteger.class, JdbcType.BIGINT);
        SPARQL_JAVA_TO_JDBC_TYPE_MAP.put(java.math.BigDecimal.class, JdbcType.DECIMAL);
    }
    /**
     * Function to get JDBC type equivalent of Sparql input type.
     *
     * @param sparqlClass Sparql Datatype.
     * @return JDBC equivalent for Sparql class type; VARCHAR if the type is unknown.
     */
    public static JdbcType getJDBCType(final Object sparqlClass) {
        try {
            return SPARQL_LITERAL_TO_JDBC_TYPE_MAP.getOrDefault(sparqlClass, JdbcType.VARCHAR);
        } catch (final ClassCastException e) {
            // NOTE(review): this catch looks unreachable — HashMap#getOrDefault does not
            // cast its key, so no ClassCastException is thrown here; confirm before removing.
            LOGGER.warn("Value is not of typed literal XSDDatatype, returning as VARCHAR type");
            return JdbcType.VARCHAR;
        }
    }
    /**
     * Function to get Java type equivalent of Sparql input type.
     *
     * @param sparqlClass Sparql Datatype.
     * @return Java equivalent for Sparql class type; String if the type is unknown.
     */
    public static Class<?> getJavaType(final Object sparqlClass) {
        try {
            return SPARQL_LITERAL_TO_JAVA_TYPE_MAP.getOrDefault(sparqlClass, String.class);
        } catch (final ClassCastException e) {
            // NOTE(review): see getJDBCType — this catch appears unreachable as well.
            LOGGER.warn("Value is not of typed literal XSDDatatype, returning as String type");
            return String.class;
        }
    }
    /**
     * Function to check if Sparql has a Java type mapping for the given datatype
     * (i.e. it appears in SPARQL_LITERAL_TO_JAVA_TYPE_MAP).
     *
     * @param sparqlDatatype Input class type.
     * @return True if a mapping exists, false otherwise.
     */
    public static boolean checkContains(final XSDDatatype sparqlDatatype) {
        return SPARQL_LITERAL_TO_JAVA_TYPE_MAP.containsKey(sparqlDatatype);
    }
    /**
     * Function to check if we have a converter for the given sparql class type.
     *
     * @param sparqlDatatype Input class type.
     * @return True if a direct converter exists, false otherwise.
     */
    public static boolean checkConverter(final XSDDatatype sparqlDatatype) {
        return SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.containsKey(sparqlDatatype);
    }
    // Extracts the XSDDateTime value from either a Model-level Literal or a
    // graph-level LiteralLabel; any other input type is rejected.
    private static XSDDateTime getDateTimeValueBasedOnType(final Object value) throws SQLException {
        if (value instanceof Literal) {
            final Literal valueLiteral = (Literal) value;
            return (XSDDateTime) valueLiteral.getValue();
        } else if (value instanceof LiteralLabel) {
            final LiteralLabel valueLiteralLabel = (LiteralLabel) value;
            return (XSDDateTime) valueLiteralLabel.getValue();
        } else {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.UNSUPPORTED_TYPE);
        }
    }
    // Extracts the lexical form from either a Model-level Literal or a
    // graph-level LiteralLabel; any other input type is rejected.
    private static String getStringValueBasedOnType(final Object value) throws SQLException {
        if (value instanceof Literal) {
            final Literal valueLiteral = (Literal) value;
            return valueLiteral.getLexicalForm();
        } else if (value instanceof LiteralLabel) {
            final LiteralLabel valueLiteralLabel = (LiteralLabel) value;
            return valueLiteralLabel.getLexicalForm();
        } else {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.UNSUPPORTED_TYPE);
        }
    }
    /**
     * Converter interface to convert a Value type to a Java type.
     *
     * @param <T> Java type to convert to.
     */
    public interface Converter<T> {
        /**
         * Function to perform conversion.
         *
         * @param value Input value to convert.
         * @return Converted value.
         * @throws SQLException if the input is not a supported literal type.
         */
        T convert(Object value) throws SQLException;
    }
    // Converts an XSD dateTime literal into a java.sql.Timestamp.
    static class DateTimeConverter implements Converter<Timestamp> {
        @Override
        public Timestamp convert(final Object value) throws SQLException {
            return convert(getDateTimeValueBasedOnType(value));
        }
        public Timestamp convert(final XSDDateTime value) {
            return new Timestamp(value.asCalendar().getTimeInMillis());
        }
    }
    // Converts from XSD DateTimeStamp to Java ZonedDateTime in UTC
    static class DateTimeStampConverter implements Converter<ZonedDateTime> {
        @Override
        public ZonedDateTime convert(final Object value) throws SQLException {
            return convert(getDateTimeValueBasedOnType(value));
        }
        public ZonedDateTime convert(final XSDDateTime value) {
            final GregorianCalendar calendarTime = (GregorianCalendar) value.asCalendar();
            return calendarTime.toZonedDateTime().withZoneSameInstant(ZoneId.of("UTC"));
        }
    }
}
| 7,454 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/SparqlQueryExecutor.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.neptune.auth.NeptuneApacheHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import lombok.SneakyThrows;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.protocol.HttpContext;
import org.apache.jena.atlas.iterator.PeekIterator;
import org.apache.jena.graph.Node;
import org.apache.jena.graph.Triple;
import org.apache.jena.query.Query;
import org.apache.jena.query.QueryExecution;
import org.apache.jena.query.QueryFactory;
import org.apache.jena.query.QuerySolution;
import org.apache.jena.query.QueryType;
import org.apache.jena.rdf.model.RDFNode;
import org.apache.jena.rdfconnection.RDFConnection;
import org.apache.jena.rdfconnection.RDFConnectionRemote;
import org.apache.jena.rdfconnection.RDFConnectionRemoteBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTables;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.sparql.resultset.SparqlAskResultSet;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetCatelogs;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetColumns;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetSchemas;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetTableTypes;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetTables;
import software.aws.neptune.sparql.resultset.SparqlResultSetGetTypeInfo;
import software.aws.neptune.sparql.resultset.SparqlSelectResultSet;
import software.aws.neptune.sparql.resultset.SparqlTriplesResultSet;
import java.lang.reflect.Constructor;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Executes SPARQL queries against a Neptune endpoint through an Apache Jena
 * {@link RDFConnection} and adapts the results into JDBC result sets.
 * Connection state is held in static fields shared by all executor instances.
 */
public class SparqlQueryExecutor extends QueryExecutor {
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlQueryExecutor.class);
    // Guards all access to the shared static rdfConnection.
    private static final Object RDF_CONNECTION_LOCK = new Object();
    // Connection shared by all executor instances; rebuilt when the connection properties change.
    private static RDFConnection rdfConnection = null;
    // Properties that produced the current rdfConnection; used for change detection in getRdfConnection().
    private static SparqlConnectionProperties previousSparqlConnectionProperties = null;
    // The in-flight query execution, kept in a static field so performCancel() can abort it.
    private static QueryExecution queryExecution = null;
    // Guards queryExecution while a query is being started or torn down.
    private final Object queryExecutionLock = new Object();
    private final SparqlConnectionProperties sparqlConnectionProperties;

    /**
     * SparqlQueryExecutor constructor.
     *
     * @param sparqlConnectionProperties Sparql connection properties for this executor.
     * @throws SQLException declared but not thrown by this constructor.
     */
    SparqlQueryExecutor(final SparqlConnectionProperties sparqlConnectionProperties) throws SQLException {
        this.sparqlConnectionProperties = sparqlConnectionProperties;
    }

    /**
     * Creates a Jena RDF remote connection builder from Sparql connection properties.
     * Only the properties present in the map are applied to the builder.
     *
     * @param properties Sparql connection properties.
     * @return a Jena RDF remote connection builder.
     * @throws SQLException if remote connection builder fails, or it was cancelled.
     */
    public static RDFConnectionRemoteBuilder createRDFBuilder(final SparqlConnectionProperties properties)
            throws SQLException {
        final RDFConnectionRemoteBuilder builder = RDFConnectionRemote.create();
        if (properties.containsKey(SparqlConnectionProperties.DESTINATION_KEY)) {
            builder.destination(properties.getDestination());
        }
        if (properties.containsKey(SparqlConnectionProperties.QUERY_ENDPOINT_KEY)) {
            builder.queryEndpoint(properties.getQueryEndpoint());
        }
        if (properties.containsKey(SparqlConnectionProperties.ACCEPT_HEADER_ASK_QUERY_KEY)) {
            builder.acceptHeaderAskQuery(properties.getAcceptHeaderAskQuery());
        }
        if (properties.containsKey(SparqlConnectionProperties.ACCEPT_HEADER_DATASET_KEY)) {
            builder.acceptHeaderDataset(properties.getAcceptHeaderDataset());
        }
        if (properties.containsKey(SparqlConnectionProperties.ACCEPT_HEADER_QUERY_KEY)) {
            builder.acceptHeaderQuery(properties.getAcceptHeaderQuery());
        }
        if (properties.containsKey(SparqlConnectionProperties.ACCEPT_HEADER_SELECT_QUERY_KEY)) {
            builder.acceptHeaderSelectQuery(properties.getAcceptHeaderSelectQuery());
        }
        if (properties.containsKey(SparqlConnectionProperties.PARSE_CHECK_SPARQL_KEY)) {
            builder.parseCheckSPARQL(properties.getParseCheckSparql());
        }
        // IAM SigV4 takes precedence over any user-supplied HTTP client.
        if (properties.getAuthScheme() == AuthScheme.IAMSigV4) {
            builder.httpClient(createV4SigningClient(properties));
        } else if (properties.containsKey(SparqlConnectionProperties.HTTP_CLIENT_KEY)) {
            builder.httpClient(properties.getHttpClient());
        }
        if (properties.containsKey(SparqlConnectionProperties.HTTP_CONTEXT_KEY)) {
            builder.httpContext(properties.getHttpContext());
        }
        return builder;
    }

    /**
     * Builds an HTTP client whose outgoing requests are SigV4-signed. Adapted from:
     * https://github.com/aws/amazon-neptune-sparql-java-sigv4/blob/master/src/main/java/com/amazonaws/neptune/client/jena/NeptuneJenaSigV4Example.java
     *
     * @param properties Sparql connection properties supplying the service region.
     * @return an HttpClient that signs every request with SigV4.
     * @throws SQLException if the signer cannot be created or a request cannot be signed.
     */
    private static HttpClient createV4SigningClient(final SparqlConnectionProperties properties) throws SQLException {
        final AWSCredentialsProvider awsCredentialsProvider = new DefaultAWSCredentialsProviderChain();
        final NeptuneApacheHttpSigV4Signer v4Signer;
        final HttpClient v4SigningClient;
        try {
            v4Signer = new NeptuneApacheHttpSigV4Signer(properties.getServiceRegion(), awsCredentialsProvider);
            v4SigningClient =
                    HttpClientBuilder.create().addInterceptorLast(new HttpRequestInterceptor() {
                        @SneakyThrows
                        @Override
                        public void process(final HttpRequest req, final HttpContext ctx) {
                            if (req instanceof HttpUriRequest) {
                                final HttpUriRequest httpUriReq = (HttpUriRequest) req;
                                try {
                                    v4Signer.signRequest(httpUriReq);
                                } catch (final NeptuneSigV4SignerException e) {
                                    throw SqlError.createSQLException(LOGGER,
                                            SqlState.INVALID_AUTHORIZATION_SPECIFICATION,
                                            SqlError.CONN_FAILED, e);
                                }
                            } else {
                                // Signing requires a URI-bearing request; anything else is rejected.
                                throw SqlError.createSQLException(LOGGER,
                                        SqlState.INVALID_AUTHORIZATION_SPECIFICATION,
                                        SqlError.UNSUPPORTED_REQUEST, "Not an HttpUriRequest");
                            }
                        }
                    }).build();
        } catch (final NeptuneSigV4SignerException e) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.INVALID_AUTHORIZATION_SPECIFICATION,
                    SqlError.CONN_FAILED, e);
        }
        return v4SigningClient;
    }

    /**
     * Function to close down the shared RDF connection, if one exists.
     */
    public static void close() {
        synchronized (RDF_CONNECTION_LOCK) {
            if (rdfConnection != null) {
                rdfConnection.close();
                rdfConnection = null;
            }
        }
    }

    /**
     * Returns the shared RDF connection, building a new one when none exists yet or when the
     * connection properties have changed since the last build.
     * NOTE(review): when the properties change, the previously cached connection is not closed
     * here, and the static field is only reassigned by the caller (runQuery) — confirm this is
     * intended and does not leak the old connection.
     *
     * @param sparqlConnectionProperties Sparql connection properties.
     * @return an RDFConnection matching the given properties.
     * @throws SQLException if building the connection fails.
     */
    private RDFConnection getRdfConnection(final SparqlConnectionProperties sparqlConnectionProperties)
            throws SQLException {
        if (rdfConnection == null || !propertiesEqual(previousSparqlConnectionProperties, sparqlConnectionProperties)) {
            previousSparqlConnectionProperties = sparqlConnectionProperties;
            return createRDFBuilder(sparqlConnectionProperties).build();
        }
        return rdfConnection;
    }

    /**
     * Function to return max fetch size.
     *
     * @return Max fetch size (Integer max value).
     */
    @Override
    public int getMaxFetchSize() {
        return Integer.MAX_VALUE;
    }

    /**
     * Verify that connection to database is functional by running a zero-row probe query.
     * NOTE(review): the temporary connection and query execution created here are never closed;
     * consider try-with-resources — confirm against Jena's resource-management contract.
     *
     * @param timeout Time in seconds to wait for the database operation used to validate the connection to complete.
     * @return true if the connection is valid, otherwise false.
     */
    @Override
    public boolean isValid(final int timeout) {
        try {
            final RDFConnection tempConn =
                    SparqlQueryExecutor.createRDFBuilder(sparqlConnectionProperties).build();
            final QueryExecution executeQuery = tempConn.query("SELECT * { ?s ?p ?o } LIMIT 0");
            // The 2nd parameter controls the timeout for the whole query execution.
            executeQuery.setTimeout(timeout, TimeUnit.SECONDS, timeout, TimeUnit.SECONDS);
            executeQuery.execSelect();
            return true;
        } catch (final Exception e) {
            LOGGER.error("Connection to database returned an error:", e);
            return false;
        }
    }

    /**
     * Function to execute query.
     *
     * @param sparql    Query to execute.
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    @Override
    public ResultSet executeQuery(final String sparql, final Statement statement) throws SQLException {
        final Constructor<?> constructor = createConstructorBasedOnQueryType(sparql);
        return runCancellableQuery(constructor, statement, sparql);
    }

    /**
     * Private function to get the result-set constructor matching the query's type
     * (SELECT, ASK, CONSTRUCT or DESCRIBE); any other query type is rejected.
     */
    private Constructor<?> createConstructorBasedOnQueryType(final String sparql) throws SQLException {
        final Constructor<?> constructor;
        try {
            // Parsing the query up front also validates its syntax.
            final Query query = QueryFactory.create(sparql);
            switch (query.queryType()) {
                case SELECT:
                    constructor = SparqlSelectResultSet.class
                            .getConstructor(java.sql.Statement.class,
                                    SparqlSelectResultSet.ResultSetInfoWithRows.class);
                    break;
                case ASK:
                    constructor = SparqlAskResultSet.class
                            .getConstructor(java.sql.Statement.class, SparqlAskResultSet.ResultSetInfoWithRows.class);
                    break;
                case CONSTRUCT:
                case DESCRIBE:
                    constructor = SparqlTriplesResultSet.class
                            .getConstructor(java.sql.Statement.class,
                                    SparqlTriplesResultSet.ResultSetInfoWithRows.class);
                    break;
                default:
                    throw SqlError
                            .createSQLException(LOGGER, SqlState.INVALID_QUERY_EXPRESSION, SqlError.INVALID_QUERY);
            }
        } catch (final Exception e) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.INVALID_QUERY_EXPRESSION,
                    SqlError.QUERY_FAILED, e);
        }
        return constructor;
    }

    /**
     * Function to get tables. Returns an empty table list (SPARQL data has no table schema here).
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param tableName String table name with colon delimits.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    @Override
    public java.sql.ResultSet executeGetTables(final Statement statement, final String tableName) throws SQLException {
        return new SparqlResultSetGetTables(statement, new GremlinSchema(new ArrayList<>(), new ArrayList<>()),
                new ResultSetInfoWithoutRows(0, ResultSetGetTables.getColumns()));
    }

    /**
     * Function to get schema.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing schemas.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    @Override
    public java.sql.ResultSet executeGetSchemas(final Statement statement) throws SQLException {
        return new SparqlResultSetGetSchemas(statement);
    }

    /**
     * Function to get catalogs.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing catalogs.
     */
    @Override
    public java.sql.ResultSet executeGetCatalogs(final Statement statement) throws SQLException {
        return new SparqlResultSetGetCatelogs(statement);
    }

    /**
     * Function to get table types.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing table types.
     */
    @Override
    public java.sql.ResultSet executeGetTableTypes(final Statement statement) throws SQLException {
        return new SparqlResultSetGetTableTypes(statement);
    }

    /**
     * Function to get columns. Returns an empty column list (SPARQL data has no table schema here).
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param nodes     String containing nodes to get schema for.
     * @return java.sql.ResultSet Object containing columns.
     */
    @Override
    public java.sql.ResultSet executeGetColumns(final Statement statement, final String nodes) throws SQLException {
        return new SparqlResultSetGetColumns(statement, new GremlinSchema(new ArrayList<>(), new ArrayList<>()),
                new ResultSetInfoWithoutRows(0, ResultSetGetTables.getColumns()));
    }

    /**
     * Function to get type info.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing type info.
     */
    @Override
    public java.sql.ResultSet executeGetTypeInfo(final java.sql.Statement statement)
            throws SQLException {
        return new SparqlResultSetGetTypeInfo(statement);
    }

    /**
     * Runs the given SPARQL query on the shared connection and materializes the result.
     * NOTE(review): if getResultSetBasedOnQueryType throws, queryExecution is not closed here —
     * confirm whether cleanup is expected to happen via performCancel().
     *
     * @param query SPARQL query text to execute.
     * @return the materialized result wrapper, cast to the caller's expected type.
     * @throws SQLException if query execution fails.
     */
    @Override
    @SuppressWarnings("unchecked")
    protected <T> T runQuery(final String query) throws SQLException {
        synchronized (queryExecutionLock) {
            synchronized (RDF_CONNECTION_LOCK) {
                rdfConnection = getRdfConnection(sparqlConnectionProperties);
            }
            queryExecution = rdfConnection.query(query);
        }
        final QueryType queryType = queryExecution.getQuery().queryType();
        final Object sparqlResultSet = getResultSetBasedOnQueryType(queryType);
        synchronized (queryExecutionLock) {
            queryExecution.close();
            queryExecution = null;
        }
        return (T) sparqlResultSet;
    }

    /**
     * Private function to get result set based on the given query type.
     */
    private Object getResultSetBasedOnQueryType(final QueryType queryType) throws SQLException {
        final Object sparqlResultSet;
        switch (queryType) {
            case SELECT:
                final org.apache.jena.query.ResultSet selectResult = queryExecution.execSelect();
                sparqlResultSet = getSelectResultSet(selectResult);
                break;
            case ASK:
                sparqlResultSet = new SparqlAskResultSet.ResultSetInfoWithRows(queryExecution.execAsk());
                break;
            case CONSTRUCT:
                final PeekIterator<Triple> constructResult = PeekIterator.create(queryExecution.execConstructTriples());
                sparqlResultSet = getTriplesResultSet(constructResult);
                break;
            case DESCRIBE:
                final PeekIterator<Triple> describeResult = PeekIterator.create(queryExecution.execDescribeTriples());
                sparqlResultSet = getTriplesResultSet(describeResult);
                break;
            default:
                throw SqlError
                        .createSQLException(LOGGER, SqlState.INVALID_QUERY_EXPRESSION, SqlError.INVALID_QUERY);
        }
        return sparqlResultSet;
    }

    /**
     * Private function to get select result set. Materializes all rows and infers a type per
     * column; columns whose values have mixed types are demoted to String.
     */
    private Object getSelectResultSet(final org.apache.jena.query.ResultSet selectResult) {
        final List<QuerySolution> selectRows = new ArrayList<>();
        final List<String> columns = selectResult.getResultVars();
        // Working copy: a column is removed from this list once its type is settled as String.
        final List<String> tempColumns = new ArrayList<>(columns);
        final Map<String, Object> tempColumnType = new LinkedHashMap<>();
        // TODO: Revisit type promotion in performance testing ticket
        while (selectResult.hasNext()) {
            final QuerySolution row = selectResult.next();
            selectRows.add(row);
            final Iterator<String> tempColumnIterator = tempColumns.iterator();
            while (tempColumnIterator.hasNext()) {
                final String column = tempColumnIterator.next();
                final RDFNode rdfNode = row.get(column);
                if (rdfNode == null) {
                    continue;
                }
                final Node node = rdfNode.asNode();
                getColumnType(tempColumnType, tempColumnIterator, node, column);
            }
        }
        // Create new map to return column type; columns never observed default to String.
        final Map<String, Object> selectColumnType = new LinkedHashMap<>();
        columns.forEach(c -> selectColumnType.put(c, tempColumnType.getOrDefault(c, String.class)));
        return new SparqlSelectResultSet.ResultSetInfoWithRows(selectRows, columns,
                new ArrayList<>(selectColumnType.values()));
    }

    /**
     * Private function to get Triples result set. Same column-type inference as for SELECT, over
     * the fixed Subject/Predicate/Object columns.
     */
    private Object getTriplesResultSet(final PeekIterator<Triple> triplesResult) throws SQLException {
        final List<Triple> describeRows = new ArrayList<>();
        final List<String> tempColumns = new ArrayList<>(SparqlTriplesResultSet.TRIPLES_COLUMN_LIST);
        final Map<String, Object> tempColumnType = new LinkedHashMap<>();
        while (triplesResult.hasNext()) {
            final Triple row = triplesResult.next();
            describeRows.add(row);
            final Iterator<String> tempColumnIterator = tempColumns.iterator();
            while (tempColumnIterator.hasNext()) {
                final String column = tempColumnIterator.next();
                final Node node = getNodeFromColumnName(row, column);
                if (node == null) {
                    continue;
                }
                getColumnType(tempColumnType, tempColumnIterator, node, column);
            }
        }
        final Map<String, Object> triplesColumnType = new LinkedHashMap<>();
        SparqlTriplesResultSet.TRIPLES_COLUMN_LIST
                .forEach(c -> triplesColumnType.put(c, tempColumnType.getOrDefault(c, String.class)));
        return new SparqlTriplesResultSet.ResultSetInfoWithRows(describeRows,
                new ArrayList<>(triplesColumnType.values()));
    }

    /**
     * Private function to track a column's inferred node type. On the first sighting the type is
     * recorded; on a conflicting sighting the column is demoted to String and removed from the
     * iteration list so it is not inspected again.
     */
    private void getColumnType(final Map<String, Object> tempColumnType, final Iterator<String> tempColumnIterator,
                               final Node node, final String column) {
        final Object nodeType = node.isLiteral() ? node.getLiteral().getDatatype() : node.getClass();
        if (!tempColumnType.containsKey(column)) {
            tempColumnType.put(column, nodeType);
            // For Node, the resource type is org.apache.jena.graph.Node_URI instead of org.apache.jena.rdf.model.impl.ResourceImpl
            // Another possibility is to remove if is org.apache.jena.graph.Node_URI type.
        } else if (nodeType == null || !nodeType.equals(tempColumnType.get(column))) {
            tempColumnType.put(column, String.class);
            // Remove column from list if it is string type.
            tempColumnIterator.remove();
        }
    }

    /**
     * Private function to get Node from Triples result set column label.
     */
    private Node getNodeFromColumnName(final Triple row, final String column) throws SQLException {
        final Node node;
        switch (column) {
            case SparqlTriplesResultSet.TRIPLES_COLUMN_LABEL_PREDICATE:
                node = row.getPredicate();
                break;
            case SparqlTriplesResultSet.TRIPLES_COLUMN_LABEL_SUBJECT:
                node = row.getSubject();
                break;
            case SparqlTriplesResultSet.TRIPLES_COLUMN_LABEL_OBJECT:
                node = row.getObject();
                break;
            default:
                throw SqlError
                        .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_LABEL,
                                column);
        }
        return node;
    }

    /**
     * Aborts and tears down the in-flight query execution, if any.
     */
    @Override
    protected void performCancel() throws SQLException {
        synchronized (queryExecutionLock) {
            if (queryExecution != null) {
                queryExecution.abort();
                // TODO: Check in later tickets if adding close() affects anything or if we need any additional guards,
                // as its implementation does have null checks
                queryExecution.close();
                queryExecution = null;
            }
        }
    }
}
| 7,455 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlSelectResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.jena.datatypes.RDFDatatype;
import org.apache.jena.datatypes.xsd.XSDDatatype;
import org.apache.jena.query.QuerySolution;
import org.apache.jena.rdf.model.Literal;
import org.apache.jena.rdf.model.RDFNode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.sparql.SparqlTypeMapping;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * SPARQL result set for SELECT queries, materialized from a list of Jena
 * {@link QuerySolution} rows.
 */
public class SparqlSelectResultSet extends SparqlResultSet {
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlSelectResultSet.class);
    // Materialized solution rows; null when constructed without rows (metadata-only result set).
    private final List<QuerySolution> rows;
    private final List<String> columns;
    // Per-column inferred type (Class or RDFDatatype) used to build result metadata.
    private final List<Object> columnTypes;

    /**
     * SparqlSelectResultSet constructor, initializes super class.
     *
     * @param statement     Statement Object.
     * @param resultSetInfo ResultSetInfoWithRows Object.
     */
    public SparqlSelectResultSet(final Statement statement, final ResultSetInfoWithRows resultSetInfo) {
        super(statement, resultSetInfo.getColumns(), resultSetInfo.getRows().size());
        this.rows = resultSetInfo.getRows();
        this.columns = resultSetInfo.getColumns();
        this.columnTypes = resultSetInfo.getColumnTypes();
    }

    /**
     * SparqlSelectResultSet constructor for a result without rows, initializes super class.
     * All columns default to String type.
     *
     * @param statement                Statement Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public SparqlSelectResultSet(final Statement statement, final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
        super(statement, resultSetInfoWithoutRows.getColumns(), resultSetInfoWithoutRows.getRowCount());
        this.rows = null;
        this.columns = resultSetInfoWithoutRows.getColumns();
        this.columnTypes = columns.stream().map(c -> String.class).collect(Collectors.toList());
    }

    /**
     * Converts the value at the given column of the current row to a Java object.
     * Typed literals are converted via SparqlTypeMapping; anything else falls back to its
     * lexical/string form.
     *
     * @param columnIndex 1-based column index.
     * @return converted value, or null for an absent binding.
     * @throws SQLException if the result set is closed or the index is invalid.
     */
    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        final RDFNode node = getValue(columnIndex);
        if (node == null) {
            return null;
        }
        if (node.isLiteral()) {
            final Literal literal = node.asLiteral();
            final RDFDatatype resultDatatype = literal.getDatatype();
            try {
                // Check for types that need explicit conversion first.
                if (SparqlTypeMapping.checkConverter((XSDDatatype) resultDatatype)) {
                    final SparqlTypeMapping.Converter<?> converter = getConverter((XSDDatatype) resultDatatype);
                    return converter.convert(literal);
                }
                // Directly supported types can return the literal's native Java value.
                if (SparqlTypeMapping.checkContains((XSDDatatype) resultDatatype)) {
                    return literal.getValue();
                }
            } catch (final ClassCastException e) {
                LOGGER.warn("Value is not of typed literal XSDDatatype, returning as String");
                return literal.getLexicalForm();
            }
            return literal.getLexicalForm();
        }
        // Non-literal (resource/blank) nodes are rendered as their string form.
        return node.toString();
    }

    /**
     * Fetches the raw RDF node for the given column of the current row and records null-ness.
     */
    private RDFNode getValue(final int columnIndex) throws SQLException {
        verifyOpen();
        validateRowColumn(columnIndex);
        final String colName = columns.get(columnIndex - 1);
        final QuerySolution row = rows.get(getRowIndex());
        final RDFNode value = row.get(colName);
        // literal: primitives
        // resource: relationships
        setWasNull(value == null);
        return value;
    }

    private SparqlTypeMapping.Converter<?> getConverter(final XSDDatatype datatype) {
        return SparqlTypeMapping.SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.get(datatype);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // 'rows' is null when this result set was built from ResultSetInfoWithoutRows, so the
        // null check is required to avoid an NPE; both the null and the empty case fall back to
        // String for every column.
        // TODO: AN-562 see other ways to address empty result lists
        if (rows == null || rows.isEmpty()) {
            final List<Object> emptyColumnTypes = new ArrayList<>();
            for (final String column : columns) {
                emptyColumnTypes.add(String.class);
            }
            return new SparqlResultSetMetadata(columns, emptyColumnTypes);
        }
        return new SparqlResultSetMetadata(columns, this.columnTypes);
    }

    @AllArgsConstructor
    @Getter
    public static class ResultSetInfoWithRows {
        private final List<QuerySolution> rows;
        private final List<String> columns;
        private final List<Object> columnTypes;
    }
}
| 7,456 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetTypeInfo.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTypeInfo;
import java.sql.Statement;
import java.sql.Types;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Result set describing the XSD data types supported by the SPARQL driver, backing
 * DatabaseMetaData#getTypeInfo.
 */
public class SparqlResultSetGetTypeInfo extends ResultSetGetTypeInfo {
    private static final List<Map<String, Object>> TYPE_INFO = new ArrayList<>();

    static {
        registerTypes();
    }

    /**
     * Registers every supported XSD type. The insertion order is significant and must be
     * preserved exactly.
     */
    private static void registerTypes() {
        // Boolean and signed integral types.
        putInfo(TYPE_INFO, "XSDboolean", Types.BIT, false, false);
        putInfo(TYPE_INFO, "XSDbyte", Types.TINYINT, false, true);
        putInfo(TYPE_INFO, "XSDinteger", Types.BIGINT, false, true);
        putInfo(TYPE_INFO, "XSDnonPositiveInteger", Types.BIGINT, false, true);
        putInfo(TYPE_INFO, "XSDnonNegativeInteger", Types.BIGINT, false, true);
        putInfo(TYPE_INFO, "XSDpositiveInteger", Types.BIGINT, false, true);
        putInfo(TYPE_INFO, "XSDnegativeInteger", Types.BIGINT, false, true);
        // Unsigned integral types (final flag marks them unsigned).
        putInfo(TYPE_INFO, "XSDunsignedInt", Types.BIGINT, false, true, true);
        putInfo(TYPE_INFO, "XSDlong", Types.BIGINT, false, true);
        putInfo(TYPE_INFO, "XSDunsignedLong", Types.BIGINT, false, true, true);
        putInfo(TYPE_INFO, "XSDdecimal", Types.DECIMAL, false, true);
        putInfo(TYPE_INFO, "XSDint", Types.INTEGER, false, true);
        putInfo(TYPE_INFO, "XSDunsignedByte", Types.INTEGER, false, true, true);
        putInfo(TYPE_INFO, "XSDunsignedShort", Types.INTEGER, false, true, true);
        putInfo(TYPE_INFO, "XSDshort", Types.SMALLINT, false, true);
        // Floating-point types.
        putInfo(TYPE_INFO, "XSDfloat", Types.REAL, false, true);
        putInfo(TYPE_INFO, "XSDdouble", Types.DOUBLE, false, true);
        // Character and temporal types.
        putInfo(TYPE_INFO, "XSDstring", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "XSDduration", Types.VARCHAR, true, false);
        putInfo(TYPE_INFO, "XSDdate", Types.DATE, false, false);
        putInfo(TYPE_INFO, "XSDtime", Types.TIME, false, false);
        putInfo(TYPE_INFO, "XSDdateTimeStamp", Types.TIMESTAMP, false, false);
        putInfo(TYPE_INFO, "XSDdateTime", Types.TIMESTAMP, false, false);
        // Fill in the shared constant columns for every entry.
        populateConstants(TYPE_INFO);
    }

    /**
     * SparqlResultSetGetTypeInfo constructor, initializes super class with a defensive copy of
     * the type table.
     *
     * @param statement Statement Object.
     */
    public SparqlResultSetGetTypeInfo(final Statement statement) {
        super(statement, new ArrayList<>(TYPE_INFO));
    }
}
| 7,457 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlTriplesResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import com.google.common.collect.ImmutableList;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.apache.jena.datatypes.RDFDatatype;
import org.apache.jena.datatypes.xsd.XSDDatatype;
import org.apache.jena.graph.Node;
import org.apache.jena.graph.Triple;
import org.apache.jena.graph.impl.LiteralLabel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.sparql.SparqlTypeMapping;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * SPARQL result set for CONSTRUCT and DESCRIBE queries, materialized from Jena {@link Triple}
 * rows. Every row exposes the three fixed columns Subject, Predicate and Object.
 */
public class SparqlTriplesResultSet extends SparqlResultSet {
    public static final String TRIPLES_COLUMN_LABEL_SUBJECT = "Subject";
    public static final String TRIPLES_COLUMN_LABEL_PREDICATE = "Predicate";
    public static final String TRIPLES_COLUMN_LABEL_OBJECT = "Object";
    public static final List<String> TRIPLES_COLUMN_LIST = ImmutableList
            .of(TRIPLES_COLUMN_LABEL_SUBJECT, TRIPLES_COLUMN_LABEL_PREDICATE, TRIPLES_COLUMN_LABEL_OBJECT);
    public static final int TRIPLES_COLUMN_INDEX_SUBJECT = 1;
    public static final int TRIPLES_COLUMN_INDEX_PREDICATE = 2;
    public static final int TRIPLES_COLUMN_INDEX_OBJECT = 3;
    public static final int TRIPLES_COLUMN_COUNT = 3;
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlTriplesResultSet.class);
    // Materialized triples; null when constructed without rows (metadata-only result set).
    private final List<Triple> rows;
    private final List<String> columns;
    // Per-column inferred type (Class or RDFDatatype) used to build result metadata.
    private final List<Object> columnTypes;

    /**
     * SparqlTriplesResultSet constructor, initializes super class.
     *
     * @param statement     Statement Object.
     * @param resultSetInfo ResultSetInfoWithRows Object.
     */
    public SparqlTriplesResultSet(final Statement statement, final ResultSetInfoWithRows resultSetInfo) {
        super(statement, resultSetInfo.getColumns(), resultSetInfo.getRows().size());
        this.rows = resultSetInfo.getRows();
        this.columns = resultSetInfo.getColumns();
        this.columnTypes = resultSetInfo.getColumnTypes();
    }

    /**
     * SparqlTriplesResultSet constructor for a result without rows, initializes super class.
     * All columns default to String type.
     *
     * @param statement                Statement Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public SparqlTriplesResultSet(final Statement statement, final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
        super(statement, resultSetInfoWithoutRows.getColumns(), resultSetInfoWithoutRows.getRowCount());
        this.rows = null;
        this.columns = resultSetInfoWithoutRows.getColumns();
        this.columnTypes = columns.stream().map(c -> String.class).collect(Collectors.toList());
    }

    /**
     * Converts the value at the given column of the current row to a Java object.
     * Typed literals are converted via SparqlTypeMapping; anything else falls back to its
     * lexical/string form.
     *
     * @param columnIndex 1-based column index (1=Subject, 2=Predicate, 3=Object).
     * @return converted value, or null for an absent node.
     * @throws SQLException if the result set is closed or the index is invalid.
     */
    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        final Node node = getValue(columnIndex);
        if (node == null) {
            return null;
        }
        if (node.isLiteral()) {
            final LiteralLabel literal = node.getLiteral();
            final RDFDatatype resultDatatype = literal.getDatatype();
            try {
                // Check for types that need explicit conversion first.
                if (SparqlTypeMapping.checkConverter((XSDDatatype) resultDatatype)) {
                    final SparqlTypeMapping.Converter<?> converter = getConverter((XSDDatatype) resultDatatype);
                    return converter.convert(literal);
                }
                // Directly supported types can return the literal's native Java value.
                if (SparqlTypeMapping.checkContains((XSDDatatype) resultDatatype)) {
                    return literal.getValue();
                }
            } catch (final ClassCastException e) {
                LOGGER.warn("Value is not of typed literal XSDDatatype, returning as String");
                return literal.getLexicalForm();
            }
            return literal.getLexicalForm();
        }
        // Non-literal (resource/blank) nodes are rendered as their string form.
        return node.toString();
    }

    /**
     * Fetches the raw node for the given column of the current row and records null-ness.
     */
    private Node getValue(final int columnIndex) throws SQLException {
        verifyOpen();
        validateRowColumn(columnIndex);
        final Triple row = rows.get(getRowIndex());
        final Node value = getNodeFromColumnIndex(row, columnIndex);
        setWasNull(value == null);
        return value;
    }

    private SparqlTypeMapping.Converter<?> getConverter(final XSDDatatype datatype) {
        return SparqlTypeMapping.SPARQL_LITERAL_TO_JAVA_TRANSFORM_MAP.get(datatype);
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // 'rows' is null when this result set was built from ResultSetInfoWithoutRows, so the
        // null check is required to avoid an NPE; both the null and the empty case fall back to
        // String for every column.
        // TODO: AN-562 see other ways to address empty result lists
        if (rows == null || rows.isEmpty()) {
            final List<Object> emptyColumnTypes = new ArrayList<>();
            for (int i = 0; i < TRIPLES_COLUMN_COUNT; i++) {
                emptyColumnTypes.add(String.class);
            }
            return new SparqlResultSetMetadata(columns, emptyColumnTypes);
        }
        return new SparqlResultSetMetadata(columns, this.columnTypes);
    }

    /**
     * Gets the Node of a Triple row for the given 1-based column index, using the class's own
     * column-index constants rather than magic numbers.
     */
    private Node getNodeFromColumnIndex(final Triple row, final int columnIndex) throws SQLException {
        final Node node;
        switch (columnIndex) {
            case TRIPLES_COLUMN_INDEX_SUBJECT:
                node = row.getSubject();
                break;
            case TRIPLES_COLUMN_INDEX_PREDICATE:
                node = row.getPredicate();
                break;
            case TRIPLES_COLUMN_INDEX_OBJECT:
                node = row.getObject();
                break;
            default:
                throw SqlError
                        .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX,
                                columnIndex);
        }
        return node;
    }

    @AllArgsConstructor
    @Getter
    public static class ResultSetInfoWithRows {
        private final List<Triple> rows;
        private final List<String> columns = TRIPLES_COLUMN_LIST;
        private final List<Object> columnTypes;
    }
}
| 7,458 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlAskResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import com.google.common.collect.ImmutableList;
import lombok.AllArgsConstructor;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
/**
 * SPARQL result set for ASK queries: a single row with a single boolean "Ask" column.
 */
public class SparqlAskResultSet extends SparqlResultSet {
    private static final Logger LOGGER = LoggerFactory.getLogger(SparqlAskResultSet.class);
    private static final String ASK_COLUMN_NAME = "Ask";
    // The single boolean answer; null when constructed without rows (metadata-only result set).
    private final Boolean row;
    private final List<String> column;

    /**
     * SparqlAskResultSet constructor, initializes super class.
     *
     * @param statement     Statement Object.
     * @param resultSetInfo ResultSetInfoWithRows Object.
     */
    public SparqlAskResultSet(final Statement statement, final ResultSetInfoWithRows resultSetInfo) {
        super(statement, resultSetInfo.getColumn(), 1);
        this.row = resultSetInfo.row;
        this.column = resultSetInfo.column;
    }

    /**
     * SparqlAskResultSet constructor for a result without rows, initializes super class.
     *
     * @param statement                Statement Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public SparqlAskResultSet(final Statement statement, final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
        super(statement, resultSetInfoWithoutRows.getColumns(), 1);
        this.row = null;
        this.column = resultSetInfoWithoutRows.getColumns();
    }

    /**
     * Returns the boolean ASK answer; only column index 1 exists.
     *
     * @param columnIndex 1-based column index; must be 1.
     * @return the ASK answer (may be null for a row-less result set).
     * @throws SQLException if the result set is closed or the index is not 1.
     */
    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        verifyOpen();
        if (columnIndex != 1) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX, columnIndex, 1);
        }
        return row;
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Use Boolean.class directly: 'row' is null when this result set was built from
        // ResultSetInfoWithoutRows, so row.getClass() would throw a NullPointerException.
        // For a non-null row the value is identical (row.getClass() == Boolean.class).
        return new SparqlAskResultSetMetadata(column, Boolean.class);
    }

    @AllArgsConstructor
    @Getter
    public static class ResultSetInfoWithRows {
        private final Boolean row;
        private final List<String> column = ImmutableList
                .of(ASK_COLUMN_NAME);
    }
}
| 7,459 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetCatelogs.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetCatalogs;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Sparql ResultSet class backing the getCatalogs metadata call.
 */
public class SparqlResultSetGetCatelogs extends ResultSetGetCatalogs {
    /**
     * SparqlResultSetGetCatelogs constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public SparqlResultSetGetCatelogs(final Statement statement) {
        super(statement);
    }
    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Every catalog metadata column is reported as a String.
        final List<String> columnNames = getColumns();
        final List<Object> columnTypes = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnTypes.add(String.class);
        }
        return new SparqlResultSetMetadata(columnNames, columnTypes);
    }
}
| 7,460 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetSchemas.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetSchemas;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Sparql ResultSet class backing the getSchemas metadata call.
 */
public class SparqlResultSetGetSchemas extends ResultSetGetSchemas {
    /**
     * SparqlResultSetGetSchemas constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public SparqlResultSetGetSchemas(final Statement statement) {
        super(statement);
    }
    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Every schema metadata column is reported as a String.
        final List<String> columnNames = getColumns();
        final List<Object> columnTypes = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnTypes.add(String.class);
        }
        return new SparqlResultSetMetadata(columnNames, columnTypes);
    }
}
| 7,461 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetTables.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTables;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Sparql ResultSet class backing the getTables metadata call.
 */
public class SparqlResultSetGetTables extends ResultSetGetTables implements java.sql.ResultSet {
    // Maps each getTables metadata column name to its Java type.
    // REF https://docs.oracle.com/javase/7/docs/api/java/sql/DatabaseMetaData.html#getTables
    private static final Map<String, Class<?>> GET_TABLES_COLUMN_TYPES = new HashMap<>();
    static {
        GET_TABLES_COLUMN_TYPES.put("TABLE_CAT", String.class);
        GET_TABLES_COLUMN_TYPES.put("TABLE_SCHEM", String.class);
        GET_TABLES_COLUMN_TYPES.put("TABLE_NAME", String.class);
        GET_TABLES_COLUMN_TYPES.put("TABLE_TYPE", String.class);
        GET_TABLES_COLUMN_TYPES.put("REMARKS", String.class);
        GET_TABLES_COLUMN_TYPES.put("TYPE_CAT", String.class);
        GET_TABLES_COLUMN_TYPES.put("TYPE_SCHEM", String.class);
        GET_TABLES_COLUMN_TYPES.put("TYPE_NAME", String.class);
        GET_TABLES_COLUMN_TYPES.put("SELF_REFERENCING_COL_NAME", String.class);
        GET_TABLES_COLUMN_TYPES.put("REF_GENERATION", String.class);
    }
    /**
     * SparqlResultSetGetTables constructor, initializes super class.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public SparqlResultSetGetTables(final Statement statement,
                                    final GremlinSchema gremlinSchema,
                                    final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
        super(statement, gremlinSchema, resultSetInfoWithoutRows);
    }
    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Look up the Java type of each column in the order the columns are exposed.
        final List<Object> columnTypes = new ArrayList<>();
        for (final String columnName : getColumns()) {
            columnTypes.add(GET_TABLES_COLUMN_TYPES.get(columnName));
        }
        return new SparqlResultSetMetadata(getColumns(), columnTypes);
    }
}
| 7,462 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.jdbc.ResultSetMetaData;
import software.aws.neptune.sparql.SparqlTypeMapping;
import java.sql.SQLException;
import java.util.List;
/**
 * Metadata implementation for Sparql result sets, backed by a per-column list
 * of Sparql (or Java) type objects.
 */
public class SparqlResultSetMetadata extends ResultSetMetaData
        implements java.sql.ResultSetMetaData {
    // One type object per column, parallel to the column-name list in the super class.
    private final List<Object> sparqlColumnTypes;
    /**
     * SparqlResultSetMetadata constructor.
     *
     * @param columns     List of column names.
     * @param columnTypes List of column types.
     */
    protected SparqlResultSetMetadata(final List<String> columns, final List<Object> columnTypes) {
        super(columns);
        this.sparqlColumnTypes = columnTypes;
    }
    /**
     * Get the class of a given column in Sparql Result.
     * Wrapped in Java class if available, otherwise outputs Jena XSDDatatype.
     *
     * @param column the 1-based column index.
     * @return type Object for the column.
     */
    protected Object getColumnSparqlType(final int column) throws SQLException {
        verifyColumnIndex(column);
        // Column indices are 1-based per JDBC; the backing list is 0-based.
        return sparqlColumnTypes.get(column - 1);
    }
    @Override
    public int getColumnType(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Object sparqlType = getColumnSparqlType(column);
        return SparqlTypeMapping.getJDBCType(sparqlType).getJdbcCode();
    }
    @Override
    public String getColumnTypeName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Object sparqlType = getColumnSparqlType(column);
        return sparqlType.toString();
    }
    @Override
    public String getColumnClassName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final Object sparqlType = getColumnSparqlType(column);
        return SparqlTypeMapping.getJavaType(sparqlType).getName();
    }
}
| 7,463 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.jdbc.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
/**
 * Abstract base for Sparql result sets: tracks SQL-NULL state of the most
 * recently read value and defines the value/metadata hooks subclasses implement.
 */
public abstract class SparqlResultSet extends ResultSet {
    // True when the most recently read column value was SQL NULL.
    private boolean lastReadWasNull = false;
    protected SparqlResultSet(final Statement statement, final List<String> columns,
                              final int rowCount) {
        super(statement, columns, rowCount);
    }
    @Override
    protected void doClose() throws SQLException {
        // No underlying resources to release.
    }
    @Override
    public boolean wasNull() throws SQLException {
        return lastReadWasNull;
    }
    protected void setWasNull(final boolean wasNull) {
        this.lastReadWasNull = wasNull;
    }
    // Returns the converted value for the given 1-based column of the current row.
    protected abstract Object getConvertedValue(int columnIndex) throws SQLException;
    // Returns metadata describing this result set's columns.
    protected abstract ResultSetMetaData getResultMetadata() throws SQLException;
}
| 7,464 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlAskResultSetMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.jdbc.ResultSetMetaData;
import software.aws.neptune.jdbc.utilities.JdbcType;
import java.sql.SQLException;
import java.util.List;
/**
 * Metadata implementation for the single-column boolean result of a SPARQL
 * ASK query.
 */
public class SparqlAskResultSetMetadata extends ResultSetMetaData
        implements java.sql.ResultSetMetaData {
    // Java class of the single ASK column value.
    private final Class<?> askColumnClass;
    protected SparqlAskResultSetMetadata(final List<String> column, final Class<?> columnType) {
        super(column);
        this.askColumnClass = columnType;
    }
    @Override
    public int getColumnType(final int column) throws SQLException {
        // The ASK result is always boolean, reported as the BIT JDBC type.
        return JdbcType.BIT.getJdbcCode();
    }
    @Override
    public String getColumnTypeName(final int column) throws SQLException {
        return this.askColumnClass.getName();
    }
    @Override
    public String getColumnClassName(final int column) throws SQLException {
        return this.askColumnClass.getName();
    }
}
| 7,465 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetColumns.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetColumns;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Sparql ResultSet class backing the getColumns metadata call.
 */
public class SparqlResultSetGetColumns extends ResultSetGetColumns implements java.sql.ResultSet {
    // Maps each getColumns metadata column name to its Java type, per the
    // JDBC DatabaseMetaData#getColumns contract.
    private static final Map<String, Class<?>> GET_COLUMNS_COLUMN_TYPES = new HashMap<>();
    static {
        GET_COLUMNS_COLUMN_TYPES.put("TABLE_CAT", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("TABLE_SCHEM", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("TABLE_NAME", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("COLUMN_NAME", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("DATA_TYPE", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("TYPE_NAME", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("COLUMN_SIZE", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("BUFFER_LENGTH", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("DECIMAL_DIGITS", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("NUM_PREC_RADIX", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("NULLABLE", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("REMARKS", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("COLUMN_DEF", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("SQL_DATA_TYPE", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("SQL_DATETIME_SUB", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("CHAR_OCTET_LENGTH", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("ORDINAL_POSITION", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("IS_NULLABLE", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("SCOPE_CATALOG", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("SCOPE_SCHEMA", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("SCOPE_TABLE", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("SOURCE_DATA_TYPE", Integer.class);
        GET_COLUMNS_COLUMN_TYPES.put("IS_AUTOINCREMENT", String.class);
        GET_COLUMNS_COLUMN_TYPES.put("IS_GENERATEDCOLUMN", String.class);
    }
    /**
     * SparqlResultSetGetColumns constructor, initializes super class.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     */
    public SparqlResultSetGetColumns(final Statement statement,
                                     final GremlinSchema gremlinSchema,
                                     final ResultSetInfoWithoutRows resultSetInfoWithoutRows)
            throws SQLException {
        super(statement, gremlinSchema, resultSetInfoWithoutRows);
    }
    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Look up the Java type of each column in the order the columns are exposed.
        final List<Object> columnTypes = new ArrayList<>();
        for (final String columnName : getColumns()) {
            columnTypes.add(GET_COLUMNS_COLUMN_TYPES.get(columnName));
        }
        return new SparqlResultSetMetadata(getColumns(), columnTypes);
    }
}
| 7,466 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/sparql/resultset/SparqlResultSetGetTableTypes.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.sparql.resultset;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTableTypes;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
/**
 * Sparql ResultSet class backing the getTableTypes metadata call.
 */
public class SparqlResultSetGetTableTypes extends ResultSetGetTableTypes {
    /**
     * SparqlResultSetGetTableTypes constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public SparqlResultSetGetTableTypes(final Statement statement) {
        super(statement);
    }
    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Every table-type metadata column is reported as a String.
        final List<String> columnNames = getColumns();
        final List<Object> columnTypes = new ArrayList<>(columnNames.size());
        for (final String ignored : columnNames) {
            columnTypes.add(String.class);
        }
        return new SparqlResultSetMetadata(columnNames, columnTypes);
    }
}
| 7,467 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/Statement.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.jdbc.utilities.Warning;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Implementation of Statement for JDBC Driver.
*/
/**
 * Implementation of Statement for JDBC Driver.
 * <p>
 * This driver is read-only: every update/batch entry point throws
 * {@code SQLFeatureNotSupportedException}. Query execution is delegated to the
 * injected {@link QueryExecutor}. Result sets are forward-only and read-only.
 */
public class Statement implements java.sql.Statement {
    private static final Logger LOGGER = LoggerFactory.getLogger(Statement.class);
    private final java.sql.Connection connection;
    // Guards against double-close; flipped exactly once in close().
    private final AtomicBoolean isClosed = new AtomicBoolean(false);
    @Getter
    private final QueryExecutor queryExecutor;
    private int maxFieldSize = 0;
    // 0 means "no limit" per the JDBC contract for max rows.
    private long largeMaxRows = 0;
    private boolean shouldCloseOnCompletion = false;
    // Head of the SQLWarning chain; null when there are no warnings.
    private SQLWarning warnings;
    private int fetchSize = 0;
    // Most recent ResultSet produced by execute(); closed when this Statement closes.
    private ResultSet resultSet;
    /**
     * Constructor for seeding the statement with the parent connection.
     *
     * @param connection The parent connection.
     * @param queryExecutor The query executor.
     * @throws SQLException if error occurs when get type map of connection.
     */
    public Statement(final java.sql.Connection connection, final QueryExecutor queryExecutor) throws SQLException {
        this.connection = connection;
        this.warnings = null;
        this.queryExecutor = queryExecutor;
    }
    @Override
    public void addBatch(final String sql) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void cancel() throws SQLException {
        verifyOpen();
        // false: do not treat this as a close-time cancellation.
        queryExecutor.cancelQuery(false);
    }
    @Override
    public void clearBatch() throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void clearWarnings() throws SQLException {
        verifyOpen();
        warnings = null;
    }
    @Override
    public void close() throws SQLException {
        // getAndSet ensures the cleanup below runs at most once even under
        // concurrent close() calls.
        if (!this.isClosed.getAndSet(true)) {
            LOGGER.debug("Cancelling running queries.");
            try {
                queryExecutor.cancelQuery(true);
            } catch (final SQLException e) {
                // Best-effort: a failed cancel should not prevent closing the statement.
                LOGGER.warn("Error occurred while closing Statement. Failed to cancel running query: '"
                        + e.getMessage() + "'");
            }
            if (this.resultSet != null) {
                LOGGER.debug("Closing ResultSet, which was left open in Statement.");
                this.resultSet.close();
            }
        }
    }
    @Override
    public void closeOnCompletion() throws SQLException {
        verifyOpen();
        // NOTE(review): the flag is recorded but not consulted in this class —
        // confirm dependent ResultSets honor it on close.
        this.shouldCloseOnCompletion = true;
    }
    @Override
    public boolean execute(final String sql) throws SQLException {
        // Every statement in this driver is a query, so execute always produces
        // a ResultSet and therefore always returns true.
        this.resultSet = executeQuery(sql);
        return true;
    }
    // Add default execute stubs.
    @Override
    public boolean execute(final String sql, final int autoGeneratedKeys) throws SQLException {
        // Ignore the auto-generated keys as INSERT is not supported and auto-generated keys are not supported.
        return execute(sql);
    }
    @Override
    public boolean execute(final String sql, final int[] columnIndexes) throws SQLException {
        // Ignore the auto-generated keys as INSERT is not supported and auto-generated keys are not supported.
        return execute(sql);
    }
    @Override
    public boolean execute(final String sql, final String[] columnNames) throws SQLException {
        // Ignore the auto-generated keys as INSERT is not supported and auto-generated keys are not supported.
        return execute(sql);
    }
    @Override
    public int[] executeBatch() throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long[] executeLargeBatch() throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long executeLargeUpdate(final String sql) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long executeLargeUpdate(final String sql, final int autoGeneratedKeys) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long executeLargeUpdate(final String sql, final int[] columnIndexes) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long executeLargeUpdate(final String sql, final String[] columnNames) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public int executeUpdate(final String sql) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public int executeUpdate(final String sql, final int autoGeneratedKeys) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public int executeUpdate(final String sql, final int[] columnIndexes) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public int executeUpdate(final String sql, final String[] columnNames) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public java.sql.Connection getConnection() throws SQLException {
        verifyOpen();
        return connection;
    }
    @Override
    public int getFetchDirection() throws SQLException {
        verifyOpen();
        // Only forward iteration is supported.
        return ResultSet.FETCH_FORWARD;
    }
    @Override
    public void setFetchDirection(final int direction) throws SQLException {
        verifyOpen();
        if (direction != ResultSet.FETCH_FORWARD) {
            throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
        }
    }
    @Override
    public int getFetchSize() throws SQLException {
        verifyOpen();
        return fetchSize;
    }
    @Override
    public void setFetchSize(final int rows) throws SQLException {
        verifyOpen();
        if (rows < 1) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.INVALID_FETCH_SIZE,
                    rows);
        }
        // Silently truncate to the maximum number of rows that can be retrieved at a time.
        this.fetchSize = Math.min(rows, queryExecutor.getMaxFetchSize());
    }
    @Override
    public ResultSet getGeneratedKeys() throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public long getLargeMaxRows() throws SQLException {
        verifyOpen();
        // Maximum result size is 1MB, so therefore a single row cannot exceed this.
        return largeMaxRows;
    }
    @Override
    public void setLargeMaxRows(final long max) throws SQLException {
        verifyOpen();
        if (max < 0) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.INVALID_LARGE_MAX_ROWS_SIZE,
                    max);
        }
        this.largeMaxRows = max;
    }
    @Override
    public long getLargeUpdateCount() throws SQLException {
        verifyOpen();
        // Updates are not supported, so always return -1.
        return -1;
    }
    @Override
    public int getMaxFieldSize() throws SQLException {
        verifyOpen();
        return maxFieldSize;
    }
    @Override
    public void setMaxFieldSize(final int max) throws SQLException {
        verifyOpen();
        if (max < 0) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.INVALID_MAX_FIELD_SIZE,
                    max);
        }
        this.maxFieldSize = max;
    }
    @Override
    public int getMaxRows() throws SQLException {
        // Truncates the long max-rows value to int range, recording a warning
        // (rather than throwing) when truncation actually occurs.
        final long maxRows = getLargeMaxRows();
        if (maxRows > Integer.MAX_VALUE) {
            final String warning = Warning.lookup(Warning.MAX_VALUE_TRUNCATED, maxRows, Integer.MAX_VALUE);
            LOGGER.warn(warning);
            this.addWarning(new SQLWarning(warning));
            return Integer.MAX_VALUE;
        }
        return (int) maxRows;
    }
    @Override
    public void setMaxRows(final int max) throws SQLException {
        setLargeMaxRows(max);
    }
    @Override
    public boolean getMoreResults() throws SQLException {
        return getMoreResults(java.sql.Statement.CLOSE_CURRENT_RESULT);
    }
    @Override
    public boolean getMoreResults(final int current) throws SQLException {
        verifyOpen();
        // Only a single result is ever produced, so this closes the current
        // ResultSet (unless asked to keep it) and reports no further results.
        if ((java.sql.Statement.KEEP_CURRENT_RESULT != current) && (this.resultSet != null)) {
            this.resultSet.close();
            this.resultSet = null;
        }
        return false;
    }
    @Override
    public ResultSet getResultSet() throws SQLException {
        verifyOpen();
        return resultSet;
    }
    @Override
    public int getResultSetConcurrency() throws SQLException {
        verifyOpen();
        return ResultSet.CONCUR_READ_ONLY;
    }
    @Override
    public int getResultSetHoldability() throws SQLException {
        verifyOpen();
        return ResultSet.CLOSE_CURSORS_AT_COMMIT;
    }
    @Override
    public int getResultSetType() throws SQLException {
        verifyOpen();
        return ResultSet.TYPE_FORWARD_ONLY;
    }
    @Override
    public int getUpdateCount() throws SQLException {
        return (int) this.getLargeUpdateCount();
    }
    @Override
    public SQLWarning getWarnings() throws SQLException {
        verifyOpen();
        return warnings;
    }
    @Override
    public boolean isClosed() {
        return isClosed.get();
    }
    @Override
    public boolean isCloseOnCompletion() throws SQLException {
        verifyOpen();
        return shouldCloseOnCompletion;
    }
    @Override
    public boolean isPoolable() throws SQLException {
        verifyOpen();
        // Statement pooling is not supported.
        return false;
    }
    @Override
    public void setPoolable(final boolean poolable) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public boolean isWrapperFor(final Class<?> iface) {
        return CastHelper.isWrapperFor(iface, this);
    }
    @Override
    public void setCursorName(final String name) throws SQLException {
        verifyOpen();
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void setEscapeProcessing(final boolean enable) throws SQLException {
        verifyOpen();
        // Do nothing, because the driver does not support escape processing.
    }
    @Override
    public <T> T unwrap(final Class<T> iface) throws SQLException {
        return CastHelper.unwrap(iface, LOGGER, this);
    }
    /**
     * Adds a new {@link SQLWarning} to the end of the warning list.
     *
     * @param warning the {@link SQLWarning} to add.
     */
    void addWarning(final SQLWarning warning) {
        if (this.warnings == null) {
            this.warnings = warning;
        } else {
            // SQLWarning#setNextWarning walks to the end of the chain before appending.
            this.warnings.setNextWarning(warning);
        }
    }
    /**
     * Verify the statement is open.
     *
     * @throws SQLException if the statement is closed.
     */
    protected void verifyOpen() throws SQLException {
        if (isClosed.get()) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.STMT_CLOSED);
        }
    }
    @Override
    public java.sql.ResultSet executeQuery(final String sql) throws SQLException {
        return queryExecutor.executeQuery(sql, this);
    }
    @Override
    public int getQueryTimeout() throws SQLException {
        verifyOpen();
        return queryExecutor.getQueryTimeout();
    }
    @Override
    public void setQueryTimeout(final int seconds) throws SQLException {
        verifyOpen();
        queryExecutor.setQueryTimeout(seconds);
    }
}
| 7,468 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/PreparedStatement.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import lombok.Getter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Connection;
import java.sql.Date;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
/**
* Implementation of PreparedStatement for JDBC Driver.
*/
public class PreparedStatement extends Statement implements java.sql.PreparedStatement {
    // Log under this class; the original referenced Connection's logger (copy-paste slip),
    // which made PreparedStatement messages appear under the Connection logger name.
    private static final Logger LOGGER = LoggerFactory.getLogger(PreparedStatement.class);
    // The SQL text this statement was prepared with; the only query it can execute.
    private final String sql;
    @Getter
    private final QueryExecutor queryExecutor;
    // Result of the most recent execution; null until executeQuery() runs.
    private ResultSet resultSet;

    /**
     * Constructor for seeding the prepared statement with the parent connection.
     *
     * @param connection    The parent connection.
     * @param sql           The sql query.
     * @param queryExecutor The query executor.
     * @throws SQLException if error occurs when get type map of connection.
     */
    public PreparedStatement(final Connection connection, final String sql, final QueryExecutor queryExecutor)
            throws SQLException {
        super(connection, queryExecutor);
        this.sql = sql;
        this.queryExecutor = queryExecutor;
    }

    /**
     * Shared verify-then-reject path for every unsupported operation.
     * Parameter binding is not supported by this driver, so all setXXX
     * overrides (and the batch/update entry points) funnel through here.
     *
     * @return the exception for the caller to throw.
     * @throws SQLException if the statement has been closed.
     */
    private SQLException featureNotSupported() throws SQLException {
        verifyOpen();
        return SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    @Override
    public void addBatch() throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public boolean execute() throws SQLException {
        // executeQuery() always produces a ResultSet, so this always reports one.
        this.resultSet = executeQuery();
        return true;
    }

    @Override
    public boolean execute(final String sql) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public ResultSet executeQuery(final String sql) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void clearParameters() throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public int executeUpdate() throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public ParameterMetaData getParameterMetaData() throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setArray(final int parameterIndex, final Array x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setAsciiStream(final int parameterIndex, final InputStream x, final int length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setAsciiStream(final int parameterIndex, final InputStream x, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setAsciiStream(final int parameterIndex, final InputStream x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBigDecimal(final int parameterIndex, final BigDecimal x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBinaryStream(final int parameterIndex, final InputStream x, final int length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBinaryStream(final int parameterIndex, final InputStream x, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBinaryStream(final int parameterIndex, final InputStream x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBlob(final int parameterIndex, final Blob x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBlob(final int parameterIndex, final InputStream inputStream, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBlob(final int parameterIndex, final InputStream inputStream) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBoolean(final int parameterIndex, final boolean x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setByte(final int parameterIndex, final byte x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setBytes(final int parameterIndex, final byte[] x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setCharacterStream(final int parameterIndex, final Reader reader, final int length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setCharacterStream(final int parameterIndex, final Reader reader, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setCharacterStream(final int parameterIndex, final Reader reader)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setClob(final int parameterIndex, final Clob x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setClob(final int parameterIndex, final Reader reader, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setClob(final int parameterIndex, final Reader reader) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setDate(final int parameterIndex, final Date x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setDate(final int parameterIndex, final Date x, final Calendar cal)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setDouble(final int parameterIndex, final double x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setFloat(final int parameterIndex, final float x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setInt(final int parameterIndex, final int x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setLong(final int parameterIndex, final long x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNCharacterStream(final int parameterIndex, final Reader value, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNCharacterStream(final int parameterIndex, final Reader value)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNClob(final int parameterIndex, final NClob value) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNClob(final int parameterIndex, final Reader reader, final long length)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNClob(final int parameterIndex, final Reader reader) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNString(final int parameterIndex, final String value) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNull(final int parameterIndex, final int sqlType) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setNull(final int parameterIndex, final int sqlType, final String typeName)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setObject(final int parameterIndex, final Object x, final int targetSqlType)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setObject(final int parameterIndex, final Object x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setObject(final int parameterIndex, final Object x, final int targetSqlType,
                          final int scaleOrLength)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setRef(final int parameterIndex, final Ref x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setRowId(final int parameterIndex, final RowId x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setSQLXML(final int parameterIndex, final SQLXML xmlObject) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setShort(final int parameterIndex, final short x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setString(final int parameterIndex, final String x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setTime(final int parameterIndex, final Time x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setTime(final int parameterIndex, final Time x, final Calendar cal)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setTimestamp(final int parameterIndex, final Timestamp x) throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setTimestamp(final int parameterIndex, final Timestamp x, final Calendar cal)
            throws SQLException {
        throw featureNotSupported();
    }

    @Override
    public void setURL(final int parameterIndex, final URL x) throws SQLException {
        throw featureNotSupported();
    }

    @Deprecated
    @Override
    public void setUnicodeStream(final int parameterIndex, final InputStream x, final int length)
            throws SQLException {
        throw featureNotSupported();
    }

    /**
     * Gets the query timeout from the underlying query executor.
     *
     * @return the current timeout in seconds.
     * @throws SQLException if the statement is closed.
     */
    @Override
    public int getQueryTimeout() throws SQLException {
        verifyOpen();
        return queryExecutor.getQueryTimeout();
    }

    @Override
    public void setQueryTimeout(final int seconds) throws SQLException {
        verifyOpen();
        queryExecutor.setQueryTimeout(seconds);
    }

    /**
     * Executes the SQL supplied at construction time.
     *
     * @return the ResultSet produced by the query.
     * @throws SQLException if the execution fails.
     */
    @Override
    public java.sql.ResultSet executeQuery() throws SQLException {
        resultSet = queryExecutor.executeQuery(sql, this);
        return resultSet;
    }

    /**
     * @return metadata of the most recent result, or null before any execution.
     */
    @Override
    public ResultSetMetaData getMetaData() throws SQLException {
        return (resultSet == null) ? null : resultSet.getMetaData();
    }
}
| 7,469 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/Driver.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.lang.management.ManagementFactory;
import java.nio.charset.StandardCharsets;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Properties;
import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Abstract implementation of Driver for JDBC Driver.
 *
 * <p>Loads driver version metadata from the bundled project.properties file
 * and provides the URL/property parsing helpers shared by concrete drivers.</p>
 */
public abstract class Driver implements java.sql.Driver {
    public static final int DRIVER_MAJOR_VERSION;
    public static final int DRIVER_MINOR_VERSION;
    public static final String DRIVER_FULL_VERSION;
    public static final String APPLICATION_NAME;
    private static final String PROPERTIES_PATH = "/project.properties";
    private static final String MAJOR_VERSION_KEY = "driver.major.version";
    private static final String MINOR_VERSION_KEY = "driver.minor.version";
    private static final String FULL_VERSION_KEY = "driver.full.version";
    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(Driver.class);

    static {
        // Resolve the driver version once; fall back to 0.0/"" so a packaging
        // problem degrades gracefully instead of failing class initialization.
        int majorVersion = 0;
        int minorVersion = 0;
        String fullVersion = "";
        try (InputStream input = Driver.class.getResourceAsStream(PROPERTIES_PATH)) {
            if (input == null) {
                // Guard: a missing resource returns null here, and the old code
                // then NPE'd inside Properties.load, aborting class
                // initialization with ExceptionInInitializerError.
                LOGGER.error("Driver version resource " + PROPERTIES_PATH + " was not found.");
            } else {
                final Properties properties = new Properties();
                properties.load(input);
                majorVersion = Integer.parseInt(properties.getProperty(MAJOR_VERSION_KEY));
                minorVersion = Integer.parseInt(properties.getProperty(MINOR_VERSION_KEY));
                fullVersion = properties.getProperty(FULL_VERSION_KEY);
            }
        } catch (final IOException | NumberFormatException e) {
            // NumberFormatException is caught as well so malformed version
            // values degrade to the defaults instead of killing class init.
            LOGGER.error("Error loading driver version: ", e);
        }
        DRIVER_MAJOR_VERSION = majorVersion;
        DRIVER_MINOR_VERSION = minorVersion;
        DRIVER_FULL_VERSION = fullVersion;
        APPLICATION_NAME = getApplicationName();
    }

    /**
     * Get the name of the currently running application.
     *
     * @return the name of the currently running application, or "Unknown".
     */
    private static String getApplicationName() {
        // Get the process ID of the current process, then look it up in the OS
        // process table to recover the name of the hosting application.
        try {
            final String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
            final boolean isWindows = System.getProperty("os.name").startsWith("Windows");
            if (isWindows) {
                // Each argument must be its own array element for exec(String[]).
                // The previous code packed the whole command line into argv[0],
                // which can never resolve to an executable.
                final Process process = Runtime.getRuntime()
                        .exec(new String[] {"tasklist", "/fi", "PID eq " + pid, "/fo", "csv", "/nh"});
                try (final BufferedReader input = new BufferedReader(
                        new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                    final String line = input.readLine();
                    if (line != null) {
                        // CSV output: "image name","pid",... Omit the surrounding quotes.
                        final int firstComma = line.indexOf(',');
                        if (firstComma > 1) {
                            return line.substring(1, firstComma - 1);
                        }
                    }
                }
            } else {
                final Process process = Runtime.getRuntime().exec(new String[] {"ps", "-eo", "pid,comm"});
                try (final BufferedReader input = new BufferedReader(
                        new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = input.readLine()) != null) {
                        // Compare the whole first column so pid "123" cannot
                        // match a row for pid "1234".
                        final String[] columns = line.trim().split("\\s+", 2);
                        if (columns.length == 2 && columns[0].equals(pid)) {
                            return columns[1];
                        }
                    }
                }
            }
        } catch (final Exception err) {
            // Eat the exception and fall through; the name is informational only.
            LOGGER.info(
                    "An exception has occurred and ignored while retrieving the caller application name: "
                            + err.getLocalizedMessage());
        }
        return "Unknown";
    }

    @Override
    public java.sql.DriverPropertyInfo[] getPropertyInfo(final String url, final Properties info) throws SQLException {
        // No configurable properties are advertised.
        return new java.sql.DriverPropertyInfo[0];
    }

    @Override
    public int getMajorVersion() {
        return DRIVER_MAJOR_VERSION;
    }

    @Override
    public int getMinorVersion() {
        return DRIVER_MINOR_VERSION;
    }

    @Override
    public boolean jdbcCompliant() {
        return false;
    }

    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }

    /**
     * Extracts the query language token from a JDBC url.
     *
     * @param url         The connection url.
     * @param jdbcPattern Pattern whose capture group 1 is the language.
     * @return the language token.
     * @throws SQLException if the url does not match the pattern.
     */
    protected String getLanguage(final String url, final Pattern jdbcPattern) throws SQLException {
        final Matcher matcher = jdbcPattern.matcher(url);
        if (matcher.matches()) {
            return matcher.group(1);
        }
        throw SqlError.createSQLException(
                LOGGER,
                SqlState.CONNECTION_EXCEPTION,
                SqlError.UNSUPPORTED_LANGUAGE, url);
    }

    /**
     * Extracts the property string portion from a JDBC url.
     *
     * @param url         The connection url.
     * @param jdbcPattern Pattern whose capture group 2 is the property string.
     * @return the raw, semicolon-delimited property string.
     * @throws SQLException if the url does not match the pattern.
     */
    protected String getPropertyString(final String url, final Pattern jdbcPattern) throws SQLException {
        final Matcher matcher = jdbcPattern.matcher(url);
        if (matcher.matches()) {
            return matcher.group(2);
        }
        throw SqlError.createSQLException(
                LOGGER,
                SqlState.CONNECTION_EXCEPTION,
                SqlError.UNSUPPORTED_PROPERTIES_STRING, url);
    }

    /**
     * Parses a semicolon-delimited property string into a Properties object.
     * The leading entry is stored under {@code firstPropertyKey}; remaining
     * entries are "key=value" pairs or bare flags (stored with empty value).
     *
     * @param propertyString   The raw property string, e.g. "host;a=b;flag".
     * @param firstPropertyKey Key under which the leading entry is stored.
     * @return the parsed properties; possibly empty, never null.
     */
    protected Properties parsePropertyString(final String propertyString, final String firstPropertyKey) {
        final Properties properties = new Properties();
        if (propertyString.isEmpty()) {
            return properties;
        }
        final String[] propertyArray = propertyString.split(";");
        if (propertyArray.length == 0) {
            return properties;
        } else if (!propertyArray[0].trim().isEmpty()) {
            properties.setProperty(firstPropertyKey, propertyArray[0].trim());
        }
        for (int i = 1; i < propertyArray.length; i++) {
            if (propertyArray[i].contains("=")) {
                // Split on the first '=' only, so values may themselves
                // contain '=' (the old split("=") dropped such properties).
                final String[] keyValue = propertyArray[i].split("=", 2);
                if (keyValue[0].isEmpty()) {
                    LOGGER.warn("Encountered property that could not be parsed: " + propertyArray[i] + ".");
                } else {
                    properties.setProperty(keyValue[0], keyValue[1]);
                }
            } else {
                properties.setProperty(propertyArray[i], "");
            }
        }
        return properties;
    }
}
| 7,470 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/DatabaseMetaData.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.EmptyResultSet;
import software.aws.neptune.gremlin.GremlinConnectionProperties;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.sql.ResultSet;
import java.sql.RowIdLifetime;
import java.sql.SQLException;
/**
* Abstract implementation of DatabaseMetaData for JDBC Driver.
*/
public abstract class DatabaseMetaData implements java.sql.DatabaseMetaData {
private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(DatabaseMetaData.class);
// TODO: Create class with abstract functions to get these (and other) of constants
private static final int MAX_CATALOG_NAME_LENGTH = 60;
private static final int MAX_TABLE_NAME_LENGTH = 60;
private static final int MAX_STATEMENT_LENGTH = 65536;
private final Connection connection;
    /**
     * DatabaseMetaData constructor.
     *
     * @param connection Connection Object. Must be an instance of this
     *                   driver's own {@code Connection} class; the cast below
     *                   throws {@link ClassCastException} for any other
     *                   implementation.
     */
    public DatabaseMetaData(final java.sql.Connection connection) {
        this.connection = (Connection) connection;
    }
    /** Unwraps this object to the requested interface, delegating to CastHelper. */
    @Override
    public <T> T unwrap(final Class<T> iface) throws SQLException {
        return CastHelper.unwrap(iface, LOGGER, this);
    }

    /** Reports whether this object can be unwrapped to the requested interface. */
    @Override
    public boolean isWrapperFor(final Class<?> iface) {
        return CastHelper.isWrapperFor(iface, this);
    }
@Override
public java.sql.Connection getConnection() {
return connection;
}
@Override
public int getDefaultTransactionIsolation() {
return java.sql.Connection.TRANSACTION_NONE;
}
@Override
public int getDriverMajorVersion() {
return Driver.DRIVER_MAJOR_VERSION;
}
@Override
public int getDriverMinorVersion() {
return Driver.DRIVER_MINOR_VERSION;
}
@Override
public String getDriverVersion() {
return Driver.DRIVER_FULL_VERSION;
}
@Override
public int getResultSetHoldability() {
return ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
@Override
public RowIdLifetime getRowIdLifetime() {
return RowIdLifetime.ROWID_UNSUPPORTED;
}
@Override
public int getSQLStateType() {
return java.sql.DatabaseMetaData.sqlStateSQL;
}
@Override
public String getIdentifierQuoteString() {
return "\"";
}
@Override
public int getMaxCatalogNameLength() {
return MAX_CATALOG_NAME_LENGTH;
}
@Override
public int getMaxStatementLength() {
return MAX_STATEMENT_LENGTH;
}
@Override
public int getMaxTableNameLength() {
return MAX_TABLE_NAME_LENGTH;
}
@Override
public boolean supportsResultSetConcurrency(final int type, final int concurrency) {
return (type == ResultSet.TYPE_FORWARD_ONLY) && (concurrency == ResultSet.CONCUR_READ_ONLY);
}
@Override
public boolean supportsResultSetType(final int type) {
return (ResultSet.TYPE_FORWARD_ONLY == type);
}
@Override
public String getProcedureTerm() {
LOGGER.debug("Procedures are not supported. Returning null.");
return "";
}
@Override
public String getSchemaTerm() {
LOGGER.debug("Schemas are not supported. Returning an empty string.");
return "";
}
@Override
public int getMaxBinaryLiteralLength() {
LOGGER.debug("Binary is not a supported data type.");
return 0;
}
@Override
public ResultSet getCrossReference(final String parentCatalog, final String parentSchema, final String parentTable,
final String foreignCatalog, final String foreignSchema,
final String foreignTable)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getExportedKeys(final String catalog, final String schema, final String table)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getFunctionColumns(final String catalog, final String schemaNamePattern,
final String tableNamePattern, final String columnNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getFunctions(final String catalog, final String schemaPattern, final String functionNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getProcedureColumns(final String catalog, final String schemaPattern,
final String procedureNamePattern, final String columnNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getPseudoColumns(final String catalog, final String schemaPattern, final String tableNamePattern,
final String columnNamePattern) throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getSuperTables(final String catalog, final String schemaPattern, final String tableNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getSuperTypes(final String catalog, final String schemaPattern, final String tableNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getTablePrivileges(final String catalog, final String schemaPattern, final String tableNamePattern)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getUDTs(final String catalog, final String schemaPattern, final String typeNamePattern,
final int[] types) throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public ResultSet getVersionColumns(final String catalog, final String schema, final String table)
throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public int getMaxTablesInSelect() {
return 1;
}
@Override
public int getMaxUserNameLength() {
return 0;
}
@Override
public boolean allProceduresAreCallable() {
return false;
}
@Override
public boolean allTablesAreSelectable() {
return true;
}
@Override
public boolean autoCommitFailureClosesAllResultSets() {
return false;
}
@Override
public boolean dataDefinitionCausesTransactionCommit() {
return false;
}
@Override
public boolean dataDefinitionIgnoredInTransactions() {
return false;
}
@Override
public boolean deletesAreDetected(final int type) {
return false;
}
@Override
public boolean doesMaxRowSizeIncludeBlobs() {
return true;
}
@Override
public boolean generatedKeyAlwaysReturned() {
return false;
}
@Override
public boolean insertsAreDetected(final int type) {
return false;
}
@Override
public boolean isCatalogAtStart() {
return true;
}
@Override
public boolean isReadOnly() {
return true;
}
@Override
public boolean locatorsUpdateCopy() {
return false;
}
@Override
public boolean nullPlusNonNullIsNull() {
return true;
}
@Override
public boolean nullsAreSortedAtEnd() {
return false;
}
@Override
public boolean nullsAreSortedAtStart() {
return false;
}
@Override
public boolean nullsAreSortedHigh() {
return false;
}
@Override
public boolean nullsAreSortedLow() {
return false;
}
@Override
public boolean othersDeletesAreVisible(final int type) {
return false;
}
@Override
public boolean othersInsertsAreVisible(final int type) {
return false;
}
@Override
public boolean othersUpdatesAreVisible(final int type) {
return false;
}
@Override
public boolean ownDeletesAreVisible(final int type) {
return false;
}
@Override
public boolean ownInsertsAreVisible(final int type) {
return false;
}
@Override
public boolean ownUpdatesAreVisible(final int type) {
return false;
}
@Override
public boolean storesLowerCaseIdentifiers() {
return false;
}
@Override
public boolean storesLowerCaseQuotedIdentifiers() {
return false;
}
@Override
public boolean storesMixedCaseIdentifiers() {
return true;
}
@Override
public boolean storesMixedCaseQuotedIdentifiers() {
return true;
}
@Override
public boolean storesUpperCaseIdentifiers() {
return false;
}
@Override
public boolean storesUpperCaseQuotedIdentifiers() {
return false;
}
@Override
public boolean supportsANSI92EntryLevelSQL() {
return true;
}
@Override
public boolean supportsANSI92FullSQL() {
return false;
}
@Override
public boolean supportsANSI92IntermediateSQL() {
return false;
}
@Override
public boolean supportsAlterTableWithAddColumn() {
return false;
}
@Override
public boolean supportsAlterTableWithDropColumn() {
return false;
}
@Override
public boolean supportsBatchUpdates() {
return false;
}
@Override
public boolean supportsCatalogsInDataManipulation() {
return false;
}
@Override
public boolean supportsCatalogsInIndexDefinitions() {
return false;
}
@Override
public boolean supportsCatalogsInPrivilegeDefinitions() {
return false;
}
@Override
public boolean supportsCatalogsInProcedureCalls() {
return false;
}
@Override
public boolean supportsCatalogsInTableDefinitions() {
return false;
}
@Override
public boolean supportsColumnAliasing() {
return true;
}
@Override
public boolean supportsConvert() {
return false;
}
@Override
public boolean supportsConvert(final int fromType, final int toType) {
return false;
}
@Override
public boolean supportsCoreSQLGrammar() {
return true;
}
@Override
public boolean supportsCorrelatedSubqueries() {
return true;
}
@Override
public boolean supportsDataDefinitionAndDataManipulationTransactions() {
return false;
}
@Override
public boolean supportsDataManipulationTransactionsOnly() {
return false;
}
@Override
public boolean supportsDifferentTableCorrelationNames() {
return false;
}
@Override
public boolean supportsExpressionsInOrderBy() {
return true;
}
@Override
public boolean supportsExtendedSQLGrammar() {
return true;
}
@Override
public boolean supportsFullOuterJoins() {
return true;
}
@Override
public boolean supportsGetGeneratedKeys() {
return false;
}
@Override
public boolean supportsGroupBy() {
return true;
}
@Override
public boolean supportsGroupByBeyondSelect() {
return true;
}
@Override
public boolean supportsGroupByUnrelated() {
return false;
}
@Override
public boolean supportsIntegrityEnhancementFacility() {
return false;
}
@Override
public boolean supportsLikeEscapeClause() {
return true;
}
@Override
public boolean supportsLimitedOuterJoins() {
return true;
}
@Override
public boolean supportsMinimumSQLGrammar() {
return true;
}
@Override
public boolean supportsMixedCaseIdentifiers() {
return true;
}
@Override
public boolean supportsMixedCaseQuotedIdentifiers() {
return true;
}
@Override
public boolean supportsMultipleOpenResults() {
return false;
}
@Override
public boolean supportsMultipleResultSets() {
return false;
}
@Override
public boolean supportsMultipleTransactions() {
return false;
}
@Override
public boolean supportsNamedParameters() {
return false;
}
@Override
public boolean supportsNonNullableColumns() {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossCommit() {
return false;
}
@Override
public boolean supportsOpenCursorsAcrossRollback() {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossCommit() {
return false;
}
@Override
public boolean supportsOpenStatementsAcrossRollback() {
return false;
}
@Override
public boolean supportsOrderByUnrelated() {
return true;
}
@Override
public boolean supportsOuterJoins() {
return true;
}
@Override
public boolean supportsPositionedDelete() {
return false;
}
@Override
public boolean supportsPositionedUpdate() {
return false;
}
@Override
public boolean supportsResultSetHoldability(final int holdability) {
return false;
}
@Override
public boolean supportsSavepoints() {
return false;
}
@Override
public boolean supportsSchemasInDataManipulation() {
return false;
}
@Override
public boolean supportsSchemasInIndexDefinitions() {
return false;
}
@Override
public boolean supportsSchemasInPrivilegeDefinitions() {
return false;
}
@Override
public boolean supportsSchemasInProcedureCalls() {
return false;
}
@Override
public boolean supportsSchemasInTableDefinitions() {
return false;
}
@Override
public boolean supportsSelectForUpdate() {
return false;
}
@Override
public boolean supportsStatementPooling() {
return false;
}
@Override
public boolean supportsStoredFunctionsUsingCallSyntax() {
return false;
}
@Override
public boolean supportsStoredProcedures() {
return false;
}
@Override
public boolean supportsSubqueriesInComparisons() {
return false;
}
@Override
public boolean supportsSubqueriesInExists() {
return false;
}
@Override
public boolean supportsSubqueriesInIns() {
return false;
}
@Override
public boolean supportsSubqueriesInQuantifieds() {
return false;
}
@Override
public boolean supportsTableCorrelationNames() {
return false;
}
@Override
public boolean supportsTransactionIsolationLevel(final int level) {
return false;
}
@Override
public boolean supportsTransactions() {
return false;
}
@Override
public boolean supportsUnion() {
return false;
}
@Override
public boolean supportsUnionAll() {
return false;
}
@Override
public boolean updatesAreDetected(final int type) {
return false;
}
@Override
public boolean usesLocalFilePerTable() {
return false;
}
@Override
public boolean usesLocalFiles() {
return false;
}
@Override
public int getMaxCharLiteralLength() {
return 0;
}
@Override
public int getMaxColumnNameLength() {
return 0;
}
@Override
public int getMaxColumnsInGroupBy() {
return 0;
}
@Override
public int getMaxColumnsInIndex() {
return 0;
}
@Override
public int getMaxColumnsInOrderBy() {
return 0;
}
@Override
public int getMaxColumnsInSelect() {
return 0;
}
@Override
public int getMaxColumnsInTable() {
return 0;
}
@Override
public int getMaxConnections() {
return 0;
}
@Override
public int getMaxCursorNameLength() {
return 0;
}
@Override
public int getMaxIndexLength() {
return 0;
}
@Override
public int getMaxProcedureNameLength() {
return 0;
}
@Override
public int getMaxSchemaNameLength() {
return 0;
}
@Override
public int getMaxStatements() {
return 0;
}
    /**
     * Returns the hostname ("contact point") the connection was created with.
     *
     * NOTE(review): this abstract base class reads the Gremlin-specific
     * client-info key for every query language - confirm SQL/SPARQL
     * connections expose the same key before relying on this.
     */
    @Override
    public String getURL() throws SQLException {
        // "contactPoint" is the property key for the hostname for the connection
        return this.connection.getClientInfo(GremlinConnectionProperties.CONTACT_POINT_KEY);
    }
@Override
public String getUserName() throws SQLException {
return "";
}
@Override
public String getSQLKeywords() throws SQLException {
return "";
}
@Override
public String getNumericFunctions() throws SQLException {
return "";
}
@Override
public String getStringFunctions() throws SQLException {
return "";
}
@Override
public String getSystemFunctions() throws SQLException {
return "";
}
@Override
public String getTimeDateFunctions() throws SQLException {
return "";
}
    /**
     * Returns the string used to escape wildcard characters ('%', '_') in
     * metadata pattern arguments.
     *
     * NOTE(review): JDBC drivers conventionally return a backslash here;
     * returning a single quote looks suspect - confirm the metadata queries
     * really treat {@code '} as the LIKE escape character.
     */
    @Override
    public String getSearchStringEscape() throws SQLException {
        return "'";
    }
@Override
public String getExtraNameCharacters() throws SQLException {
return "";
}
@Override
public int getMaxRowSize() throws SQLException {
return 0;
}
@Override
public ResultSet getProcedures(final String catalog, final String schemaPattern, final String procedureNamePattern)
throws SQLException {
return new EmptyResultSet(getConnection().createStatement());
}
@Override
public ResultSet getSchemas(final String catalog, final String schemaPattern) throws SQLException {
// No support for getSchemas other than empty result set so we can just invoke getSchema().
return getSchemas();
}
@Override
public ResultSet getColumnPrivileges(final String catalog, final String schema, final String table,
final String columnNamePattern)
throws SQLException {
return new EmptyResultSet(getConnection().createStatement());
}
@Override
public ResultSet getBestRowIdentifier(final String catalog, final String schema, final String table,
final int scope, final boolean nullable)
throws SQLException {
return new EmptyResultSet(getConnection().createStatement());
}
@Override
public ResultSet getPrimaryKeys(final String catalog, final String schema, final String table) throws SQLException {
return new EmptyResultSet(getConnection().createStatement());
}
@Override
public ResultSet getImportedKeys(final String catalog, final String schema, final String table)
throws SQLException {
return new EmptyResultSet(getConnection().createStatement());
}
// Delegates to the query executor, which produces the supported SQL type table.
@Override
public ResultSet getTypeInfo() throws SQLException {
LOGGER.info("Getting database type info.");
return connection.getQueryExecutor().executeGetTypeInfo(getConnection().createStatement());
}
// Index metadata is not supported; answer with an empty result set.
@Override
public ResultSet getIndexInfo(final String catalog, final String schema, final String table, final boolean unique,
                              final boolean approximate)
        throws SQLException {
    final java.sql.Connection activeConnection = getConnection();
    return new EmptyResultSet(activeConnection.createStatement());
}
// User-defined type attributes are not supported; answer with an empty result set.
@Override
public ResultSet getAttributes(final String catalog, final String schemaPattern, final String typeNamePattern,
                               final String attributeNamePattern) throws SQLException {
    final java.sql.Connection activeConnection = getConnection();
    return new EmptyResultSet(activeConnection.createStatement());
}
// Client info properties are not enumerated; answer with an empty result set.
@Override
public ResultSet getClientInfoProperties() throws SQLException {
    final java.sql.Connection activeConnection = getConnection();
    return new EmptyResultSet(activeConnection.createStatement());
}
// The underlying engine version is not surfaced by this driver; reports 0.
@Override
public int getDatabaseMajorVersion() throws SQLException {
return 0;
}
// The underlying engine version is not surfaced by this driver; reports 0.
@Override
public int getDatabaseMinorVersion() throws SQLException {
return 0;
}
// Together with getJDBCMinorVersion() this reports JDBC 4.2 conformance.
@Override
public int getJDBCMajorVersion() throws SQLException {
return 4;
}
// Together with getJDBCMajorVersion() this reports JDBC 4.2 conformance.
@Override
public int getJDBCMinorVersion() throws SQLException {
return 2;
}
// Catalog, schema pattern and types filters are ignored; only tableNamePattern is honored.
@Override
public ResultSet getTables(final String catalog, final String schemaPattern, final String tableNamePattern,
final String[] types)
throws SQLException {
// Only tableNamePattern is supported as an exact node label semicolon delimited String.
LOGGER.info("Getting database tables.");
return connection.getQueryExecutor().executeGetTables(getConnection().createStatement(), tableNamePattern);
}
// Delegates schema enumeration to the query executor.
@Override
public ResultSet getSchemas() throws SQLException {
LOGGER.info("Getting database schemas.");
return connection.getQueryExecutor().executeGetSchemas(getConnection().createStatement());
}
// Delegates catalog enumeration to the query executor.
@Override
public ResultSet getCatalogs() throws SQLException {
LOGGER.info("Getting database catalogs.");
return connection.getQueryExecutor().executeGetCatalogs(getConnection().createStatement());
}
// Delegates table-type enumeration to the query executor.
@Override
public ResultSet getTableTypes() throws SQLException {
LOGGER.info("Getting database table types.");
return connection.getQueryExecutor().executeGetTableTypes(getConnection().createStatement());
}
/**
 * Retrieves column metadata for the given table name pattern.
 * Catalog, schema pattern and column name pattern are not supported and are ignored
 * with a warning.
 *
 * @throws SQLException wrapping any failure raised while fetching columns.
 */
@Override
public ResultSet getColumns(final String catalog, final String schemaPattern, final String tableNamePattern,
                            final String columnNamePattern)
        throws SQLException {
    if (catalog != null) {
        LOGGER.warn("Catalog in getColumns is not supported, ignoring {}.", catalog);
    }
    if (columnNamePattern != null) {
        LOGGER.warn("ColumnNamePattern in getColumns is not supported, ignoring {}.", columnNamePattern);
    }
    if (schemaPattern != null) {
        LOGGER.warn("SchemaPattern in getColumns is not supported, ignoring {}.", schemaPattern);
    }
    try {
        LOGGER.info("Getting database columns.");
        final ResultSet resultSet = connection.getQueryExecutor()
                .executeGetColumns(getConnection().createStatement(), tableNamePattern);
        LOGGER.info("Database columns retrieved.");
        return resultSet;
    } catch (final Exception e) {
        // LOGGER.error(message, e) already records the full stack trace; the previous
        // StringWriter/PrintWriter capture was dead code (written, never read) and is removed.
        LOGGER.error("Encountered exception", e);
        throw new SQLException(e);
    }
}
}
| 7,471 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/Connection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import lombok.NonNull;
import lombok.SneakyThrows;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.ConnectionProperties;
import software.aws.neptune.jdbc.utilities.QueryExecutor;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import software.aws.neptune.jdbc.utilities.SshTunnel;
import java.sql.Array;
import java.sql.ClientInfoStatus;
import java.sql.ResultSet;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Savepoint;
import java.sql.Struct;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Abstract implementation of Connection for JDBC Driver.
*/
public abstract class Connection implements java.sql.Connection {
private static final Logger LOGGER = LoggerFactory.getLogger(Connection.class);
private final AtomicBoolean isClosed = new AtomicBoolean(false);
private final ConnectionProperties connectionProperties;
private Map<String, Class<?>> typeMap = new HashMap<>();
private SQLWarning warnings = null;
private final SshTunnel sshTunnel;
protected Connection(@NonNull final ConnectionProperties connectionProperties) throws SQLException {
this.connectionProperties = connectionProperties;
this.connectionProperties.putIfAbsent(
ConnectionProperties.APPLICATION_NAME_KEY,
Driver.APPLICATION_NAME);
sshTunnel = new SshTunnel(connectionProperties);
if (sshTunnel.sshTunnelValid()) {
connectionProperties.sshTunnelOverride(sshTunnel.getTunnelPort());
}
}
/**
* Function to get QueryExecutor of underlying connection.
*
* @return QueryExecutor Object.
*/
public abstract QueryExecutor getQueryExecutor() throws SQLException;
protected ConnectionProperties getConnectionProperties() {
return this.connectionProperties;
}
/**
* Function to get failures as a Map.
* @param name Name of property which failed.
* @param value Value of property which failed.
* @return Map of failures.
*/
public static Map<String, ClientInfoStatus> getFailures(@NonNull final String name, final String value) {
final Properties newProperties = new Properties();
newProperties.setProperty(name, value);
return getFailures(newProperties);
}
private static Map<String, ClientInfoStatus> getFailures(final Properties properties) {
final Map<String, ClientInfoStatus> clientInfoStatusMap = new HashMap<>();
if (properties != null) {
for (final String name : properties.stringPropertyNames()) {
clientInfoStatusMap.put(name, ClientInfoStatus.REASON_UNKNOWN);
}
}
return clientInfoStatusMap;
}
/*
Functions that have their implementation in this Connection class.
*/
@Override
public Properties getClientInfo() throws SQLException {
verifyOpen();
final Properties clientInfo = new Properties();
clientInfo.putAll(connectionProperties);
return clientInfo;
}
@SneakyThrows
@Override
public void setClientInfo(final Properties properties) throws SQLClientInfoException {
if (isClosed.get()) {
throw SqlError.createSQLClientInfoException(
LOGGER,
getFailures(properties),
SqlError.CONN_CLOSED);
}
if (properties != null) {
for (final String name : properties.stringPropertyNames()) {
final String value = properties.getProperty(name);
setClientInfo(name, value);
}
}
LOGGER.debug("Successfully set client info with all properties.");
}
@Override
public String getClientInfo(final String name) throws SQLException {
verifyOpen();
if (name == null || !connectionProperties.isSupportedProperty(name)) {
LOGGER.debug("Invalid name '{}' passed to getClientInfo, returning null for value.", name);
return null;
}
return connectionProperties.get(name).toString();
}
@Override
public void setClientInfo(@NonNull final String name, final String value) throws SQLClientInfoException {
if (isClosed.get()) {
throw SqlError.createSQLClientInfoException(
LOGGER,
getFailures(name, value),
SqlError.CONN_CLOSED);
}
if (!connectionProperties.isSupportedProperty(name)) {
throw SqlError.createSQLClientInfoException(
LOGGER,
getFailures(name, value),
SqlError.INVALID_CONNECTION_PROPERTY, name, value);
}
try {
if (value != null) {
connectionProperties.validateAndSetProperty(name, value);
LOGGER.debug("Successfully set client info with name '{}' and value '{}'", name, value);
} else {
connectionProperties.remove(name);
LOGGER.debug("Successfully removed client info with name '{}'", name);
}
} catch (final SQLException ex) {
throw SqlError.createSQLClientInfoException(
LOGGER,
getFailures(name, value),
ex);
}
}
@Override
public Map<String, Class<?>> getTypeMap() throws SQLException {
verifyOpen();
return typeMap;
}
@Override
public void setTypeMap(final Map<String, Class<?>> map) throws SQLException {
verifyOpen();
if (map == null) {
LOGGER.debug("Null value is passed as conversion map, failing back to an empty hash map.");
typeMap = new HashMap<>();
} else {
typeMap = map;
}
}
@Override
public boolean isClosed() {
return isClosed.get();
}
@Override
public boolean isWrapperFor(final Class<?> iface) {
return CastHelper.isWrapperFor(iface, this);
}
@Override
public String nativeSQL(final String sql) throws SQLException {
verifyOpen();
return sql;
}
@Override
public <T> T unwrap(final Class<T> iface) throws SQLException {
return CastHelper.unwrap(iface, LOGGER, this);
}
@Override
public void clearWarnings() throws SQLException {
verifyOpen();
warnings = null;
}
@Override
public SQLWarning getWarnings() throws SQLException {
verifyOpen();
return warnings;
}
/**
* Set a new warning if there were none, or add a new warning to the end of the list.
*
* @param warning the {@link SQLWarning} to be set.SQLError
*/
protected void addWarning(final SQLWarning warning) {
LOGGER.warn(warning.getMessage());
if (this.warnings == null) {
this.warnings = warning;
return;
}
this.warnings.setNextWarning(warning);
}
protected abstract void doClose();
@Override
public void close() {
if (!isClosed.getAndSet(true)) {
if (sshTunnel.sshTunnelValid()) {
sshTunnel.disconnect();
}
doClose();
}
}
/**
* Verify the connection is open.
*
* @throws SQLException if the connection is closed.
*/
protected void verifyOpen() throws SQLException {
if (isClosed.get()) {
throw SqlError.createSQLException(
LOGGER,
SqlState.DATA_EXCEPTION,
SqlError.CONN_CLOSED);
}
}
// Add default implementation of create functions which throw.
@Override
public Struct createStruct(final String typeName, final Object[] attributes) throws SQLException {
// Only reason to do this is for parameters, if you do not support them then this is a safe implementation.
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.Blob createBlob() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.Clob createClob() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.NClob createNClob() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public SQLXML createSQLXML() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public Array createArrayOf(final String typeName, final Object[] elements) throws SQLException {
// Even though Arrays are supported, the only reason to create an Array in the application is to pass it as
// a parameter which is not supported.
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Add default of no schema and no catalog support.
@Override
public String getSchema() throws SQLException {
// No schema support. Return null.
return null;
}
@Override
public void setSchema(final String schema) throws SQLException {
// No schema support. Do nothing.
}
@Override
public String getCatalog() throws SQLException {
// No catalog support. Return null.
return null;
}
@Override
public void setCatalog(final String catalog) throws SQLException {
// No catalog support. Do nothing.
}
// Add default read-only and autocommit only implementation.
@Override
public boolean getAutoCommit() throws SQLException {
return true;
}
@Override
public void setAutoCommit(final boolean autoCommit) throws SQLException {
// Fake allowing autoCommit to be turned off, even though transactions are not supported, as some applications
// turn this off without checking support.
LOGGER.debug("Transactions are not supported, do nothing for setAutoCommit.");
}
@Override
public boolean isReadOnly() throws SQLException {
return true;
}
@Override
public void setReadOnly(final boolean readOnly) throws SQLException {
if (!readOnly) {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
}
// Default to forward only with read only concurrency.
@Override
public java.sql.Statement createStatement() throws SQLException {
verifyOpen();
return createStatement(java.sql.ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
}
// Add default no transaction support statement.
@Override
public java.sql.Statement createStatement(final int resultSetType, final int resultSetConcurrency,
final int resultSetHoldability) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql, final int autoGeneratedKeys)
throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql, final int resultSetType,
final int resultSetConcurrency)
throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql, final int resultSetType,
final int resultSetConcurrency,
final int resultSetHoldability) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql, final int[] columnIndexes)
throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql, final String[] columnNames)
throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Add default no callable statement support.
@Override
public java.sql.CallableStatement prepareCall(final String sql) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.CallableStatement prepareCall(final String sql, final int resultSetType,
final int resultSetConcurrency)
throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.CallableStatement prepareCall(final String sql, final int resultSetType,
final int resultSetConcurrency,
final int resultSetHoldability) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Default transactions as unsupported.
@Override
public int getTransactionIsolation() throws SQLException {
return java.sql.Connection.TRANSACTION_NONE;
}
@Override
public void setTransactionIsolation(final int level) throws SQLException {
verifyOpen();
if (level != java.sql.Connection.TRANSACTION_NONE) {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
}
@Override
public void releaseSavepoint(final Savepoint savepoint) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public void rollback() throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public void rollback(final Savepoint savepoint) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public Savepoint setSavepoint() throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public Savepoint setSavepoint(final String name) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public void abort(final Executor executor) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public void commit() throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public int getHoldability() throws SQLException {
return java.sql.ResultSet.CLOSE_CURSORS_AT_COMMIT;
}
@Override
public void setHoldability(final int holdability) throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public java.sql.Statement createStatement(final int resultSetType, final int resultSetConcurrency)
throws SQLException {
return new Statement(this, getQueryExecutor());
}
@Override
public java.sql.PreparedStatement prepareStatement(final String sql) throws SQLException {
return new PreparedStatement(this, sql, getQueryExecutor());
}
@Override
public int getNetworkTimeout() throws SQLFeatureNotSupportedException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public void setNetworkTimeout(final Executor executor, final int milliseconds)
throws SQLFeatureNotSupportedException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
@Override
public boolean isValid(final int timeout) throws SQLException {
if (timeout < 0) {
throw new SQLException("Timeout value must be greater than or equal to 0");
}
return getQueryExecutor().isValid(timeout);
}
}
| 7,472 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/PooledConnection.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.sql.ConnectionEvent;
import javax.sql.ConnectionEventListener;
import javax.sql.StatementEventListener;
import java.util.LinkedList;
import java.util.List;
/**
 * Abstract implementation of PooledConnection for JDBC Driver.
 *
 * <p>Statement pooling is not supported, so statement-event listener registration
 * is a no-op. Connection-event listeners are notified when {@link #close()} runs.
 */
public abstract class PooledConnection implements javax.sql.PooledConnection {
    private static final Logger LOGGER = LoggerFactory.getLogger(PooledConnection.class);
    private final List<ConnectionEventListener> connectionEventListeners = new LinkedList<>();
    // NOTE(review): held but not referenced in this class; presumably for subclasses —
    // confirm it is still needed.
    private final java.sql.Connection connection;

    /**
     * Constructs a PooledConnection wrapping the given physical connection.
     *
     * @param connection Connection Object.
     */
    public PooledConnection(final java.sql.Connection connection) {
        this.connection = connection;
    }

    @Override
    public void close() {
        LOGGER.debug("Notify all connection listeners this PooledConnection object is closed.");
        final ConnectionEvent closeEvent = new ConnectionEvent(this, null);
        for (final ConnectionEventListener listener : connectionEventListeners) {
            listener.connectionClosed(closeEvent);
        }
    }

    @Override
    public void addConnectionEventListener(final ConnectionEventListener listener) {
        LOGGER.debug("Add a ConnectionEventListener to this PooledConnection.");
        connectionEventListeners.add(listener);
    }

    @Override
    public void removeConnectionEventListener(final ConnectionEventListener listener) {
        LOGGER.debug("Remove the ConnectionEventListener attached to this PooledConnection.");
        connectionEventListeners.remove(listener);
    }

    @Override
    public void addStatementEventListener(final StatementEventListener listener) {
        // Do nothing, statement pooling is not supported.
        LOGGER.debug("addStatementEventListener is called on the current PooledConnection object.");
    }

    @Override
    public void removeStatementEventListener(final StatementEventListener listener) {
        // Do nothing, statement pooling is not supported.
        LOGGER.debug("removeStatementEventListener is called on the current PooledConnection object.");
    }
}
| 7,473 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/ResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import lombok.Getter;
import org.apache.commons.beanutils.ConversionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.JavaToJdbcTypeConverter;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.Ref;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetTime;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Abstract implementation of ResultSet for JDBC Driver.
*/
public abstract class ResultSet implements java.sql.ResultSet {
private static final Logger LOGGER = LoggerFactory.getLogger(ResultSet.class);
private static final Calendar DEFAULT_CALENDAR = new GregorianCalendar();
private final AtomicBoolean isClosed = new AtomicBoolean(false);
private final List<String> columns;
private final int rowCount;
private final java.sql.Statement statement;
@Getter
private int rowIndex;
private SQLWarning warnings = null;
// Base constructor: rowIndex starts at -1, i.e. positioned before the first row.
protected ResultSet(final java.sql.Statement statement, final List<String> columns, final int rowCount) {
this.statement = statement;
this.columns = columns;
this.rowCount = rowCount;
this.rowIndex = -1;
}
// Reinterprets the date as midnight in the calendar's zone and rebuilds the
// java.sql.Date from that instant.
private static Date getCalendarDate(final Date date, final Calendar calendar) {
    final ZoneId zone = calendar.getTimeZone().toZoneId();
    return new Date(date.toLocalDate().atStartOfDay(zone).toInstant().toEpochMilli());
}
/**
 * Converts the given {@link Time} to the equivalent instant in the calendar's time
 * zone, anchored on the epoch date (1970-01-01).
 *
 * <p>The previous implementation derived the zone via
 * {@code ZoneId.from(localDateTime.atZone(zone))}, a redundant round-trip that
 * produced the very zone it started from; the zone is now used directly.
 */
private static Time getCalendarTime(final Time time, final Calendar calendar) {
    final LocalDateTime localDateTime = time.toLocalTime().atDate(LocalDate.of(1970, 1, 1));
    return new Time(localDateTime.atZone(calendar.getTimeZone().toZoneId()).toInstant().toEpochMilli());
}
// Rebase the local date-time into the calendar's zone, then restore full nanosecond
// precision: the Timestamp(long) constructor only keeps millisecond resolution.
private static Timestamp getCalendarTimestamp(final Timestamp timestamp, final Calendar calendar) {
final Instant instant = timestamp.toLocalDateTime().atZone(calendar.getTimeZone().toZoneId()).toInstant();
final Timestamp timestampAdjusted = new Timestamp(instant.toEpochMilli());
timestampAdjusted.setNanos(instant.getNano());
return timestampAdjusted;
}
// Subclass hook: release driver-specific resources; called at most once from close().
protected abstract void doClose() throws SQLException;
// Default fetch-size support: none. Subclasses override where the protocol allows it.
protected int getDriverFetchSize() throws SQLException {
LOGGER.warn("Feature is not supported");
return 0;
}
// Default fetch-size support: none. Subclasses override where the protocol allows it.
protected void setDriverFetchSize(final int rows) {
LOGGER.warn("Feature is not supported");
}
// Subclass hook: the raw cell value at the 1-based column index for the current row.
protected abstract Object getConvertedValue(int columnIndex) throws SQLException;
// Subclass hook: metadata describing this result set's columns.
protected abstract ResultSetMetaData getResultMetadata() throws SQLException;
/**
 * Verify the result set is open.
 *
 * @throws SQLException if the result set is closed.
 */
protected void verifyOpen() throws SQLException {
    if (!isClosed.get()) {
        return;
    }
    throw SqlError.createSQLException(
            LOGGER,
            SqlState.DATA_EXCEPTION,
            SqlError.RESULT_SET_CLOSED);
}
// Reports whether close() has already been invoked.
@Override
public boolean isClosed() throws SQLException {
return isClosed.get();
}
/**
 * Closes this result set; subsequent calls are no-ops thanks to the atomic flip.
 */
@Override
public void close() throws SQLException {
    if (!isClosed.getAndSet(true)) {
        doClose();
    }
}
/**
 * Advances the cursor one row, clamping it to one position past the final row.
 *
 * @return true while the cursor still points at a valid row.
 */
@Override
public boolean next() throws SQLException {
    LOGGER.trace("Getting next row.");
    this.rowIndex = Math.min(this.rowIndex + 1, rowCount);
    return this.rowIndex < rowCount;
}
// Warning implementation.
// Returns the head of the warning chain, or null when none have been recorded.
@Override
public SQLWarning getWarnings() throws SQLException {
verifyOpen();
return warnings;
}
// Drops the entire warning chain.
@Override
public void clearWarnings() {
warnings = null;
}
/**
 * Set a new warning if there were none, or add a new warning to the end of the list.
 *
 * @param warning The {@link SQLWarning} to add.
 */
protected void addWarning(final SQLWarning warning) {
LOGGER.warn(warning.getMessage());
if (this.warnings == null) {
this.warnings = warning;
return;
}
// SQLWarning.setNextWarning appends to the end of the existing chain.
this.warnings.setNextWarning(warning);
}
// The statement that produced this result set (as supplied at construction).
@Override
public Statement getStatement() {
return statement;
}
// Standard JDBC wrapper unwrapping, delegated to the shared CastHelper.
@Override
public <T> T unwrap(final Class<T> iface) throws SQLException {
return CastHelper.unwrap(iface, LOGGER, this);
}
// Standard JDBC wrapper check, delegated to the shared CastHelper.
@Override
public boolean isWrapperFor(final Class<?> iface) {
return CastHelper.isWrapperFor(iface, this);
}
// The cursor starts at index -1, i.e. before the first row.
@Override
public boolean isBeforeFirst() throws SQLException {
verifyOpen();
return (getRowIndex() == -1);
}
/**
 * Reports whether the cursor has moved past the final row.
 * Now verifies openness first, consistent with isBeforeFirst()/isLast() and the
 * JDBC contract that cursor-position queries fail on a closed result set.
 */
@Override
public boolean isAfterLast() throws SQLException {
    verifyOpen();
    return (getRowIndex() >= rowCount);
}
/**
 * Reports whether the cursor is on the first row (index 0).
 * Now verifies openness first, consistent with isBeforeFirst()/isLast() and the
 * JDBC contract that cursor-position queries fail on a closed result set.
 */
@Override
public boolean isFirst() throws SQLException {
    verifyOpen();
    return (getRowIndex() == 0);
}
// Delegates to the driver-specific hook (default: unsupported, returns 0).
@Override
public int getFetchSize() throws SQLException {
verifyOpen();
return getDriverFetchSize();
}
/**
 * Validates and forwards the fetch-size hint to the driver-specific hook.
 *
 * @throws SQLException if the result set is closed or rows is negative.
 */
@Override
public void setFetchSize(final int rows) throws SQLException {
    verifyOpen();
    if (rows >= 0) {
        setDriverFetchSize(rows);
        return;
    }
    throw SqlError.createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_FETCH_SIZE, rows);
}
// The last row sits at index rowCount - 1.
@Override
public boolean isLast() throws SQLException {
verifyOpen();
return (getRowIndex() == (rowCount - 1));
}
// Backward repositioning is unsupported on this forward-only cursor.
@Override
public void beforeFirst() throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Direct jump past the last row is unsupported on this forward-only cursor.
@Override
public void afterLast() throws SQLException {
verifyOpen();
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Repositioning to the first row is unsupported on this forward-only cursor.
@Override
public boolean first() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// Direct jump to the last row is unsupported on this forward-only cursor.
@Override
public boolean last() throws SQLException {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
// JDBC row numbers are 1-based; the internal index is 0-based.
@Override
public int getRow() throws SQLException {
return getRowIndex() + 1;
}
/**
 * Moves the cursor to the given 1-based row (forward only).
 *
 * <p>Fix: the previous loop advanced while {@code getRowIndex() < row}, which left the
 * cursor on index {@code row} — one past the requested 1-based position (e.g.
 * {@code absolute(1)} landed on row 2). The target is now index {@code row - 1}, and
 * the backward-move check uses the same target.
 *
 * @param row 1-based target row; must be >= 1 and not behind the current position.
 * @return true if the cursor ends up on a valid row.
 */
@Override
public boolean absolute(final int row) throws SQLException {
    verifyOpen();
    final int targetIndex = row - 1;
    if (row < 1) {
        // Negative/zero (from-the-end) positioning is not supported.
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    } else if (getRowIndex() > targetIndex) {
        // Cannot move backwards on a forward-only cursor.
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    while ((getRowIndex() < targetIndex) && next()) {
        continue;
    }
    return !isBeforeFirst() && !isAfterLast();
}
// The cursor only ever moves forward.
@Override
public int getFetchDirection() {
return java.sql.ResultSet.FETCH_FORWARD;
}
// Only FETCH_FORWARD is accepted; any other direction is unsupported.
@Override
public void setFetchDirection(final int direction) throws SQLException {
if (direction != java.sql.ResultSet.FETCH_FORWARD) {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
}
/**
 * Maps a column label to its 1-based JDBC index.
 * Fix: the label is now looked up once instead of scanning the column list twice.
 *
 * @throws SQLException if the label is not a column of this result set.
 */
@Override
public int findColumn(final String columnLabel) throws SQLException {
    final int index = columns.indexOf(columnLabel);
    if (index < 0) {
        throw SqlError.createSQLException(
                LOGGER,
                SqlState.DATA_EXCEPTION,
                SqlError.INVALID_COLUMN_LABEL, columnLabel);
    }
    return index + 1;
}
/**
 * Gets the value in the target type on the current row and given index.
 *
 * @param columnIndex the index of the cell value.
 * @param targetType  the intended target type.
 * @param <T>         the intended target type.
 * @return a value that is possibly converted to the target type.
 * @throws SQLException if the result set is closed, the row is incorrect, the given
 *                      column index is invalid, or the conversion is unsupported.
 */
private <T> T getValue(final int columnIndex, final Class<T> targetType) throws SQLException {
Object o = getConvertedValue(columnIndex);
if (o == null) {
return null;
}
// Normalize java.time values to their java.sql equivalents (interpreted in the
// default calendar's zone) before handing off to the generic converter.
if (o instanceof LocalTime) {
o = getCalendarTime(Time.valueOf((LocalTime) o), DEFAULT_CALENDAR);
} else if (o instanceof LocalDate) {
o = getCalendarDate(Date.valueOf((LocalDate) o), DEFAULT_CALENDAR);
} else if (o instanceof LocalDateTime) {
o = getCalendarTimestamp(Timestamp.valueOf((LocalDateTime) o), DEFAULT_CALENDAR);
} else if (o instanceof ZonedDateTime) {
// NOTE(review): the original zone offset is discarded here via toLocalDateTime() —
// confirm this is the intended handling for zoned values.
o = getCalendarTimestamp(Timestamp.valueOf(((ZonedDateTime) o).toLocalDateTime()), DEFAULT_CALENDAR);
} else if (o instanceof OffsetTime) {
o = getCalendarTime(Time.valueOf(((OffsetTime) o).toLocalTime()), DEFAULT_CALENDAR);
}
try {
return JavaToJdbcTypeConverter.get(o.getClass(), targetType).convert(targetType, o);
} catch (final ConversionException e) {
// Surface an unsupported source->target conversion as a SQLException.
throw SqlError.createSQLException(LOGGER,
SqlState.DATA_EXCEPTION,
SqlError.UNSUPPORTED_CONVERSION,
o.getClass().getSimpleName(),
targetType.getSimpleName());
}
}
/**
 * Reads the column as a boolean; a SQL NULL maps to false per the JDBC contract.
 */
@Override
public boolean getBoolean(final int columnIndex) throws SQLException {
    LOGGER.trace("Getting column {} as a Boolean.", columnIndex);
    return Boolean.TRUE.equals(getValue(columnIndex, Boolean.class));
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public byte getByte(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Byte.", columnIndex);
final Byte value = getValue(columnIndex, Byte.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public short getShort(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Short.", columnIndex);
final Short value = getValue(columnIndex, Short.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public int getInt(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Integer.", columnIndex);
final Integer value = getValue(columnIndex, Integer.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public long getLong(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Long.", columnIndex);
final Long value = getValue(columnIndex, Long.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public float getFloat(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Float.", columnIndex);
final Float value = getValue(columnIndex, Float.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to 0, per the JDBC contract.
@Override
public double getDouble(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a Double.", columnIndex);
final Double value = getValue(columnIndex, Double.class);
return (value == null) ? 0 : value;
}
// SQL NULL maps to null for object-typed getters.
@Override
public BigDecimal getBigDecimal(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a BigDecimal.", columnIndex);
return getValue(columnIndex, BigDecimal.class);
}
// NOTE(review): returns the raw value's toString() and bypasses the
// JavaToJdbcTypeConverter used by the typed getters, so temporal values are not
// calendar-adjusted here — confirm this is intentional.
@Override
public String getString(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a String.", columnIndex);
final Object val = getConvertedValue(columnIndex);
if (val != null) {
return val.toString();
}
return null;
}
// SQL NULL maps to null for object-typed getters.
@Override
public byte[] getBytes(final int columnIndex) throws SQLException {
LOGGER.trace("Getting column {} as a byte array.", columnIndex);
return getValue(columnIndex, byte[].class);
}
    // Calendar-less date/time getters delegate to the Calendar-aware overloads
    // with a null Calendar, meaning no time-zone adjustment is applied.
    @Override
    public Date getDate(final int columnIndex) throws SQLException {
        LOGGER.trace("Getting column {} as a Date.", columnIndex);
        return getDate(columnIndex, null);
    }
    @Override
    public Time getTime(final int columnIndex) throws SQLException {
        LOGGER.trace("Getting column {} as a Time.", columnIndex);
        return getTime(columnIndex, null);
    }
    @Override
    public Timestamp getTimestamp(final int columnIndex) throws SQLException {
        LOGGER.trace("Getting column {} as a Timestamp.", columnIndex);
        return getTimestamp(columnIndex, null);
    }
    // Calendar-aware getters shift the raw value by the Calendar's zone offset
    // via getMaybeAdjustedTime when a Calendar is supplied.
    @Override
    public Date getDate(final int columnIndex, final Calendar cal) throws SQLException {
        LOGGER.trace("Getting column {} as a Date.", columnIndex);
        return getMaybeAdjustedTime(getValue(columnIndex, Date.class), cal);
    }
    @Override
    public Time getTime(final int columnIndex, final Calendar cal) throws SQLException {
        LOGGER.trace("Getting column {} as a Time.", columnIndex);
        return getMaybeAdjustedTime(getValue(columnIndex, Time.class), cal);
    }
    @Override
    public Timestamp getTimestamp(final int columnIndex, final Calendar cal) throws SQLException {
        LOGGER.trace("Getting column {} as a Timestamp.", columnIndex);
        return getMaybeAdjustedTime(getValue(columnIndex, Timestamp.class), cal);
    }
private Date getMaybeAdjustedTime(final Date utcTime, final Calendar cal) {
if (utcTime != null && cal != null) {
long adjustedTime = utcTime.getTime();
adjustedTime -= cal.getTimeZone().getOffset(adjustedTime);
return new Date(adjustedTime);
}
return utcTime;
}
private Time getMaybeAdjustedTime(final Time utcTime, final Calendar cal) {
if (utcTime != null && cal != null) {
long adjustedTime = utcTime.getTime();
adjustedTime -= cal.getTimeZone().getOffset(adjustedTime);
return new Time(adjustedTime);
}
return utcTime;
}
private Timestamp getMaybeAdjustedTime(final Timestamp utcTime, final Calendar cal) {
if (utcTime != null && cal != null) {
long adjustedTime = utcTime.getTime();
adjustedTime -= cal.getTimeZone().getOffset(adjustedTime);
return new Timestamp(adjustedTime);
}
return utcTime;
}
    // Generic object getters plus cursor capability reporting.
    @Override
    public Object getObject(final int columnIndex) throws SQLException {
        LOGGER.trace("Getting column {} as an Object.", columnIndex);
        return getConvertedValue(columnIndex);
    }
    @Override
    public <T> T getObject(final int columnIndex, final Class<T> type) throws SQLException {
        LOGGER.trace("Getting column {} as an Object using provided Type.", columnIndex);
        return getValue(columnIndex, type);
    }
    // Custom type-map lookups are a JDBC optional feature not offered here.
    @Override
    public Object getObject(final int columnIndex, final Map<String, Class<?>> map) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // This result set only supports a forward-only, read-only cursor.
    @Override
    public int getType() throws SQLException {
        return java.sql.ResultSet.TYPE_FORWARD_ONLY;
    }
    @Override
    public int getConcurrency() throws SQLException {
        return java.sql.ResultSet.CONCUR_READ_ONLY;
    }
    // Named cursors are not supported.
    @Override
    public String getCursorName() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // Add default not supported for all types.
    // The following getters cover JDBC optional features (streams, LOBs, Ref,
    // RowId, SQLXML, NString) that this driver does not implement; each throws
    // SQLFeatureNotSupportedException.
    @Override
    public BigDecimal getBigDecimal(final int columnIndex, final int scale) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public InputStream getAsciiStream(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public InputStream getUnicodeStream(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public InputStream getBinaryStream(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Ref getRef(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Blob getBlob(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Clob getClob(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Array getArray(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public URL getURL(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public RowId getRowId(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public NClob getNClob(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public SQLXML getSQLXML(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public String getNString(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Reader getNCharacterStream(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public Reader getCharacterStream(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // Default implementation for all label functions to just use findColumn(label) to find idx and lookup with idx.
    @Override
    public Object getObject(final String columnLabel, final Map<String, Class<?>> map) throws SQLException {
        return getObject(findColumn(columnLabel), map);
    }
    @Override
    public Ref getRef(final String columnLabel) throws SQLException {
        return getRef(findColumn(columnLabel));
    }
    @Override
    public Blob getBlob(final String columnLabel) throws SQLException {
        return getBlob(findColumn(columnLabel));
    }
    @Override
    public Clob getClob(final String columnLabel) throws SQLException {
        return getClob(findColumn(columnLabel));
    }
    @Override
    public Array getArray(final String columnLabel) throws SQLException {
        return getArray(findColumn(columnLabel));
    }
    @Override
    public String getString(final String columnLabel) throws SQLException {
        return getString(findColumn(columnLabel));
    }
    @Override
    public boolean getBoolean(final String columnLabel) throws SQLException {
        return getBoolean(findColumn(columnLabel));
    }
    @Override
    public byte getByte(final String columnLabel) throws SQLException {
        return getByte(findColumn(columnLabel));
    }
    @Override
    public short getShort(final String columnLabel) throws SQLException {
        return getShort(findColumn(columnLabel));
    }
    @Override
    public int getInt(final String columnLabel) throws SQLException {
        return getInt(findColumn(columnLabel));
    }
    @Override
    public long getLong(final String columnLabel) throws SQLException {
        return getLong(findColumn(columnLabel));
    }
    @Override
    public float getFloat(final String columnLabel) throws SQLException {
        return getFloat(findColumn(columnLabel));
    }
    @Override
    public double getDouble(final String columnLabel) throws SQLException {
        return getDouble(findColumn(columnLabel));
    }
    @Override
    public BigDecimal getBigDecimal(final String columnLabel, final int scale) throws SQLException {
        // NOTE(review): the requested scale is silently dropped here, while the
        // index-based getBigDecimal(int, int) overload throws
        // SQLFeatureNotSupportedException — confirm which behavior is intended
        // and make the two overloads consistent.
        return getBigDecimal(findColumn(columnLabel));
    }
    @Override
    public byte[] getBytes(final String columnLabel) throws SQLException {
        return getBytes(findColumn(columnLabel));
    }
    @Override
    public Date getDate(final String columnLabel) throws SQLException {
        return getDate(findColumn(columnLabel));
    }
    @Override
    public Time getTime(final String columnLabel) throws SQLException {
        return getTime(findColumn(columnLabel));
    }
    @Override
    public Timestamp getTimestamp(final String columnLabel) throws SQLException {
        return getTimestamp(findColumn(columnLabel));
    }
    @Override
    public InputStream getAsciiStream(final String columnLabel) throws SQLException {
        return getAsciiStream(findColumn(columnLabel));
    }
    @Override
    public InputStream getUnicodeStream(final String columnLabel) throws SQLException {
        return getUnicodeStream(findColumn(columnLabel));
    }
    @Override
    public InputStream getBinaryStream(final String columnLabel) throws SQLException {
        return getBinaryStream(findColumn(columnLabel));
    }
    @Override
    public Object getObject(final String columnLabel) throws SQLException {
        return getObject(findColumn(columnLabel));
    }
    @Override
    public Reader getCharacterStream(final String columnLabel) throws SQLException {
        return getCharacterStream(findColumn(columnLabel));
    }
    @Override
    public BigDecimal getBigDecimal(final String columnLabel) throws SQLException {
        return getBigDecimal(findColumn(columnLabel));
    }
    @Override
    public SQLXML getSQLXML(final String columnLabel) throws SQLException {
        return getSQLXML(findColumn(columnLabel));
    }
    @Override
    public URL getURL(final String columnLabel) throws SQLException {
        return getURL(findColumn(columnLabel));
    }
    @Override
    public RowId getRowId(final String columnLabel) throws SQLException {
        return getRowId(findColumn(columnLabel));
    }
    @Override
    public NClob getNClob(final String columnLabel) throws SQLException {
        return getNClob(findColumn(columnLabel));
    }
    @Override
    public String getNString(final String columnLabel) throws SQLException {
        return getNString(findColumn(columnLabel));
    }
    @Override
    public Reader getNCharacterStream(final String columnLabel) throws SQLException {
        return getNCharacterStream(findColumn(columnLabel));
    }
    @Override
    public Date getDate(final String columnLabel, final Calendar cal) throws SQLException {
        return getDate(findColumn(columnLabel), cal);
    }
    @Override
    public Time getTime(final String columnLabel, final Calendar cal) throws SQLException {
        return getTime(findColumn(columnLabel), cal);
    }
    @Override
    public Timestamp getTimestamp(final String columnLabel, final Calendar cal) throws SQLException {
        return getTimestamp(findColumn(columnLabel), cal);
    }
    @Override
    public <T> T getObject(final String columnLabel, final Class<T> type) throws SQLException {
        return getObject(findColumn(columnLabel), type);
    }
    // All functions below have default implementation which is setup for read only and forward only cursors.
    @Override
    public int getHoldability() throws SQLException {
        return java.sql.ResultSet.CLOSE_CURSORS_AT_COMMIT;
    }
@Override
public boolean relative(final int rows) throws SQLException {
verifyOpen();
if (rows < 0) {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
int rowCopy = rows;
while (rowCopy-- > 0) {
if (!next()) {
return false;
}
}
return true;
}
    // Row-change detection always reports false: the driver is read-only, so
    // rows can never be deleted, inserted, or updated through this result set.
    @Override
    public boolean rowDeleted() {
        return false;
    }
    @Override
    public boolean rowInserted() {
        return false;
    }
    @Override
    public boolean rowUpdated() {
        return false;
    }
    // Cursor repositioning and row mutation entry points are unsupported for a
    // TYPE_FORWARD_ONLY / CONCUR_READ_ONLY result set.
    @Override
    public void moveToCurrentRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void refreshRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public boolean previous() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void insertRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void moveToInsertRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void deleteRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void cancelRowUpdates() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // All updateXxx methods are unsupported: this is a read-only driver
    // (CONCUR_READ_ONLY), so every mutation entry point throws
    // SQLFeatureNotSupportedException.
    @Override
    public void updateArray(final int columnIndex, final Array x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateArray(final String columnLabel, final Array x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final int columnIndex, final InputStream x, final int length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final String columnLabel, final InputStream x, final int length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final int columnIndex, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final String columnLabel, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final int columnIndex, final InputStream x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateAsciiStream(final String columnLabel, final InputStream x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBigDecimal(final int columnIndex, final BigDecimal x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBigDecimal(final String columnLabel, final BigDecimal x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final int columnIndex, final InputStream x, final int i1)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final String columnLabel, final InputStream x, final int i)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final int columnIndex, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final String columnLabel, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final int columnIndex, final InputStream x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBinaryStream(final String columnLabel, final InputStream x)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final int columnIndex, final Blob x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final String columnLabel, final Blob x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final int columnIndex, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final String columnLabel, final InputStream x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final int columnIndex, final InputStream x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBlob(final String columnLabel, final InputStream x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBoolean(final int columnIndex, final boolean x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBoolean(final String columnLabel, final boolean x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateByte(final int columnIndex, final byte x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateByte(final String columnLabel, final byte x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBytes(final int columnIndex, final byte[] x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateBytes(final String columnLabel, final byte[] x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final int columnIndex, final Reader x, final int length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final String columnLabel, final Reader x, final int length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final int columnIndex, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final String columnLabel, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final int columnIndex, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateCharacterStream(final String columnLabel, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final int columnIndex, final Clob x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final String columnLabel, final Clob x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final int columnIndex, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final String columnLabel, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final int columnIndex, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateClob(final String columnLabel, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateDate(final int columnIndex, final Date x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateDate(final String columnLabel, final Date x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateDouble(final int columnIndex, final double x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateDouble(final String columnLabel, final double x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateFloat(final int columnIndex, final float x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateFloat(final String columnLabel, final float x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateInt(final int columnIndex, final int x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateInt(final String columnLabel, final int x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateLong(final int columnIndex, final long l) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateLong(final String columnLabel, final long l) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNCharacterStream(final int columnIndex, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNCharacterStream(final String columnLabel, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNCharacterStream(final int columnIndex, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNCharacterStream(final String columnLabel, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final int columnIndex, final NClob nClob) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final String columnLabel, final NClob nClob) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final int columnIndex, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final String columnLabel, final Reader x, final long length)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final int columnIndex, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNClob(final String columnLabel, final Reader x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNString(final int columnIndex, final String x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNString(final String columnLabel, final String x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNull(final int columnIndex) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateNull(final String columnLabel) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateObject(final int columnIndex, final Object x, final int scaleOrLength)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateObject(final int columnIndex, final Object x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateObject(final String columnLabel, final Object x, final int scaleOrLength)
            throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateObject(final String columnLabel, final Object x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateRef(final int columnIndex, final Ref x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateRef(final String columnLabel, final Ref x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateRow() throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateRowId(final int columnIndex, final RowId x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateRowId(final String columnLabel, final RowId x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateSQLXML(final int columnIndex, final SQLXML x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateSQLXML(final String columnLabel, final SQLXML x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateShort(final int columnIndex, final short x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateShort(final String columnLabel, final short x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateString(final int columnIndex, final String x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateString(final String columnLabel, final String x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateTime(final int columnIndex, final Time x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateTime(final String columnLabel, final Time x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateTimestamp(final int columnIndex, final Timestamp x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    @Override
    public void updateTimestamp(final String columnLabel, final Timestamp x) throws SQLException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // Validates that the cursor is positioned on a row and that columnIndex is
    // within [1, columns.size()]; throws a DATA_EXCEPTION SQLException
    // otherwise. getRowIndex() + 1 converts the internal 0-based row index to
    // the 1-based value reported to the caller.
    protected void validateRowColumn(final int columnIndex) throws SQLException {
        if ((getRowIndex() < 0) || (getRowIndex() >= rowCount)) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_INDEX, getRowIndex() + 1,
                            rowCount);
        }
        if ((columnIndex <= 0) || (columnIndex > columns.size())) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX, columnIndex,
                            columns.size());
        }
    }
    // Exposes the subclass-provided metadata through the standard JDBC entry point.
    @Override
    public java.sql.ResultSetMetaData getMetaData() throws SQLException {
        return getResultMetadata();
    }
}
| 7,474 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/ResultSetMetaData.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.SQLException;
import java.sql.Types;
import java.util.List;
/**
* Abstract implementation of ResultSetMetaData for JDBC Driver.
*/
public abstract class ResultSetMetaData implements java.sql.ResultSetMetaData {
private static final Logger LOGGER = LoggerFactory.getLogger(ResultSetMetaData.class);
private final List<String> columns;
protected ResultSetMetaData(final List<String> columns) {
this.columns = columns;
}
@Override
public boolean isWrapperFor(final Class<?> iface) {
return CastHelper.isWrapperFor(iface, this);
}
@Override
public <T> T unwrap(final Class<T> iface) throws SQLException {
return CastHelper.unwrap(iface, LOGGER, this);
}
/**
* Verify if the given column index is valid.
*
* @param column the 1-based column index.
* @throws SQLException if the column index is not valid for this result.
*/
protected void verifyColumnIndex(final int column) throws SQLException {
if ((column <= 0) || (column > columns.size())) {
throw SqlError.createSQLException(
LOGGER,
SqlState.DATA_EXCEPTION,
SqlError.INVALID_INDEX,
column,
columns.size());
}
}
@Override
public int getColumnCount() throws SQLException {
return columns.size();
}
@Override
public int getColumnDisplaySize(final int column) throws SQLException {
verifyColumnIndex(column);
final int type = getColumnType(column);
switch (type) {
case Types.BIT:
return 1;
case Types.VARCHAR:
return Integer.MAX_VALUE;
case Types.NULL:
return 0;
case Types.DOUBLE:
case Types.FLOAT:
case Types.REAL:
case Types.DECIMAL:
return 25;
case Types.DATE:
case Types.TIME:
case Types.TIMESTAMP:
return 24;
case Types.BIGINT:
case Types.INTEGER:
case Types.SMALLINT:
case Types.TINYINT:
return 20;
default:
LOGGER.warn(String.format("Unsupported data type for getColumnDisplaySize(%d).", type));
return 0;
}
}
@Override
public int getPrecision(final int column) throws SQLException {
verifyColumnIndex(column);
final int type = getColumnType(column);
return getTypePrecision(type);
}
/**
* Get the supported precision of the JDBC Type.
*
* @param type The int mapping value of the JDBC Type.
* @return The supported precision of the JDBC Type.
*/
public static int getTypePrecision(final int type) {
switch (type) {
case Types.BOOLEAN:
return 5;
case Types.BIT:
return 1;
case Types.VARCHAR:
return Integer.MAX_VALUE;
case Types.NULL:
return 0;
case Types.DOUBLE:
case Types.FLOAT:
case Types.REAL:
case Types.DECIMAL:
return 15;
case Types.DATE:
case Types.TIME:
case Types.TIMESTAMP:
return 24;
case Types.BIGINT:
return 20;
case Types.INTEGER:
return 11;
case Types.SMALLINT:
return 5;
case Types.TINYINT:
return 3;
default:
LOGGER.warn(String.format("Unsupported data type for getPrecision(%d).", type));
return 0;
}
}
@Override
public int getScale(final int column) throws SQLException {
verifyColumnIndex(column);
final int columnType = getColumnType(column);
switch (columnType) {
case Types.DOUBLE:
case Types.FLOAT:
case Types.DECIMAL:
// 15 significant digits after decimal.
return 15;
case Types.REAL:
// 6 Sig significant digits after decimal.
return 6;
default:
return 0;
}
}
@Override
public boolean isAutoIncrement(final int column) throws SQLException {
verifyColumnIndex(column);
// Concept doesn't exist.
return false;
}
@Override
public boolean isCaseSensitive(final int column) throws SQLException {
verifyColumnIndex(column);
return (getColumnClassName(column).equals(String.class.getName()));
}
@Override
public boolean isSearchable(final int column) throws SQLException {
verifyColumnIndex(column);
// We don't support WHERE clauses in the typical SQL way, so not searchable.
return false;
}
@Override
public boolean isCurrency(final int column) throws SQLException {
verifyColumnIndex(column);
// If it is currency, there's no way to know.
return false;
}
@Override
public int isNullable(final int column) throws SQLException {
verifyColumnIndex(column);
return java.sql.ResultSetMetaData.columnNullableUnknown;
}
@Override
public boolean isSigned(final int column) throws SQLException {
verifyColumnIndex(column);
final int type = getColumnType(column);
return ((type == Types.INTEGER) ||
(type == Types.BIGINT) ||
(type == Types.DOUBLE) ||
(type == Types.FLOAT) ||
(type == Types.REAL) ||
(type == Types.SMALLINT) ||
(type == Types.TINYINT) ||
(type == Types.DECIMAL));
}
@Override
public String getColumnLabel(final int column) throws SQLException {
verifyColumnIndex(column);
// Label is same as name.
return getColumnName(column);
}
@Override
public String getColumnName(final int column) throws SQLException {
verifyColumnIndex(column);
return columns.get(column - 1);
}
@Override
public boolean isReadOnly(final int column) throws SQLException {
verifyColumnIndex(column);
// Read only driver.
return true;
}
@Override
public boolean isWritable(final int column) throws SQLException {
verifyColumnIndex(column);
// Read only driver.
return false;
}
@Override
public boolean isDefinitelyWritable(final int column) throws SQLException {
verifyColumnIndex(column);
// Read only driver.
return false;
}
@Override
public String getTableName(final int column) throws SQLException {
verifyColumnIndex(column);
// Doesn't have the concept of tables.
return "";
}
    /**
     * Gets the schema name for the column's table.
     *
     * @param column the 1-based column index.
     * @return an empty string; the graph data model has no concept of schemas.
     * @throws SQLException if the column index is invalid.
     */
    @Override
    public String getSchemaName(final int column) throws SQLException {
        verifyColumnIndex(column);
        // Doesn't have the concept of schema.
        return "";
    }
    /**
     * Gets the catalog name for the column's table.
     *
     * @param column the 1-based column index.
     * @return an empty string; the graph data model has no concept of catalogs.
     * @throws SQLException if the column index is invalid.
     */
    @Override
    public String getCatalogName(final int column) throws SQLException {
        verifyColumnIndex(column);
        // Doesn't have the concept of catalog.
        return "";
    }
}
| 7,475 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/DataSource.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.CastHelper;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.io.PrintWriter;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.logging.Logger;
/**
* Abstract implementation of DataSource for JDBC Driver.
*/
/**
 * Abstract base for the driver's {@link javax.sql.DataSource} and
 * {@link javax.sql.ConnectionPoolDataSource} implementations. All logging goes
 * through SLF4J; java.util.logging integration is not supported.
 */
public abstract class DataSource implements javax.sql.DataSource, javax.sql.ConnectionPoolDataSource {
    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(DataSource.class);

    /**
     * Unwraps this object to the requested interface.
     *
     * @param iface the interface to unwrap to.
     * @return this object cast to {@code iface}.
     * @throws SQLException if this object does not implement {@code iface}.
     */
    @Override
    public <T> T unwrap(final Class<T> iface) throws SQLException {
        return CastHelper.unwrap(iface, LoggerFactory.getLogger(DataSource.class), this);
    }

    /**
     * Reports whether this object implements (and can be unwrapped to) the given interface.
     *
     * @param iface the interface to check.
     * @return {@code true} when this object is assignable to {@code iface}.
     */
    @Override
    public boolean isWrapperFor(final Class<?> iface) {
        return CastHelper.isWrapperFor(iface, this);
    }

    /** @return {@code null}; log writers are not used by this driver. */
    @Override
    public PrintWriter getLogWriter() {
        return null;
    }

    /** Ignored; log writers are not used by this driver. */
    @Override
    public void setLogWriter(final PrintWriter out) {
        // NOOP
    }

    /**
     * @throws SQLFeatureNotSupportedException always; java.util.logging is not supported.
     */
    @Override
    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
        throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
    }
    // TODO: Get and set of different properties. Either done generically through this class or custom in OpenCypher implementation.
}
| 7,476 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/CastHelper.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import org.slf4j.Logger;
import java.sql.SQLException;
/**
 * Shared helpers implementing the {@link java.sql.Wrapper} contract
 * ({@code unwrap} / {@code isWrapperFor}) for the driver's JDBC classes.
 */
public class CastHelper {
    /**
     * Casts the calling object to the requested interface.
     *
     * @param iface Class Object passed in.
     * @param logger Logger for errors.
     * @param callingClass Calling class of function (should be this).
     * @param <T> Template type of iface.
     * @return the calling object cast to {@code iface}.
     * @throws SQLException if the calling object does not implement {@code iface}.
     */
    public static <T> T unwrap(final Class<T> iface, final Logger logger, final Object callingClass)
            throws SQLException {
        // Guard clause: fail fast when the cast is impossible.
        if (!iface.isAssignableFrom(callingClass.getClass())) {
            throw SqlError.createSQLException(
                    logger,
                    SqlState.DATA_EXCEPTION,
                    SqlError.CANNOT_UNWRAP,
                    iface.toString());
        }
        return iface.cast(callingClass);
    }

    /**
     * Reports whether the calling object can be unwrapped to the given interface.
     *
     * @param iface Class Object passed in (may be {@code null}).
     * @param callingClass Calling class of function (should be this).
     * @return {@code true} when {@code iface} is non-null and assignable from the caller.
     */
    public static boolean isWrapperFor(final Class<?> iface, final Object callingClass) {
        if (iface == null) {
            return false;
        }
        return iface.isAssignableFrom(callingClass.getClass());
    }
}
| 7,477 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/JavaToJdbcTypeConverter.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import com.google.common.collect.ImmutableMap;
import org.apache.commons.beanutils.converters.AbstractConverter;
import org.apache.commons.beanutils.converters.ArrayConverter;
import org.apache.commons.beanutils.converters.BigDecimalConverter;
import org.apache.commons.beanutils.converters.BooleanConverter;
import org.apache.commons.beanutils.converters.ByteConverter;
import org.apache.commons.beanutils.converters.DateConverter;
import org.apache.commons.beanutils.converters.DoubleConverter;
import org.apache.commons.beanutils.converters.FloatConverter;
import org.apache.commons.beanutils.converters.IntegerConverter;
import org.apache.commons.beanutils.converters.LongConverter;
import org.apache.commons.beanutils.converters.ShortConverter;
import org.apache.commons.beanutils.converters.SqlDateConverter;
import org.apache.commons.beanutils.converters.SqlTimeConverter;
import org.apache.commons.beanutils.converters.SqlTimestampConverter;
import org.apache.commons.beanutils.converters.StringConverter;
import org.slf4j.LoggerFactory;
import java.math.BigDecimal;
import java.sql.Date;
import java.sql.SQLException;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.Map;
/**
 * Maps Java classes to JDBC type codes and provides Commons-BeanUtils converters
 * for coercing a source Java value into a requested target type.
 */
public class JavaToJdbcTypeConverter {
    // Java class -> java.sql.Types code used when describing values in JDBC metadata.
    // NOTE(review): publicly mutable map — callers could modify it; confirm that is intended.
    public static final Map<Class<?>, Integer> CLASS_TO_JDBC_ORDINAL = new HashMap<>();
    private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(JavaToJdbcTypeConverter.class);
    // Source Java class -> BeanUtils converter; converters are constructed with a
    // default value (0/false) so conversion failures fall back instead of throwing.
    private static final ImmutableMap<Class<?>, AbstractConverter> TYPE_CONVERTERS_MAP;
    static {
        CLASS_TO_JDBC_ORDINAL.put(Boolean.class, JdbcType.BIT.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Byte.class, JdbcType.TINYINT.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Short.class, JdbcType.SMALLINT.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Integer.class, JdbcType.INTEGER.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Long.class, JdbcType.BIGINT.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Float.class, JdbcType.REAL.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Double.class, JdbcType.DOUBLE.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Date.class, JdbcType.DATE.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(Time.class, JdbcType.TIME.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(String.class, JdbcType.VARCHAR.getJdbcCode());
        CLASS_TO_JDBC_ORDINAL.put(java.math.BigDecimal.class, JdbcType.DECIMAL.getJdbcCode());
        // Both boxed and primitive classes are registered so lookups succeed for either form.
        TYPE_CONVERTERS_MAP = ImmutableMap.<Class<?>, AbstractConverter>builder()
                .put(BigDecimal.class, new BigDecimalConverter(0))
                .put(Boolean.class, new BooleanConverter(false))
                .put(boolean.class, new BooleanConverter(false))
                .put(Byte.class, new ByteConverter(0))
                .put(byte.class, new ByteConverter(0))
                .put(Date.class, new SqlDateConverter())
                .put(java.util.Date.class, new DateConverter())
                .put(Time.class, new SqlTimeConverter())
                .put(Double.class, new DoubleConverter(0.0))
                .put(double.class, new DoubleConverter(0.0))
                .put(Float.class, new FloatConverter(0.0))
                .put(float.class, new FloatConverter(0.0))
                .put(Integer.class, new IntegerConverter(0))
                .put(int.class, new IntegerConverter(0))
                .put(Long.class, new LongConverter(0))
                .put(long.class, new LongConverter(0))
                .put(Short.class, new ShortConverter(0))
                .put(short.class, new ShortConverter(0))
                .put(String.class, new StringConverter())
                .put(Timestamp.class, new SqlTimestampConverter())
                .put(Byte[].class, new ArrayConverter(Byte[].class, new ByteConverter()))
                .put(byte[].class, new ArrayConverter(byte[].class, new ByteConverter()))
                .build();
    }
    /**
     * Gets the type converter for the given source type.
     *
     * @param sourceType the source type to get the converter for.
     * @param targetType the target type used to log error in case of missing converter.
     * @return a {@link AbstractConverter} instance for the source type.
     * @throws SQLException if a converter cannot be found the source type.
     */
    public static AbstractConverter get(final Class<? extends Object> sourceType,
                                        final Class<? extends Object> targetType) throws SQLException {
        final AbstractConverter converter = TYPE_CONVERTERS_MAP.get(sourceType);
        if (converter == null) {
            // targetType is only used for the error message; conversion is keyed on sourceType.
            throw SqlError.createSQLException(LOGGER,
                    SqlState.DATA_EXCEPTION,
                    SqlError.UNSUPPORTED_CONVERSION,
                    sourceType.getSimpleName(),
                    targetType.getSimpleName());
        }
        return converter;
    }
}
| 7,478 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/Warning.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import java.util.ResourceBundle;
/**
* Enum representing the possible warning messages and lookup facilities for localization.
*/
/**
 * Enum representing the possible warning messages and lookup facilities for localization.
 * Each constant's name is a key into the "jdbc" resource bundle.
 */
public enum Warning {
    ERROR_CANCELING_QUERY,
    MAX_VALUE_TRUNCATED,
    VALUE_TRUNCATED,
    NULL_PROPERTY,
    NULL_URL,
    UNSUPPORTED_PROPERTY,
    UNSUPPORTED_URL_PREFIX;
    // Localized message templates, loaded once from jdbc.properties on the classpath.
    private static final ResourceBundle RESOURCE = ResourceBundle.getBundle("jdbc");
    /**
     * Looks up the resource bundle string corresponding to the key, and formats it with the provided
     * arguments.
     *
     * @param key resource key for bundle provided to constructor.
     * @param formatArgs any additional arguments to format the resource string with.
     * @return resource String, formatted with formatArgs.
     */
    public static String lookup(final Warning key, final Object... formatArgs) {
        return String.format(RESOURCE.getString(key.name()), formatArgs);
    }
}
| 7,479 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/QueryExecutor.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.reflect.Constructor;
import java.sql.SQLException;
import java.util.Properties;
/**
 * Abstract base for query execution against the database. Provides a simple,
 * thread-safe query lifecycle (NOT_STARTED -> IN_PROGRESS -> NOT_STARTED or
 * CANCELLED) so a query running on one thread can be cancelled from another.
 */
public abstract class QueryExecutor {
    private static final Logger LOGGER = LoggerFactory.getLogger(QueryExecutor.class);
    // Guards all reads/writes of queryState; cancellation happens on a different thread.
    private final Object lock = new Object();
    @Setter
    @Getter
    private int queryTimeout = -1;
    @Setter
    @Getter
    private int fetchSize = Integer.MAX_VALUE;
    private QueryState queryState = QueryState.NOT_STARTED;

    /**
     * Compares two sets of connection properties key-by-key.
     *
     * @param connectionProperties1 First set of connection properties.
     * @param connectionProperties2 Second set of connection properties.
     * @return true if both contain exactly the same keys with equal values.
     */
    protected static boolean propertiesEqual(
            final ConnectionProperties connectionProperties1,
            final ConnectionProperties connectionProperties2) {
        final Properties properties1 = connectionProperties1.getProperties();
        final Properties properties2 = connectionProperties2.getProperties();
        if (!properties1.keySet().equals(properties2.keySet())) {
            return false;
        }
        for (final Object key : properties1.keySet()) {
            if (!properties1.get(key).equals(properties2.get(key))) {
                return false;
            }
        }
        return true;
    }
    /**
     * Function to get max fetch size for driver.
     *
     * @return Max fetch size of driver.
     */
    public abstract int getMaxFetchSize();
    /**
     * Verify that connection to database is functional.
     *
     * @param timeout Time in seconds to wait for the database operation used to validate the connection to complete.
     * @return true if the connection is valid, otherwise false.
     */
    public abstract boolean isValid(final int timeout);
    /**
     * Function to execute query.
     *
     * @param sql Query to execute.
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeQuery(final String sql, final java.sql.Statement statement) throws
            SQLException;
    /**
     * Function to get tables.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param tableName Optional table name pattern to filter on.
     * @return java.sql.ResultSet object returned from query execution.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetTables(final java.sql.Statement statement, final String tableName)
            throws SQLException;
    /**
     * Function to get schema.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing schemas.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetSchemas(final java.sql.Statement statement)
            throws SQLException;
    /**
     * Function to get catalogs.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing catalogs.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetCatalogs(final java.sql.Statement statement)
            throws SQLException;
    /**
     * Function to get table types.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing table types.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetTableTypes(final java.sql.Statement statement)
            throws SQLException;
    /**
     * Function to get columns.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @param nodes String containing nodes to get schema for.
     * @return java.sql.ResultSet Object containing columns.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetColumns(final java.sql.Statement statement, final String nodes)
            throws SQLException;
    /**
     * Function to get type info.
     *
     * @param statement java.sql.Statement Object required for result set.
     * @return java.sql.ResultSet Object containing type info.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    public abstract java.sql.ResultSet executeGetTypeInfo(final java.sql.Statement statement)
            throws SQLException;
    /**
     * Runs the query and constructs the target ResultSet using reflection,
     * honoring cancellation requests made from another thread via cancelQuery.
     *
     * @param constructor Target ResultSet type's (Statement, T) constructor.
     * @param statement Statement which is issuing query.
     * @param query Query to execute.
     * @return Target ResultSet Object.
     * @throws SQLException if query execution fails, or it was cancelled.
     */
    protected <T> java.sql.ResultSet runCancellableQuery(final Constructor<?> constructor,
                                                         final java.sql.Statement statement,
                                                         final String query) throws SQLException {
        synchronized (lock) {
            // Only one query may run at a time per executor.
            if (queryState.equals(QueryState.IN_PROGRESS)) {
                throw SqlError.createSQLException(
                        LOGGER,
                        SqlState.OPERATION_CANCELED,
                        SqlError.QUERY_IN_PROGRESS);
            }
            queryState = QueryState.IN_PROGRESS;
        }
        try {
            final T intermediateResult = runQuery(query);
            synchronized (lock) {
                // A cancel may have landed while the query was running; honor it.
                if (queryState.equals(QueryState.CANCELLED)) {
                    resetQueryState();
                    throw SqlError.createSQLException(
                            LOGGER,
                            SqlState.OPERATION_CANCELED,
                            SqlError.QUERY_CANCELED);
                }
                resetQueryState();
            }
            return (java.sql.ResultSet) constructor.newInstance(statement, intermediateResult);
        } catch (final SQLException e) {
            // Fix: reset the state before rethrowing. Previously a failing runQuery
            // left the executor stuck in IN_PROGRESS, rejecting all later queries.
            synchronized (lock) {
                resetQueryState();
            }
            throw e;
        } catch (final Exception e) {
            synchronized (lock) {
                if (queryState.equals(QueryState.CANCELLED)) {
                    resetQueryState();
                    throw SqlError.createSQLException(
                            LOGGER,
                            SqlState.OPERATION_CANCELED,
                            SqlError.QUERY_CANCELED);
                } else {
                    resetQueryState();
                    final StringWriter sw = new StringWriter();
                    final PrintWriter pw = new PrintWriter(sw);
                    e.printStackTrace(pw);
                    // Fix: separator added between the exception and its stack trace.
                    throw SqlError.createSQLException(
                            LOGGER,
                            SqlState.OPERATION_CANCELED,
                            SqlError.QUERY_FAILED, e + " Stack Trace: " + sw.toString());
                }
            }
        }
    }
    // Returns the executor to its idle state; callers must hold the lock.
    private void resetQueryState() {
        queryState = QueryState.NOT_STARTED;
    }
    protected abstract <T> T runQuery(final String query) throws SQLException;
    /**
     * Function to cancel running query.
     * This has to be run in the different thread from the one running the query.
     *
     * @param isClosing true when invoked from close(), where a no-op is acceptable.
     * @throws SQLException if query cancellation fails.
     */
    public void cancelQuery(final boolean isClosing) throws SQLException {
        synchronized (lock) {
            if (queryState.equals(QueryState.NOT_STARTED)) {
                // Nothing to cancel; closing a statement with no active query is fine.
                if (isClosing) {
                    return;
                }
                throw SqlError.createSQLException(
                        LOGGER,
                        SqlState.OPERATION_CANCELED,
                        SqlError.QUERY_NOT_STARTED_OR_COMPLETE);
            } else if (queryState.equals(QueryState.CANCELLED)) {
                throw SqlError.createSQLException(
                        LOGGER,
                        SqlState.OPERATION_CANCELED,
                        SqlError.QUERY_CANCELED);
            }
            performCancel();
            queryState = QueryState.CANCELLED;
            LOGGER.debug("Cancel query succeeded.");
        }
    }
    protected abstract void performCancel() throws SQLException;
    // Lifecycle states for the single in-flight query.
    enum QueryState {
        NOT_STARTED,
        IN_PROGRESS,
        CANCELLED
    }
}
| 7,480 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/SqlError.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import org.slf4j.Logger;
import java.sql.ClientInfoStatus;
import java.sql.SQLClientInfoException;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.Map;
import java.util.ResourceBundle;
/**
* Enum representing the possible error messages and lookup facilities for localization.
*/
/**
 * Enum representing the possible error messages and lookup facilities for localization.
 * Each constant's name is a key into the "jdbc" resource bundle; the static factory
 * methods build exceptions from the localized, formatted message.
 */
public enum SqlError {
    AAD_ACCESS_TOKEN_ERROR,
    ASYNC_RETRIEVAL_ERROR,
    AAD_ACCESS_TOKEN_REQUEST_FAILED,
    CANNOT_UNWRAP,
    CANNOT_CONVERT_STRING_TO_RESULT_SET,
    CANNOT_SLICE_A_STRING,
    CONN_CLOSED,
    CONN_FAILED,
    FAILED_TO_BUFFER_RESULT_SET,
    FAILED_TO_CREATE_DIRECTORY,
    FAILED_TO_DELETE_DIRECTORY,
    FAILED_TO_NOTIFY_CONSUMER_THREAD,
    FAILED_TO_OBTAIN_AUTH_TOKEN,
    FAILED_TO_PROPAGATE_ERROR,
    FAILED_TO_RUN_SCHEMA_EXPORT,
    FAILED_TO_SHUTDOWN_RETRIEVAL_EXECUTOR_SERVICE,
    FEATURE_NOT_SUPPORTED,
    INCORRECT_SOURCE_TYPE_AT_CELL,
    INVALID_AAD_ACCESS_TOKEN_RESPONSE,
    INVALID_COLUMN_LABEL,
    INVALID_CONNECTION_PROPERTY,
    MISSING_CONNECTION_PROPERTY,
    INVALID_VALUE_CONNECTION_PROPERTY,
    INVALID_CREDENTIALS_FILE_PATH,
    INVALID_DATA_AT_ARRAY,
    INVALID_DATA_AT_ROW,
    INVALID_ENDPOINT,
    INVALID_FETCH_SIZE,
    INVALID_LARGE_MAX_ROWS_SIZE,
    INVALID_MAX_CONNECTIONS,
    INVALID_MAX_FIELD_SIZE,
    INVALID_MAX_RETRY_COUNT,
    INVALID_NUMERIC_CONNECTION_VALUE,
    INVALID_ROW_VALUE,
    INVALID_COLUMN_INDEX,
    INVALID_INDEX,
    INVALID_TYPE_CONVERSION,
    INVALID_TIMEOUT,
    KNOWN_HOSTS_FILE_NOT_FOUND,
    INVALID_TYPE,
    INVALID_QUERY,
    INVALID_SAML_RESPONSE,
    INVALID_SESSION_TOKEN_RESPONSE,
    MISSING_REQUIRED_IDP_PARAMETER,
    MISSING_SERVICE_REGION,
    OKTA_SAML_ASSERTION_ERROR,
    OKTA_SAML_ASSERTION_REQUEST_FAILED,
    OKTA_SESSION_TOKEN_REQUEST_FAILED,
    OKTA_SESSION_TOKEN_ERROR,
    PARAMETERS_NOT_SUPPORTED,
    QUERY_FAILED,
    QUERY_IN_PROGRESS,
    QUERY_NOT_STARTED_OR_COMPLETE,
    QUERY_CANNOT_BE_CANCELLED,
    QUERY_CANCELED,
    QUERY_TIMED_OUT,
    READ_ONLY,
    RESULT_FORWARD_ONLY,
    RESULT_SET_CLOSED,
    STMT_CLOSED,
    STMT_CLOSED_DURING_EXECUTE,
    TRANSACTIONS_NOT_SUPPORTED,
    UNSUPPORTED_AWS_CREDENTIALS_PROVIDER,
    UNSUPPORTED_BINARY_STREAM,
    UNSUPPORTED_CLASS,
    UNSUPPORTED_COLUMN_PRIVILEGES,
    UNSUPPORTED_CONVERSION,
    UNSUPPORTED_CROSS_REFERENCE,
    UNSUPPORTED_EXPORTED_KEYS,
    UNSUPPORTED_FETCH_DIRECTION,
    UNSUPPORTED_FUNCTIONS,
    UNSUPPORTED_FUNCTION_COLUMNS,
    UNSUPPORTED_GENERATED_KEYS,
    UNSUPPORTED_LANGUAGE,
    UNSUPPORTED_PREPARE_STATEMENT,
    UNSUPPORTED_PREPARE_CALL,
    UNSUPPORTED_PROCEDURE_COLUMNS,
    UNSUPPORTED_PROPERTIES_STRING,
    UNSUPPORTED_PROPERTY,
    UNSUPPORTED_PSEUDO_COLUMNS,
    UNSUPPORTED_REFRESH_ROW,
    UNSUPPORTED_REQUEST,
    UNSUPPORTED_RESULT_SET_TYPE,
    UNSUPPORTED_TABLE_PRIVILEGES,
    UNSUPPORTED_TYPE,
    UNSUPPORTED_SAML_CREDENTIALS_PROVIDER,
    UNSUPPORTED_SCHEMA,
    UNSUPPORTED_SUPER_TABLES,
    UNSUPPORTED_SUPER_TYPES,
    UNSUPPORTED_USER_DEFINED_TYPES,
    UNSUPPORTED_VERSION_COLUMNS,
    VALUE_OUT_OF_RANGE;
    // Localized message templates, loaded once from jdbc.properties on the classpath.
    private static final ResourceBundle RESOURCE = ResourceBundle.getBundle("jdbc");
    /**
     * Looks up the resource bundle string corresponding to the key, and formats it with the provided
     * arguments.
     *
     * @param key Resource key for bundle provided to constructor.
     * @param formatArgs Any additional arguments to format the resource string with.
     * @return resource String, formatted with formatArgs.
     */
    public static String lookup(final SqlError key, final Object... formatArgs) {
        return String.format(RESOURCE.getString(key.name()), formatArgs);
    }
    /**
     * Create SQLException of error and log the message with a {@link Logger}.
     *
     * @param logger The {@link Logger} contains log info.
     * @param sqlState A code identifying the SQL error condition.
     * @param key Resource key for bundle provided to constructor.
     * @param formatArgs Any additional arguments to format the resource string with.
     * @return SQLException with error message.
     */
    public static SQLException createSQLException(
            final Logger logger,
            final SqlState sqlState,
            final SqlError key,
            final Object... formatArgs) {
        final String error = lookup(key, formatArgs);
        logger.error(error);
        return new SQLException(error, sqlState.getSqlState());
    }
    /**
     * Create {@link SQLFeatureNotSupportedException} of error and log the message with a {@link Logger}.
     *
     * @param logger The {@link Logger} contains log info.
     * @return SQLFeatureNotSupportedException with error message.
     */
    public static SQLFeatureNotSupportedException createSQLFeatureNotSupportedException(
            final Logger logger) {
        // Logged at trace (not error): unsupported features are an expected condition.
        final String error = lookup(FEATURE_NOT_SUPPORTED);
        logger.trace(error);
        return new SQLFeatureNotSupportedException(error);
    }
    /**
     * Create {@link SQLClientInfoException} of error and log the message with a {@link Logger}.
     *
     * @param logger The {@link Logger} contains log info.
     * @param map A Map containing the property values that could not be set.
     * @param key Resource key for bundle provided to constructor.
     * @param formatArgs Any additional arguments to format the resource string with.
     * @return SQLClientInfoException with error message.
     */
    public static SQLClientInfoException createSQLClientInfoException(
            final Logger logger,
            final Map<String, ClientInfoStatus> map,
            final SqlError key,
            final Object... formatArgs) {
        final String error = lookup(key, formatArgs);
        logger.error(error);
        return new SQLClientInfoException(error, map);
    }
    /**
     * Create {@link SQLClientInfoException} of error and log the message with a {@link Logger}.
     *
     * @param logger The {@link Logger} contains log info.
     * @param map A Map containing the property values that could not be set.
     * @param e The SQLException thrown by previous error handling.
     * @return SQLClientInfoException with error message.
     */
    public static SQLClientInfoException createSQLClientInfoException(
            final Logger logger,
            final Map<String, ClientInfoStatus> map,
            final SQLException e) {
        // NOTE(review): only the message of e is preserved; the exception itself is
        // not chained as a cause — confirm that losing the original stack is intended.
        logger.error(e.getMessage());
        return new SQLClientInfoException(e.getMessage(), map);
    }
}
| 7,481 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/SqlState.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
/**
* Copy of the SQLSTATE codes but as an enum, for use in throwing SQLException.
*/
/**
 * Copy of the SQLSTATE codes but as an enum, for use in throwing SQLException.
 * Each constant carries its five-character SQLSTATE code.
 */
public enum SqlState {
    CONNECTION_EXCEPTION("08000"),
    CONNECTION_FAILURE("08006"),
    DATA_EXCEPTION("22000"),
    DATA_TYPE_TRANSFORM_VIOLATION("0700B"),
    DATA_EXCEPTION_NULL_VALUE("22002"),
    EMPTY_STRING("2200F"),
    INVALID_AUTHORIZATION_SPECIFICATION("28000"),
    INVALID_QUERY_EXPRESSION("2201S"),
    RESTRICTED_DATA_TYPE_VIOLATION("07006"),
    NUMERIC_VALUE_OUT_OF_RANGE("22003"),
    NO_RESULT_SET_RETURNED("02001"),
    OPERATION_CANCELED("HY008");
    /**
     * The SQLSTATE code.
     */
    private final String sqlState;
    /**
     * SqlState constructor.
     *
     * @param sqlState The SQLSTATE code associated with this sql state.
     */
    SqlState(final String sqlState) {
        this.sqlState = sqlState;
    }
    /**
     * Gets the five-character SQLSTATE code for this constant.
     *
     * @return the SQLSTATE code, e.g. "08000".
     */
    public String getSqlState() {
        return sqlState;
    }
}
| 7,482 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/SshTunnel.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import com.jcraft.jsch.HostKey;
import com.jcraft.jsch.JSch;
import com.jcraft.jsch.JSchException;
import com.jcraft.jsch.Session;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.regex.Matcher;
/**
 * Establishes an SSH tunnel (local port forwarding) to the database host using JSch,
 * based on the SSH-related connection properties. When tunneling is disabled, the
 * instance is created in an invalid (no-op) state.
 */
public class SshTunnel {
    public static final String SSH_KNOWN_HOSTS_FILE = "~/.ssh/known_hosts";
    public static final String STRICT_HOST_KEY_CHECKING = "StrictHostKeyChecking";
    public static final String HASH_KNOWN_HOSTS = "HashKnownHosts";
    public static final String SERVER_HOST_KEY = "server_host_key";
    public static final String USER_HOME_PROPERTY = "user.home";
    // Matches paths beginning with "~/" or "~\" for home-directory expansion.
    public static final String HOME_PATH_PREFIX_REG_EXPR = "^~[/\\\\].*$";
    public static final String YES = "yes";
    public static final String NO = "no";
    private static final Logger LOGGER = LoggerFactory.getLogger(SshTunnel.class);
    private static final int DEFAULT_PORT = 22;
    private static final String LOCALHOST = "localhost";
    private static final int CONNECTION_TIMEOUT_MILLISECONDS = 3000;
    // Both remain null when tunneling is disabled or setup fails.
    private Integer localPort = null;
    private Session session = null;
    /**
     * Constructor for SshTunnel.
     *
     * @param connectionProperties ConnectionProperties for constructing the ssh tunnel.
     * @throws SQLException If construction fails.
     */
    public SshTunnel(final ConnectionProperties connectionProperties) throws SQLException {
        if (!connectionProperties.enableSshTunnel()) {
            return;
        }
        try {
            // Add private key and optional passphrase.
            final JSch jSch = new JSch();
            jSch.addIdentity(getPath(connectionProperties.getSshPrivateKeyFile()).toString());
            session = jSch.getSession(connectionProperties.getSshUser(), getHostName(connectionProperties),
                    getPort(connectionProperties));
            setHostKeyType(jSch, session, connectionProperties);
            session.connect(CONNECTION_TIMEOUT_MILLISECONDS);
            // Need to force lport because there is port range locks on the Neptune export utility.
            // NOTE(review): lport is passed as 0 (auto-assigned) despite the comment above — confirm intent.
            localPort = session.setPortForwardingL(LOCALHOST, 0, connectionProperties.getHostname(),
                    connectionProperties.getPort());
        } catch (final Exception e) {
            // Leave the tunnel in the invalid state on any failure.
            localPort = null;
            session = null;
            throw (e instanceof SQLException) ? (SQLException) e : new SQLException(e.getMessage(), e);
        }
    }
    /**
     * Gets an absolute path from the given file path. It performs the substitution for a leading
     * '~' to be replaced by the user's home directory.
     *
     * @param filePath the given file path to process.
     * @return a {@link Path} for the absolution path for the given file path.
     */
    public static Path getPath(final String filePath) {
        if (filePath.matches(HOME_PATH_PREFIX_REG_EXPR)) {
            // quoteReplacement guards against backslashes in Windows home paths.
            final String userHomePath = Matcher.quoteReplacement(
                    System.getProperty(USER_HOME_PROPERTY));
            return Paths.get(filePath.replaceFirst("~", userHomePath)).toAbsolutePath();
        }
        return Paths.get(filePath).toAbsolutePath();
    }
    // Parses the optional ":port" suffix from the SSH hostname; defaults to 22.
    private static int getPort(final ConnectionProperties connectionProperties) {
        final int portSeparatorIndex = connectionProperties.getSshHostname().indexOf(':');
        return (portSeparatorIndex >= 0) ?
                Integer.parseInt(connectionProperties.getSshHostname().substring(portSeparatorIndex + 1)) :
                DEFAULT_PORT;
    }
    // Strips the optional ":port" suffix from the SSH hostname.
    private static String getHostName(final ConnectionProperties connectionProperties) {
        final int portSeparatorIndex = connectionProperties.getSshHostname().indexOf(':');
        return (portSeparatorIndex >= 0) ? connectionProperties.getSshHostname().substring(0, portSeparatorIndex)
                : connectionProperties.getSshHostname();
    }
    // Configures host key checking: disables it, or loads known_hosts and aligns the
    // session's host key algorithm with the entry recorded for this host.
    private static void setHostKeyType(final JSch jSch, final Session session,
                                       final ConnectionProperties connectionProperties)
            throws SQLException {
        // If strict checking is disabled, set it to NO and exit.
        if (!connectionProperties.getSshStrictHostKeyChecking()) {
            session.setConfig(STRICT_HOST_KEY_CHECKING, NO);
            return;
        }
        // Strict checking is enabled, need to get known hosts file.
        final String knowHostsFilename = getPath(StringUtils.isBlank(connectionProperties.getSshKnownHostsFile()) ?
                SSH_KNOWN_HOSTS_FILE : connectionProperties.getSshKnownHostsFile()).toString();
        if (!Files.exists(Paths.get(knowHostsFilename))) {
            throw SqlError.createSQLException(
                    LOGGER,
                    SqlState.INVALID_AUTHORIZATION_SPECIFICATION,
                    SqlError.KNOWN_HOSTS_FILE_NOT_FOUND,
                    connectionProperties.getSshKnownHostsFile());
        }
        try {
            jSch.setKnownHosts(knowHostsFilename);
        } catch (final JSchException e) {
            throw new SQLException(e.getMessage(), e);
        }
        final HostKey[] hostKeys = jSch.getHostKeyRepository().getHostKey();
        final HostKey hostKey = Arrays.stream(hostKeys)
                .filter(hk -> hk.getHost().equals(getHostName(connectionProperties)))
                .findFirst().orElse(null);
        // This will ensure a match between how the host key was hashed in the known_hosts file.
        final String hostKeyType = (hostKey != null) ? hostKey.getType() : null;
        // Set the hash algorithm
        if (hostKeyType != null) {
            session.setConfig(SERVER_HOST_KEY, hostKeyType);
        }
        session.setConfig(HASH_KNOWN_HOSTS, YES);
    }
    /**
     * Get host for tunnel.
     *
     * @return Host for tunnel (always localhost).
     */
    public String getTunnelHost() {
        return LOCALHOST;
    }
    /**
     * Get port for tunnel.
     *
     * @return Port for tunnel, or 0 when no tunnel was established.
     */
    public int getTunnelPort() {
        return (localPort != null) ? localPort : 0;
    }
    /**
     * Return whether ssh tunnel is valid.
     *
     * @return True if valid, false otherwise.
     */
    public boolean sshTunnelValid() {
        return session != null;
    }
    /**
     * Disconnect SSH tunnel.
     */
    public void disconnect() {
        if (sshTunnelValid()) {
            session.disconnect();
        }
    }
}
| 7,483 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/ConnectionProperties.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import com.google.common.collect.ImmutableMap;
import lombok.NonNull;
import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.SqlSchemaGrabber;
import software.aws.neptune.jdbc.Connection;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * Abstract base class that stores, converts, and validates JDBC connection properties.
 * <p>
 * Concrete subclasses supply query-language-specific behavior: hostname/port resolution,
 * encryption status, property validation, and any additional supported properties along
 * with their defaults and converters (passed into the constructor).
 */
public abstract class ConnectionProperties extends Properties {
    // Keys for the properties recognized by all drivers.
    public static final String SCAN_TYPE_KEY = "scanType";
    public static final String APPLICATION_NAME_KEY = "applicationName";
    public static final String AUTH_SCHEME_KEY = "authScheme";
    public static final String CONNECTION_TIMEOUT_MILLIS_KEY = "connectionTimeout";
    public static final String CONNECTION_RETRY_COUNT_KEY = "connectionRetryCount";
    public static final String LOG_LEVEL_KEY = "logLevel";
    public static final String SSH_USER = "sshUser";
    public static final String SSH_HOSTNAME = "sshHost";
    public static final String SSH_PRIVATE_KEY_FILE = "sshPrivateKeyFile";
    public static final String SSH_PRIVATE_KEY_PASSPHRASE = "sshPrivateKeyPassphrase";
    public static final String SSH_STRICT_HOST_KEY_CHECKING = "sshStrictHostKeyChecking";
    public static final String SSH_KNOWN_HOSTS_FILE = "sshKnownHostsFile";
    public static final String SERVICE_REGION_KEY = "serviceRegion";
    // Default values applied when a property is absent or blank.
    public static final AuthScheme DEFAULT_AUTH_SCHEME = AuthScheme.IAMSigV4;
    public static final SqlSchemaGrabber.ScanType DEFAULT_SCAN_TYPE = SqlSchemaGrabber.ScanType.All;
    public static final int DEFAULT_CONNECTION_TIMEOUT_MILLIS = 5000;
    public static final int DEFAULT_CONNECTION_RETRY_COUNT = 3;
    public static final String DEFAULT_SSH_STRICT_CHECKING = "true";
    public static final Level DEFAULT_LOG_LEVEL = Level.OFF;
    public static final String DEFAULT_SERVICE_REGION = "";
    // NOTE(review): this map is public, static, and mutable, and the constructor below
    // merges subclass-supplied defaults into it. Defaults are therefore shared JVM-wide
    // across all connection instances and driver types — confirm this cross-instance
    // sharing is intended.
    public static final Map<String, Object> DEFAULT_PROPERTIES_MAP = new HashMap<>();
    // Maps property keys to converters that parse and validate their string values.
    // NOTE(review): also static and mutated per-instance; see the note above.
    private static final Map<String, ConnectionProperties.PropertyConverter<?>> PROPERTY_CONVERTER_MAP =
            new HashMap<>();
    private static final Logger LOGGER = LoggerFactory.getLogger(ConnectionProperties.class);
    static {
        PROPERTY_CONVERTER_MAP.put(SCAN_TYPE_KEY, ConnectionProperties::toScanType);
        PROPERTY_CONVERTER_MAP.put(APPLICATION_NAME_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(AUTH_SCHEME_KEY, ConnectionProperties::toAuthScheme);
        PROPERTY_CONVERTER_MAP.put(CONNECTION_TIMEOUT_MILLIS_KEY, ConnectionProperties::toUnsigned);
        PROPERTY_CONVERTER_MAP.put(CONNECTION_RETRY_COUNT_KEY, ConnectionProperties::toUnsigned);
        PROPERTY_CONVERTER_MAP.put(LOG_LEVEL_KEY, ConnectionProperties::toLogLevel);
        PROPERTY_CONVERTER_MAP.put(SERVICE_REGION_KEY, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_USER, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_HOSTNAME, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_PRIVATE_KEY_FILE, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_PRIVATE_KEY_PASSPHRASE, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_STRICT_HOST_KEY_CHECKING, (key, value) -> value);
        PROPERTY_CONVERTER_MAP.put(SSH_KNOWN_HOSTS_FILE, (key, value) -> value);
    }
    static {
        DEFAULT_PROPERTIES_MAP.put(SCAN_TYPE_KEY, DEFAULT_SCAN_TYPE);
        DEFAULT_PROPERTIES_MAP.put(CONNECTION_TIMEOUT_MILLIS_KEY, DEFAULT_CONNECTION_TIMEOUT_MILLIS);
        DEFAULT_PROPERTIES_MAP.put(CONNECTION_RETRY_COUNT_KEY, DEFAULT_CONNECTION_RETRY_COUNT);
        DEFAULT_PROPERTIES_MAP.put(AUTH_SCHEME_KEY, DEFAULT_AUTH_SCHEME);
        DEFAULT_PROPERTIES_MAP.put(LOG_LEVEL_KEY, DEFAULT_LOG_LEVEL);
        DEFAULT_PROPERTIES_MAP.put(SERVICE_REGION_KEY, DEFAULT_SERVICE_REGION);
    }
    /**
     * ConnectionProperties constructor. Initializes with the default properties only.
     */
    public ConnectionProperties() throws SQLException {
        this(new Properties(), null, null);
    }
    /**
     * ConnectionProperties constructor.
     *
     * @param properties            initial set of connection properties coming from the connection string.
     * @param defaultPropertiesMap  subclass-specific defaults, merged into the shared default map (may be null).
     * @param propertyConverterMap  subclass-specific converters, merged into the shared converter map (may be null).
     * @throws SQLException if an input property name or value is invalid.
     */
    public ConnectionProperties(@NonNull final Properties properties,
                                final Map<String, Object> defaultPropertiesMap,
                                final Map<String, ConnectionProperties.PropertyConverter<?>> propertyConverterMap)
            throws SQLException {
        if (defaultPropertiesMap != null) {
            DEFAULT_PROPERTIES_MAP.putAll(defaultPropertiesMap);
        }
        if (propertyConverterMap != null) {
            PROPERTY_CONVERTER_MAP.putAll(propertyConverterMap);
        }
        // No input properties: fall back entirely on defaults (skips validateProperties()).
        if (properties.isEmpty()) {
            putAll(DEFAULT_PROPERTIES_MAP);
            return;
        }
        resolveProperties(properties);
    }
    /**
     * Converts a string value to a {@link SqlSchemaGrabber.ScanType}, falling back to the
     * default when the value is blank.
     *
     * @param key   The property key (used in the error message).
     * @param value The string value to convert.
     * @return The converted scan type.
     * @throws SQLException if the value is not a recognized scan type.
     */
    protected static SqlSchemaGrabber.ScanType toScanType(@NonNull final String key, @NonNull final String value)
            throws SQLException {
        if (isWhitespace(value)) {
            return DEFAULT_SCAN_TYPE;
        }
        if (SqlSchemaGrabber.ScanType.fromString(value) == null) {
            throw invalidConnectionPropertyError(key, value);
        }
        return SqlSchemaGrabber.ScanType.fromString(value);
    }
    /**
     * Converts a string value to a log4j {@link Level} (case-insensitive), falling back to
     * the default when the value is blank.
     *
     * @param key   The property key (used in the error message).
     * @param value The string value to convert.
     * @return The converted log level.
     * @throws SQLException if the value is not a recognized log level name.
     */
    protected static Level toLogLevel(@NonNull final String key, @NonNull final String value) throws SQLException {
        if (isWhitespace(value)) {
            return DEFAULT_LOG_LEVEL;
        }
        final Map<String, Level> logLevelsMap = ImmutableMap.<String, Level>builder()
                .put("OFF", Level.OFF)
                .put("FATAL", Level.FATAL)
                .put("ERROR", Level.ERROR)
                .put("WARN", Level.WARN)
                .put("INFO", Level.INFO)
                .put("DEBUG", Level.DEBUG)
                .put("TRACE", Level.TRACE)
                .put("ALL", Level.ALL)
                .build();
        if (!logLevelsMap.containsKey(value.toUpperCase())) {
            throw invalidConnectionPropertyError(key, value);
        }
        return logLevelsMap.get(value.toUpperCase());
    }
    /**
     * Converts a string value to a non-negative int. Blank values resolve to the key's
     * registered default, if any.
     *
     * @param key   The property key (used for default lookup and error messages).
     * @param value The string value to convert.
     * @return The converted non-negative integer.
     * @throws SQLException if the value is not a valid non-negative integer.
     */
    protected static int toUnsigned(@NonNull final String key, @NonNull final String value) throws SQLException {
        if (isWhitespace(value)) {
            if (DEFAULT_PROPERTIES_MAP.containsKey(key)) {
                return (int) DEFAULT_PROPERTIES_MAP.get(key);
            } else {
                throw invalidConnectionPropertyError(key, value);
            }
        }
        try {
            final int intValue = Integer.parseUnsignedInt(value);
            // parseUnsignedInt maps values above Integer.MAX_VALUE to negative ints;
            // this check rejects those out-of-range values.
            if (intValue < 0) {
                throw invalidConnectionPropertyError(key, value);
            }
            return intValue;
        } catch (final NumberFormatException | SQLException e) {
            throw invalidConnectionPropertyError(key, value);
        }
    }
    /**
     * Converts a string value to a boolean. Accepts "1"/"true" and "0"/"false"
     * (case-insensitive). Blank values resolve to the key's registered default, if any.
     *
     * @param key   The property key (used for default lookup and error messages).
     * @param value The string value to convert.
     * @return The converted boolean.
     * @throws SQLException if the value is not a recognized boolean string.
     */
    protected static boolean toBoolean(@NonNull final String key, @NonNull final String value) throws SQLException {
        if (isWhitespace(value)) {
            if (DEFAULT_PROPERTIES_MAP.containsKey(key)) {
                return (boolean) DEFAULT_PROPERTIES_MAP.get(key);
            } else {
                throw invalidConnectionPropertyError(key, value);
            }
        }
        final Map<String, Boolean> stringBooleanMap = ImmutableMap.of(
                "1", true, "true", true,
                "0", false, "false", false);
        if (!stringBooleanMap.containsKey(value.toLowerCase())) {
            throw invalidConnectionPropertyError(key, value);
        }
        return stringBooleanMap.get(value.toLowerCase());
    }
    /**
     * Converts a string value to an {@link AuthScheme}, falling back to the default when
     * the value is blank.
     *
     * @param key   The property key (used in the error message).
     * @param value The string value to convert.
     * @return The converted authentication scheme.
     * @throws SQLException if the value is not a recognized authentication scheme.
     */
    protected static AuthScheme toAuthScheme(@NonNull final String key, @NonNull final String value)
            throws SQLException {
        if (isWhitespace(value)) {
            return DEFAULT_AUTH_SCHEME;
        }
        if (AuthScheme.fromString(value) == null) {
            throw invalidConnectionPropertyError(key, value);
        }
        return AuthScheme.fromString(value);
    }
    /**
     * Reports whether the value is empty or consists only of whitespace.
     *
     * @param value The string to check.
     * @return True when the value contains no non-whitespace characters.
     */
    protected static boolean isWhitespace(@NonNull final String value) {
        return Pattern.matches("^\\s*$", value);
    }
    /**
     * Builds a SQLException for an invalid connection property value.
     *
     * @param key   The offending property key.
     * @param value The offending property value.
     * @return The SQLException to throw.
     */
    protected static SQLException invalidConnectionPropertyError(final Object key, final Object value) {
        return SqlError.createSQLException(
                LOGGER,
                SqlState.CONNECTION_EXCEPTION,
                SqlError.INVALID_CONNECTION_PROPERTY, key, value);
    }
    /**
     * Builds a SQLException for a required connection property that is missing.
     *
     * @param reason Explanation of what is missing and how to supply it.
     * @return The SQLException to throw.
     */
    protected static SQLException missingConnectionPropertyError(final String reason) {
        return SqlError.createSQLException(
                LOGGER,
                SqlState.CONNECTION_EXCEPTION,
                SqlError.MISSING_CONNECTION_PROPERTY, reason);
    }
    /**
     * Builds a SQLException for a connection property with an invalid value.
     *
     * @param key    The offending property key.
     * @param reason Explanation of why the value is invalid.
     * @return The SQLException to throw.
     */
    protected static SQLException invalidConnectionPropertyValueError(final String key, final String reason) {
        return SqlError.createSQLException(
                LOGGER,
                SqlState.CONNECTION_EXCEPTION,
                SqlError.INVALID_VALUE_CONNECTION_PROPERTY, key, reason);
    }
    /**
     * Gets an absolute path from the given file path. It performs the substitution for a leading
     * '~' to be replaced by the user's home directory.
     *
     * @param filePath the given file path to process.
     * @return a {@link Path} for the absolution path for the given file path.
     */
    public static Path getPath(final String filePath) {
        if (filePath.matches(SshTunnel.HOME_PATH_PREFIX_REG_EXPR)) {
            final String userHomePath = Matcher.quoteReplacement(
                    System.getProperty(SshTunnel.USER_HOME_PROPERTY));
            return Paths.get(filePath.replaceFirst("~", userHomePath)).toAbsolutePath();
        }
        return Paths.get(filePath).toAbsolutePath();
    }
    /**
     * Gets the scan type.
     *
     * @return The scan type.
     */
    public SqlSchemaGrabber.ScanType getScanType() {
        return (SqlSchemaGrabber.ScanType) get(SCAN_TYPE_KEY);
    }
    /**
     * Sets the scan type.
     *
     * @param scanType The scan type.
     */
    public void setScanType(@NonNull final SqlSchemaGrabber.ScanType scanType) {
        put(SCAN_TYPE_KEY, scanType);
    }
    /**
     * Function to get the hostname.
     *
     * @return hostname.
     * @throws SQLException If the hostname cannot be obtained.
     */
    public abstract String getHostname() throws SQLException;
    /**
     * Function to get the port.
     *
     * @return port.
     * @throws SQLException If the port cannot be obtained.
     */
    public abstract int getPort() throws SQLException;
    /**
     * Function to override the current port with the ssh tunnel port.
     *
     * @param port Port to override with.
     * @throws SQLException If the override fails.
     */
    public abstract void sshTunnelOverride(int port) throws SQLException;
    /**
     * Function to get the encryption status of the child class.
     *
     * @return True if encryption is enabled, false otherwise.
     */
    protected abstract boolean isEncryptionEnabled();
    /**
     * Gets the application name.
     *
     * @return The application name.
     */
    public String getApplicationName() {
        return getProperty(APPLICATION_NAME_KEY);
    }
    /**
     * Sets the application name.
     *
     * @param applicationName The application name.
     * @throws SQLException if value is invalid.
     */
    public void setApplicationName(@NonNull final String applicationName) throws SQLException {
        setProperty(APPLICATION_NAME_KEY, applicationName);
    }
    /**
     * Gets the authentication scheme.
     *
     * @return The authentication scheme.
     */
    public AuthScheme getAuthScheme() {
        return (AuthScheme) get(AUTH_SCHEME_KEY);
    }
    /**
     * Sets the authentication scheme. IAMSigV4 requires encryption to be enabled.
     *
     * @param authScheme The authentication scheme.
     * @throws SQLException if value is invalid.
     */
    public void setAuthScheme(@NonNull final AuthScheme authScheme) throws SQLException {
        if (authScheme.equals(AuthScheme.IAMSigV4) && !isEncryptionEnabled()) {
            throw SqlError.createSQLClientInfoException(
                    LOGGER,
                    Connection.getFailures("authScheme", "IAMSigV4"),
                    SqlError.INVALID_CONNECTION_PROPERTY, "authScheme",
                    "'IAMSigV4' when encryption is not enabled.");
        }
        put(AUTH_SCHEME_KEY, authScheme);
    }
    /**
     * Gets indicator of whether the options indicate the SSH port forwarding tunnel
     * should be enabled. The tunnel is enabled when the SSH user, host, and private key
     * file are all set and the private key file exists.
     *
     * @return {@code true} if the SSH port forwarding tunnel should be enabled,
     * otherwise {@code false}.
     */
    public boolean enableSshTunnel() {
        return !StringUtils.isBlank(getSshUser())
                && !StringUtils.isBlank(getSshHostname())
                && !StringUtils.isBlank(getSshPrivateKeyFile())
                && Files.exists(getPath(getSshPrivateKeyFile()));
    }
    /**
     * Gets the connection timeout in milliseconds.
     *
     * @return The connection timeout in milliseconds.
     */
    public int getConnectionTimeoutMillis() {
        return (int) get(CONNECTION_TIMEOUT_MILLIS_KEY);
    }
    /**
     * Sets the connection timeout in milliseconds.
     *
     * @param timeoutMillis The connection timeout in milliseconds.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionTimeoutMillis(final int timeoutMillis) throws SQLException {
        if (timeoutMillis < 0) {
            throw invalidConnectionPropertyError(CONNECTION_TIMEOUT_MILLIS_KEY, timeoutMillis);
        }
        put(CONNECTION_TIMEOUT_MILLIS_KEY, timeoutMillis);
    }
    /**
     * Gets the connection retry count.
     *
     * @return The connection retry count.
     */
    public int getConnectionRetryCount() {
        return (int) get(CONNECTION_RETRY_COUNT_KEY);
    }
    /**
     * Sets the connection retry count.
     *
     * @param retryCount The connection retry count.
     * @throws SQLException if value is invalid.
     */
    public void setConnectionRetryCount(final int retryCount) throws SQLException {
        if (retryCount < 0) {
            throw invalidConnectionPropertyError(CONNECTION_RETRY_COUNT_KEY, retryCount);
        }
        put(CONNECTION_RETRY_COUNT_KEY, retryCount);
    }
    /**
     * Gets the region.
     *
     * @return The region.
     */
    public String getServiceRegion() {
        return getProperty(SERVICE_REGION_KEY);
    }
    /**
     * Sets the region.
     *
     * @param region The region.
     * @throws SQLException if value is invalid.
     */
    public void setServiceRegion(@NonNull final String region) throws SQLException {
        put(SERVICE_REGION_KEY, region);
    }
    /**
     * Validate properties. Called after resolution; subclasses enforce their invariants here.
     */
    protected abstract void validateProperties() throws SQLException;
    /**
     * Check if the property is supported by the driver.
     *
     * @param name The name of the property.
     * @return {@code true} if property is supported; {@code false} otherwise.
     */
    public abstract boolean isSupportedProperty(final String name);
    /**
     * Gets the entire set of properties.
     *
     * @return A copy of the entire set of properties.
     */
    public Properties getProperties() {
        final Properties newProperties = new Properties();
        newProperties.putAll(this);
        return newProperties;
    }
    /**
     * Resolves a property and sets its value.
     *
     * @param name  The name of the property.
     * @param value The value of the property.
     * @throws SQLException If the property name or value is invalid.
     */
    public void validateAndSetProperty(final String name, final Object value) throws SQLException {
        final Properties properties = new Properties();
        properties.put(name, value);
        resolveProperties(properties);
    }
    /**
     * Resolves input properties and converts them into the valid set of properties.
     * Keys are matched case-insensitively against the registered converters; unmatched
     * keys that are still supported are stored verbatim; the rest are logged and dropped.
     *
     * @param inputProperties map of properties coming from the connection string.
     * @throws SQLException if invalid input property name or value is detected.
     */
    private void resolveProperties(final Properties inputProperties) throws SQLException {
        // List of input properties keys used to keep track of unresolved properties.
        final Set<Object> inputPropertiesKeys = new HashSet<>(inputProperties.keySet());
        for (final String mapKey : PROPERTY_CONVERTER_MAP.keySet()) {
            for (final Map.Entry<Object, Object> entry : inputProperties.entrySet()) {
                final String key = entry.getKey().toString();
                final String value = entry.getValue().toString();
                // Find matching property by comparing keys (case-insensitive)
                if (key.equalsIgnoreCase(mapKey)) {
                    // Insert resolved property into the map.
                    put(mapKey, PROPERTY_CONVERTER_MAP.get(mapKey).convert(key, value));
                    // Remove key for the resolved property.
                    inputPropertiesKeys.remove(key);
                    break;
                }
            }
        }
        // Fill in defaults for any property not supplied by the input.
        setDefaults();
        // Go through properties in the supportedProperties
        final Set<Object> inputPropertiesKeyCopy = new HashSet<>(inputPropertiesKeys);
        for (final Object inputPropertiesKey : inputPropertiesKeyCopy) {
            if (isSupportedProperty(inputPropertiesKey.toString())) {
                put(inputPropertiesKey, inputProperties.get(inputPropertiesKey));
                inputPropertiesKeys.remove(inputPropertiesKey);
            }
        }
        // If there are any unresolved properties left, log a warning.
        if (!inputPropertiesKeys.isEmpty()) {
            for (final Object property : inputPropertiesKeys) {
                LOGGER.warn(
                        String.format("Property '%s' is not supported by the connection string.", property.toString()));
            }
        }
        validateProperties();
    }
    // Copies any default whose key is not yet present into this property set.
    void setDefaults() {
        for (final String key : DEFAULT_PROPERTIES_MAP.keySet()) {
            if (get(key) == null) {
                put(key, DEFAULT_PROPERTIES_MAP.get(key));
            }
        }
    }
    /**
     * Validates that, if serviceRegion was not provided as a property, the SERVICE_REGION
     * environment variable is set, and then sets the serviceRegion property to its value.
     *
     * @throws SQLException if no region was provided, an error will be thrown
     */
    protected void validateServiceRegionEnvVariable() throws SQLException {
        final String envServiceRegion = System.getenv("SERVICE_REGION");
        if ("".equals(getServiceRegion())) {
            if (envServiceRegion == null || isWhitespace(envServiceRegion)) {
                throw missingConnectionPropertyError(
                        "A Service Region must be provided to use IAMSigV4 Authentication, set through " +
                                "the SERVICE_REGION environment variable or the serviceRegion connection property. " +
                                "For example, append 'serviceRegion=us-east-1' to your connection string");
            }
            LOGGER.info(String.format("serviceRegion property was not set by user, using system SERVICE_REGION='%s' " +
                    "environment variable", envServiceRegion));
            setServiceRegion(envServiceRegion);
        }
    }
    /**
     * Gets the SSH tunnel user.
     *
     * @return the SSH tunnel user.
     */
    public String getSshUser() {
        return getProperty(SSH_USER);
    }
    /**
     * Sets the SSH tunnel user.
     *
     * @param sshUser the SSH tunnel user.
     */
    public void setSshUser(final String sshUser) {
        setProperty(SSH_USER, sshUser);
    }
    /**
     * Gets the SSH tunnel host name and optional port number.
     *
     * @return the SSH tunnel host name and optional port number.
     */
    public String getSshHostname() {
        return getProperty(SSH_HOSTNAME);
    }
    /**
     * Sets the SSH tunnel host name. Can optionally contain the port number using `host-name:port'
     * syntax. If port is not provided, port 22 is assumed.
     *
     * @param sshHostname the SSH tunnel host name and optional port number.
     */
    public void setSshHostname(final String sshHostname) {
        setProperty(SSH_HOSTNAME, sshHostname);
    }
    /**
     * Gets the file path of the private key file.
     *
     * @return the file path of the private key file.
     */
    public String getSshPrivateKeyFile() {
        return getProperty(SSH_PRIVATE_KEY_FILE);
    }
    /**
     * Sets the file path of the private key file. Can be prefixed with '~' to indicate the
     * current user's home directory.
     *
     * @param sshPrivateKeyFile the file path of the private key file.
     */
    public void setSshPrivateKeyFile(final String sshPrivateKeyFile) {
        setProperty(SSH_PRIVATE_KEY_FILE, sshPrivateKeyFile);
    }
    /**
     * Gets the passphrase of the private key file.
     *
     * @return the passphrase of the private key file
     */
    public String getSshPrivateKeyPassphrase() {
        return getProperty(SSH_PRIVATE_KEY_PASSPHRASE);
    }
    /**
     * Sets the passphrase of the private key file. If not set, no passphrase will be used.
     *
     * @param sshPrivateKeyPassphrase the passphrase of the private key file
     */
    public void setSshPrivateKeyPassphrase(final String sshPrivateKeyPassphrase) {
        setProperty(SSH_PRIVATE_KEY_PASSPHRASE, sshPrivateKeyPassphrase);
    }
    /**
     * Gets the indicator for whether the SSH tunnel will perform strict host key checking.
     *
     * @return the indicator for whether the SSH tunnel will perform strict host key checking.
     */
    public boolean getSshStrictHostKeyChecking() {
        return Boolean.parseBoolean(getProperty(SSH_STRICT_HOST_KEY_CHECKING, DEFAULT_SSH_STRICT_CHECKING));
    }
    /**
     * Sets the indicator for whether the SSH tunnel will perform strict host key checking. When
     * {@code true}, the 'known_hosts' file is checked to ensure the hashed host key is the same
     * as the target host.
     *
     * @param sshStrictHostKeyChecking the indicator for whether the SSH tunnel will perform strict
     *                                 host key checking.
     */
    public void setSshStrictHostKeyChecking(final String sshStrictHostKeyChecking) {
        setProperty(
                SSH_STRICT_HOST_KEY_CHECKING,
                String.valueOf(Boolean.parseBoolean(sshStrictHostKeyChecking)));
    }
    /**
     * Gets the file path to the 'known_hosts' file.
     *
     * @return the file path to the 'known_hosts' file.
     */
    public String getSshKnownHostsFile() {
        return getProperty(SSH_KNOWN_HOSTS_FILE);
    }
    /**
     * Sets the file path to the 'known_hosts' file. If not set, '~/.ssh/known_hosts' is assumed.
     *
     * @param sshKnownHostsFile the file path to the 'known_hosts' file.
     */
    public void setSshKnownHostsFile(final String sshKnownHostsFile) {
        setProperty(SSH_KNOWN_HOSTS_FILE, sshKnownHostsFile);
    }
    /**
     * Property converter interface.
     *
     * @param <T> Type to convert string property to.
     */
    protected interface PropertyConverter<T> {
        T convert(@NonNull String key, @NonNull String value) throws SQLException;
    }
}
| 7,484 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/AuthScheme.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
import lombok.NonNull;
/**
 * Enumeration of the authentication schemes supported by the driver.
 */
public enum AuthScheme {
    IAMSigV4("IAMSigV4"),
    None("None");
    private final String stringValue;
    AuthScheme(@NonNull final String stringValue) {
        this.stringValue = stringValue;
    }
    /**
     * Looks up an enum value from a case-insensitive string.
     *
     * @param in The case-insensitive string to be converted to enum.
     * @return The matching {@link AuthScheme}, or null when no value matches.
     */
    public static AuthScheme fromString(@NonNull final String in) {
        for (final AuthScheme candidate : values()) {
            if (in.equalsIgnoreCase(candidate.stringValue)) {
                return candidate;
            }
        }
        return null;
    }
    @Override
    public String toString() {
        return stringValue;
    }
}
| 7,485 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/jdbc/utilities/JdbcType.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.jdbc.utilities;
/**
 * Enum mirror of the {@link java.sql.Types} constants, for use in lookups
 * (e.g. as map keys) where an enum is more convenient than bare int codes.
 * Each constant's code references the corresponding {@code java.sql.Types}
 * field directly so the values cannot drift from the JDBC specification.
 */
public enum JdbcType {
    BIT(java.sql.Types.BIT),
    TINYINT(java.sql.Types.TINYINT),
    SMALLINT(java.sql.Types.SMALLINT),
    INTEGER(java.sql.Types.INTEGER),
    BIGINT(java.sql.Types.BIGINT),
    FLOAT(java.sql.Types.FLOAT),
    REAL(java.sql.Types.REAL),
    DOUBLE(java.sql.Types.DOUBLE),
    NUMERIC(java.sql.Types.NUMERIC),
    DECIMAL(java.sql.Types.DECIMAL),
    CHAR(java.sql.Types.CHAR),
    VARCHAR(java.sql.Types.VARCHAR),
    LONGVARCHAR(java.sql.Types.LONGVARCHAR),
    DATE(java.sql.Types.DATE),
    TIME(java.sql.Types.TIME),
    TIMESTAMP(java.sql.Types.TIMESTAMP),
    BINARY(java.sql.Types.BINARY),
    VARBINARY(java.sql.Types.VARBINARY),
    LONGVARBINARY(java.sql.Types.LONGVARBINARY),
    BLOB(java.sql.Types.BLOB),
    CLOB(java.sql.Types.CLOB),
    BOOLEAN(java.sql.Types.BOOLEAN),
    ARRAY(java.sql.Types.ARRAY),
    STRUCT(java.sql.Types.STRUCT),
    JAVA_OBJECT(java.sql.Types.JAVA_OBJECT),
    ROWID(java.sql.Types.ROWID),
    NCHAR(java.sql.Types.NCHAR),
    NVARCHAR(java.sql.Types.NVARCHAR),
    LONGNVARCHAR(java.sql.Types.LONGNVARCHAR),
    NCLOB(java.sql.Types.NCLOB),
    SQLXML(java.sql.Types.SQLXML),
    REF_CURSOR(java.sql.Types.REF_CURSOR),
    NULL(java.sql.Types.NULL);
    /**
     * The java.sql.Types JDBC type.
     */
    private final int jdbcCode;
    /**
     * JdbcType constructor.
     *
     * @param jdbcCode The java.sql.Types JDBC type associated with this value.
     */
    JdbcType(final int jdbcCode) {
        this.jdbcCode = jdbcCode;
    }
    /**
     * Gets the java.sql.Types code for this type.
     *
     * @return The JDBC type code.
     */
    public int getJdbcCode() {
        return jdbcCode;
    }
}
| 7,486 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/EmptyResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common;
import software.aws.neptune.jdbc.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
/**
 * A {@link java.sql.ResultSet} implementation that contains no rows.
 */
public class EmptyResultSet extends ResultSet implements java.sql.ResultSet {
    /**
     * Initialize the EmptyResultSet. {@link java.sql.Statement} is required as an input.
     *
     * @param statement {@link java.sql.Statement} to initialize with.
     */
    public EmptyResultSet(final Statement statement) {
        super(statement, null, 0);
    }

    @Override
    protected void doClose() throws SQLException {
        // Nothing to release for an empty result set.
    }

    @Override
    protected int getDriverFetchSize() throws SQLException {
        return 0;
    }

    @Override
    protected void setDriverFetchSize(final int rows) {
        // Fetch size is irrelevant when there are no rows.
    }

    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        return null;
    }

    @Override
    protected ResultSetMetaData getResultMetadata() throws SQLException {
        // Metadata with no columns and no types.
        return new EmptyResultSetMetadata(new ArrayList<>(), new ArrayList<>());
    }

    @Override
    public boolean wasNull() throws SQLException {
        return false;
    }
}
| 7,487 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/ResultSetInfoWithoutRows.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common;
import lombok.AllArgsConstructor;
import lombok.Getter;
import java.util.List;
/**
 * Immutable holder for result set information (row count and column names)
 * used when a result set is produced without materialized rows.
 */
public class ResultSetInfoWithoutRows {
    private final int rowCount;
    private final List<String> columns;

    public ResultSetInfoWithoutRows(final int rowCount, final List<String> columns) {
        this.rowCount = rowCount;
        this.columns = columns;
    }

    public int getRowCount() {
        return rowCount;
    }

    public List<String> getColumns() {
        return columns;
    }
}
| 7,488 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/IAMHelper.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common;
import com.amazon.neptune.gremlin.driver.sigv4.ChainedSigV4PropertiesProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.neptune.auth.NeptuneNettyHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import org.apache.tinkerpop.gremlin.driver.Cluster;
/**
 * Utility class that wires AWS IAM (SigV4) request signing into a Gremlin cluster.
 */
public class IAMHelper {
    private IAMHelper() {
        // Utility class; prevent instantiation.
    }

    /**
     * Registers a handshake interceptor on the cluster builder that signs each
     * websocket handshake request with SigV4, using the service region from the
     * SigV4 properties provider chain and the default AWS credentials chain.
     *
     * @param builder The cluster builder to attach the interceptor to.
     */
    public static void addHandshakeInterceptor(final Cluster.Builder builder) {
        builder.handshakeInterceptor(r -> {
            try {
                final NeptuneNettyHttpSigV4Signer sigV4Signer =
                        new NeptuneNettyHttpSigV4Signer(
                                new ChainedSigV4PropertiesProvider().getSigV4Properties().getServiceRegion(),
                                new DefaultAWSCredentialsProviderChain());
                sigV4Signer.signRequest(r);
            } catch (final NeptuneSigV4SignerException e) {
                throw new RuntimeException("Exception occurred while signing the request", e);
            }
            return r;
        });
    }
}
| 7,489 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/EmptyResultSetMetadata.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common;
import software.aws.neptune.jdbc.ResultSetMetaData;
import software.aws.neptune.jdbc.utilities.JdbcType;
import java.sql.SQLException;
import java.util.List;
/**
 * Result set metadata for an empty result set, backed by a parallel list of
 * {@link JdbcType} entries (one per column).
 */
public class EmptyResultSetMetadata extends ResultSetMetaData implements java.sql.ResultSetMetaData {
    private final List<JdbcType> columnTypes;

    EmptyResultSetMetadata(final List<String> columns, final List<JdbcType> columnTypes) {
        super(columns);
        this.columnTypes = columnTypes;
    }

    @Override
    public int getColumnType(final int column) throws SQLException {
        verifyColumnIndex(column);
        final JdbcType type = columnTypes.get(column - 1);
        return type.getJdbcCode();
    }

    @Override
    public String getColumnTypeName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final JdbcType type = columnTypes.get(column - 1);
        return type.name();
    }

    @Override
    public String getColumnClassName(final int column) throws SQLException {
        verifyColumnIndex(column);
        final JdbcType type = columnTypes.get(column - 1);
        // NOTE(review): getClass() here is the JdbcType enum class itself, so this
        // returns the same class name for every column rather than the Java class
        // of the column's data — confirm this is the intended behavior.
        return type.getClass().getName();
    }
}
| 7,490 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/SchemaHelperGremlinDataModel.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel;
import org.apache.tinkerpop.gremlin.driver.Client;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.common.IAMHelper;
import software.aws.neptune.gremlin.adapter.converter.schema.SqlSchemaGrabber;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.SQLException;
import static org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource.traversal;
/**
 * Static helper for retrieving a {@link GremlinSchema} from a Neptune endpoint over Gremlin.
 */
public class SchemaHelperGremlinDataModel {
    private static final Logger LOGGER = LoggerFactory.getLogger(SchemaHelperGremlinDataModel.class);
    private static final int MIN_CONNECTION_POOL_SIZE = 32;
    private static final int MAX_CONNECTION_POOL_SIZE = 96;
    // 180 * 1000 = 180 seconds expressed in milliseconds (passed to maxWaitForConnection).
    private static final int CONNECTION_TIMEOUT = 180 * 1000;

    private SchemaHelperGremlinDataModel() {
        // Utility class with only static members; prevent instantiation.
    }

    /**
     * Builds, connects, and initializes a Gremlin {@link Client} for the given contact point.
     *
     * @param endpoint Host to connect to.
     * @param port     Port to connect to.
     * @param useIam   Whether to install the IAM SigV4 handshake interceptor.
     * @param useSsl   Whether to enable SSL.
     * @return An initialized Gremlin Client.
     */
    private static Client getClient(final String endpoint, final int port, final boolean useIam, final boolean useSsl) {
        final Cluster.Builder builder = Cluster.build();
        builder.addContactPoint(endpoint);
        builder.port(port);
        builder.enableSsl(useSsl);
        builder.maxWaitForConnection(CONNECTION_TIMEOUT);
        builder.maxConnectionPoolSize(MAX_CONNECTION_POOL_SIZE);
        builder.minConnectionPoolSize(MIN_CONNECTION_POOL_SIZE);
        if (useIam) {
            IAMHelper.addHandshakeInterceptor(builder);
        }
        final Cluster cluster = builder.create();
        final Client client = cluster.connect();
        client.init();
        return client;
    }

    /**
     * Strips the scheme from a Bolt endpoint (for example "bolt://host:port" yields "host");
     * non-Bolt endpoints are returned unchanged.
     *
     * @param endpoint Endpoint of database.
     * @param pathType Type of path.
     * @return Endpoint adjusted for use as a Gremlin contact point.
     * @throws SQLException If a Bolt endpoint is not of the form "scheme://host:port".
     */
    private static String getAdjustedEndpoint(final String endpoint, final MetadataCache.PathType pathType)
            throws SQLException {
        if (pathType == MetadataCache.PathType.Bolt) {
            // "bolt://host:port".split(":") -> ["bolt", "//host", "port"].
            final String[] endpointSplit = endpoint.split(":");
            if ((endpointSplit.length != 3) || (!endpointSplit[1].startsWith("//"))) {
                throw SqlError
                        .createSQLException(LOGGER, SqlState.CONNECTION_FAILURE, SqlError.INVALID_ENDPOINT, endpoint);
            }
            // endpointSplit[1] is "//host"; drop the leading slashes.
            return endpointSplit[1].substring(2);
        } else {
            return endpoint;
        }
    }

    /**
     * Function to get the schema of the graph.
     *
     * @param endpoint Endpoint of database.
     * @param port     Port of database.
     * @param useIAM   Boolean for whether or not to use IAM.
     * @param useSsl   Boolean for whether or not to use SSL.
     * @param pathType Type of path.
     * @param scanType Scan type.
     * @return Graph Schema.
     * @throws SQLException If graph schema cannot be obtained.
     */
    public static GremlinSchema getGraphSchema(final String endpoint, final int port, final boolean useIAM,
                                               final boolean useSsl,
                                               final MetadataCache.PathType pathType,
                                               final SqlSchemaGrabber.ScanType scanType)
            throws SQLException {
        final String adjustedEndpoint = getAdjustedEndpoint(endpoint, pathType);
        return SqlSchemaGrabber.getSchema(
                traversal().withRemote(DriverRemoteConnection.using(getClient(adjustedEndpoint, port, useIAM, useSsl))),
                scanType);
    }
}
| 7,491 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/MetadataCache.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.SqlSchemaGrabber;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinEdgeTable;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinVertexTable;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetColumns;
import software.aws.neptune.common.gremlindatamodel.resultset.ResultSetGetTables;
import software.aws.neptune.gremlin.GremlinConnectionProperties;
import software.aws.neptune.jdbc.utilities.AuthScheme;
import software.aws.neptune.opencypher.OpenCypherConnectionProperties;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Process-wide cache of {@link GremlinSchema} objects keyed by endpoint.
 */
public class MetadataCache {
    private static final Logger LOGGER = LoggerFactory.getLogger(MetadataCache.class);
    private static final Object LOCK = new Object();
    private static final int SCHEMA_CACHE_SIZE = 3;
    // Insertion-ordered cache of endpoint -> schema; once it grows beyond SCHEMA_CACHE_SIZE
    // the eldest entry is evicted. All reads and writes must be guarded by LOCK.
    private static final Map<String, GremlinSchema> GREMLIN_SCHEMAS =
            new LinkedHashMap<String, GremlinSchema>() {
                @Override
                protected boolean removeEldestEntry(final Map.Entry<String, GremlinSchema> eldest) {
                    return size() > SCHEMA_CACHE_SIZE;
                }
            };

    private MetadataCache() {
        // Static cache holder; prevent instantiation.
    }

    /**
     * Function to update the cache of the metadata. No-op if the endpoint is already cached.
     *
     * @param endpoint Endpoint of target database.
     * @param port     Port of target database.
     * @param useIam   Flag to use IAM or not.
     * @param useSsl   Flag to use SSL.
     * @param pathType Path type.
     * @param scanType Scan type used when grabbing the schema.
     * @throws SQLException Thrown if error occurs during update.
     */
    public static void updateCache(final String endpoint, final int port, final boolean useIam, final boolean useSsl,
                                   final PathType pathType, final SqlSchemaGrabber.ScanType scanType)
            throws SQLException {
        synchronized (LOCK) {
            if (!GREMLIN_SCHEMAS.containsKey(endpoint)) {
                GREMLIN_SCHEMAS.put(endpoint, SchemaHelperGremlinDataModel.getGraphSchema(
                        endpoint, port, useIam, useSsl, pathType, scanType));
            }
        }
    }

    /**
     * Function to update the cache of the metadata if it has not been cached yet.
     *
     * @param gremlinConnectionProperties GremlinConnectionProperties to use.
     * @throws SQLException Thrown if error occurs during update.
     */
    public static void updateCacheIfNotUpdated(final GremlinConnectionProperties gremlinConnectionProperties)
            throws SQLException {
        // Benign check-then-act: updateCache re-checks under LOCK, so a concurrent caller at
        // worst makes one redundant call that is skipped.
        if (!isMetadataCached(gremlinConnectionProperties.getContactPoint())) {
            updateCache(gremlinConnectionProperties.getContactPoint(), gremlinConnectionProperties.getPort(),
                    (gremlinConnectionProperties.getAuthScheme() == AuthScheme.IAMSigV4),
                    gremlinConnectionProperties.getEnableSsl(),
                    MetadataCache.PathType.Gremlin, gremlinConnectionProperties.getScanType());
        }
    }

    /**
     * Function to update the cache of the metadata if it has not been cached yet.
     *
     * @param openCypherConnectionProperties OpenCypherConnectionProperties to use.
     * @throws SQLException Thrown if error occurs during update.
     */
    public static void updateCacheIfNotUpdated(final OpenCypherConnectionProperties openCypherConnectionProperties)
            throws SQLException {
        if (!isMetadataCached(openCypherConnectionProperties.getEndpoint())) {
            updateCache(openCypherConnectionProperties.getEndpoint(), openCypherConnectionProperties.getPort(),
                    (openCypherConnectionProperties.getAuthScheme() == AuthScheme.IAMSigV4),
                    openCypherConnectionProperties.getUseEncryption(),
                    PathType.Bolt, openCypherConnectionProperties.getScanType());
        }
    }

    /**
     * Function to return whether metadata for the given endpoint is cached.
     *
     * @param endpoint The host endpoint.
     * @return True if the endpoint's schema is cached, false otherwise.
     */
    public static boolean isMetadataCached(final String endpoint) {
        synchronized (LOCK) {
            return GREMLIN_SCHEMAS.containsKey(endpoint);
        }
    }

    /**
     * Function to filter cached NodeColumnInfo.
     *
     * @param nodeFilter Filter to apply; null or "%" means no filtering.
     * @param endpoint   The host endpoint.
     * @return Filtered GremlinSchema.
     * @throws SQLException If the endpoint has not been cached yet.
     */
    public static GremlinSchema getFilteredCacheNodeColumnInfos(final String nodeFilter, final String endpoint)
            throws SQLException {
        synchronized (LOCK) {
            if (!getGremlinSchemas().containsKey(endpoint)) {
                throw new SQLException("Error, cache must be updated before filtered cache can be retrieved.");
            } else if (nodeFilter == null || "%".equals(nodeFilter)) {
                return getGremlinSchemas().get(endpoint);
            }
            final GremlinSchema schema = getGremlinSchemas().get(endpoint);
            // Split the filter once instead of once per table per stream element.
            final List<String> filterLabels = Arrays.asList(nodeFilter.split(":"));
            LOGGER.info("Getting vertices.");
            final List<GremlinVertexTable> vertices = schema.getVertices();
            LOGGER.info("Getting edges.");
            final List<GremlinEdgeTable> edges = schema.getEdges();
            // NOTE(review): allMatch keeps a table only when its label equals *every* filter
            // token, so a multi-token filter can never match a single label. Confirm whether
            // anyMatch was intended; original behavior is preserved here.
            final List<GremlinVertexTable> filteredGremlinVertexTables = vertices.stream()
                    .filter(table -> filterLabels.stream().allMatch(f -> table.getLabel().equals(f)))
                    .collect(Collectors.toList());
            final List<GremlinEdgeTable> filteredGremlinEdgeTables = edges.stream()
                    .filter(table -> filterLabels.stream().allMatch(f -> table.getLabel().equals(f)))
                    .collect(Collectors.toList());
            return new GremlinSchema(filteredGremlinVertexTables, filteredGremlinEdgeTables);
        }
    }

    /**
     * Helper function to get GREMLIN_SCHEMAS. Intended for use while holding LOCK.
     *
     * @return A map of Gremlin Schemas.
     */
    static Map<String, GremlinSchema> getGremlinSchemas() {
        return GREMLIN_SCHEMAS;
    }

    /**
     * Function to filter ResultSetInfoWithoutRows for getColumns.
     *
     * @param nodeFilter Filter to apply.
     * @param endpoint   The host endpoint.
     * @return Filtered ResultSetInfoWithoutRows whose row count is the total column count.
     * @throws SQLException If the endpoint has not been cached yet.
     */
    public static ResultSetInfoWithoutRows getFilteredResultSetInfoWithoutRowsForColumns(
            final String nodeFilter, final String endpoint) throws SQLException {
        return new ResultSetInfoWithoutRows(getFilteredCacheNodeColumnInfos(nodeFilter, endpoint)
                .getAllTables()
                .stream()
                .mapToInt(table -> table.getColumns().size()).sum(), ResultSetGetColumns.getColumns());
    }

    /**
     * Function to filter ResultSetInfoWithoutRows for getTables.
     *
     * @param nodeFilter Filter to apply.
     * @param endpoint   The host endpoint.
     * @return Filtered ResultSetInfoWithoutRows whose row count is the table count.
     * @throws SQLException If the endpoint has not been cached yet.
     */
    public static ResultSetInfoWithoutRows getFilteredResultSetInfoWithoutRowsForTables(
            final String nodeFilter, final String endpoint) throws SQLException {
        return new ResultSetInfoWithoutRows(
                getFilteredCacheNodeColumnInfos(nodeFilter, endpoint).getAllTables().size(),
                ResultSetGetTables.getColumns());
    }

    /**
     * Get the GremlinSchema of the host endpoint.
     *
     * @param endpoint The host endpoint.
     * @return GremlinSchema Object, or null if the endpoint is not cached.
     */
    public static GremlinSchema getGremlinSchema(final String endpoint) {
        // Guarded by LOCK for consistency with every other accessor; this read was previously
        // unsynchronized.
        synchronized (LOCK) {
            return GREMLIN_SCHEMAS.get(endpoint);
        }
    }

    public enum PathType {
        Bolt,
        Gremlin
    }
}
| 7,492 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetSchemas.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import com.google.common.collect.ImmutableList;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Base ResultSet for getSchemas.
*/
public abstract class ResultSetGetSchemas extends ResultSetGetString {
    /**
     * TABLE_SCHEM String => schema name (constant "gremlin")
     * TABLE_CAT String => catalog name (always null)
     */
    private static final List<String> COLUMNS = ImmutableList.of("TABLE_SCHEM", "TABLE_CAT");
    private static final Map<String, String> CONVERSION_MAP = new HashMap<>();

    static {
        CONVERSION_MAP.put("TABLE_SCHEM", "gremlin");
        CONVERSION_MAP.put("TABLE_CAT", null);
    }

    /**
     * ResultSetGetSchemas constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public ResultSetGetSchemas(final Statement statement) {
        // Reuse COLUMNS so the constructor's column list cannot drift from getColumns();
        // previously the same literal list was duplicated inline.
        super(statement, COLUMNS, 1, ImmutableList.of(CONVERSION_MAP));
    }

    protected List<String> getColumns() {
        return COLUMNS;
    }
}
| 7,493 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetColumns.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinProperty;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.jdbc.ResultSet;
import software.aws.neptune.jdbc.utilities.JavaToJdbcTypeConverter;
import software.aws.neptune.jdbc.utilities.JdbcType;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.DatabaseMetaData;
import java.sql.Date;
import java.sql.SQLException;
import java.sql.Statement;
import java.sql.Time;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
/**
* Base ResultSet for getColumns.
*/
public abstract class ResultSetGetColumns extends ResultSet
        implements java.sql.ResultSet {
    // Maps Gremlin type names (matched case-insensitively) to the Java classes that back them.
    public static final Map<String, Class<?>> GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP = new HashMap<>();
    private static final Logger LOGGER = LoggerFactory.getLogger(ResultSetGetColumns.class);
    /**
     * TABLE_CAT String => table catalog (may be null)
     * TABLE_SCHEM String => table schema (may be null)
     * TABLE_NAME String => table name
     * COLUMN_NAME String => column name
     * DATA_TYPE int => SQL type from java.sql.Types
     * TYPE_NAME String => Data source dependent type name, for a UDT the type name is fully qualified
     * COLUMN_SIZE int => column size.
     * BUFFER_LENGTH is not used.
     * DECIMAL_DIGITS int => the number of fractional digits. Null is returned for data types where DECIMAL_DIGITS is not applicable.
     * NUM_PREC_RADIX int => Radix (typically either 10 or 2)
     * NULLABLE int => is NULL allowed.
     * columnNoNulls - might not allow NULL values
     * columnNullable - definitely allows NULL values
     * columnNullableUnknown - nullability unknown
     * REMARKS String => comment describing column (may be null)
     * COLUMN_DEF String => default value for the column, which should be interpreted as a string when the value is enclosed in single quotes (may be null)
     * SQL_DATA_TYPE int => unused
     * SQL_DATETIME_SUB int => unused
     * CHAR_OCTET_LENGTH int => for char types the maximum number of bytes in the column
     * ORDINAL_POSITION int => index of column in table (starting at 1)
     * IS_NULLABLE String => ISO rules are used to determine the nullability for a column.
     * YES --- if the column can include NULLs
     * NO --- if the column cannot include NULLs
     * empty string --- if the nullability for the column is unknown
     * SCOPE_CATALOG String => catalog of table that is the scope of a reference attribute (null if DATA_TYPE isn't REF)
     * SCOPE_SCHEMA String => schema of table that is the scope of a reference attribute (null if the DATA_TYPE isn't REF)
     * SCOPE_TABLE String => table name that this the scope of a reference attribute (null if the DATA_TYPE isn't REF)
     * SOURCE_DATA_TYPE short => source type of a distinct type or user-generated Ref type, SQL type from java.sql.Types (null if DATA_TYPE isn't DISTINCT or user-generated REF)
     * IS_AUTOINCREMENT String => Indicates whether this column is auto incremented
     * YES --- if the column is auto incremented
     * NO --- if the column is not auto incremented
     * empty string --- if it cannot be determined whether the column is auto incremented
     * IS_GENERATEDCOLUMN String => Indicates whether this is a generated column
     * YES --- if this a generated column
     * NO --- if this not a generated column
     * empty string --- if it cannot be determined whether this is a generated column
     */
    private static final Map<String, Object> CONVERSION_MAP = new HashMap<>();
    private static final List<String> ORDERED_COLUMNS = new ArrayList<>();

    static {
        // Per-column defaults; values that vary per column are overwritten in the constructor.
        CONVERSION_MAP.put("TABLE_CAT", null);
        CONVERSION_MAP.put("TABLE_SCHEM", "gremlin");
        CONVERSION_MAP.put("BUFFER_LENGTH", null); // null
        CONVERSION_MAP.put("NULLABLE", DatabaseMetaData.columnNullable);
        CONVERSION_MAP.put("REMARKS", null); // null
        CONVERSION_MAP.put("SQL_DATA_TYPE", null); // null
        CONVERSION_MAP.put("SQL_DATETIME_SUB", null); // null
        CONVERSION_MAP.put("IS_NULLABLE", "YES");
        CONVERSION_MAP.put("SCOPE_CATALOG", null); // null
        CONVERSION_MAP.put("SCOPE_SCHEMA", null); // null
        CONVERSION_MAP.put("SCOPE_TABLE", null); // null
        CONVERSION_MAP.put("SOURCE_DATA_TYPE", null); // null
        CONVERSION_MAP.put("IS_AUTOINCREMENT", "NO");
        CONVERSION_MAP.put("IS_GENERATEDCOLUMN", "NO");
        CONVERSION_MAP.put("COLUMN_DEF", null);
        ORDERED_COLUMNS.add("TABLE_CAT");
        ORDERED_COLUMNS.add("TABLE_SCHEM");
        ORDERED_COLUMNS.add("TABLE_NAME");
        ORDERED_COLUMNS.add("COLUMN_NAME");
        ORDERED_COLUMNS.add("DATA_TYPE");
        ORDERED_COLUMNS.add("TYPE_NAME");
        ORDERED_COLUMNS.add("COLUMN_SIZE");
        ORDERED_COLUMNS.add("BUFFER_LENGTH");
        ORDERED_COLUMNS.add("DECIMAL_DIGITS");
        ORDERED_COLUMNS.add("NUM_PREC_RADIX");
        ORDERED_COLUMNS.add("NULLABLE");
        ORDERED_COLUMNS.add("REMARKS");
        ORDERED_COLUMNS.add("COLUMN_DEF");
        ORDERED_COLUMNS.add("SQL_DATA_TYPE");
        ORDERED_COLUMNS.add("SQL_DATETIME_SUB");
        ORDERED_COLUMNS.add("CHAR_OCTET_LENGTH");
        ORDERED_COLUMNS.add("ORDINAL_POSITION");
        ORDERED_COLUMNS.add("IS_NULLABLE");
        ORDERED_COLUMNS.add("SCOPE_CATALOG");
        ORDERED_COLUMNS.add("SCOPE_SCHEMA");
        ORDERED_COLUMNS.add("SCOPE_TABLE");
        ORDERED_COLUMNS.add("SOURCE_DATA_TYPE");
        ORDERED_COLUMNS.add("IS_AUTOINCREMENT");
        ORDERED_COLUMNS.add("IS_GENERATEDCOLUMN");
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Byte", Byte.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Short", Short.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Integer", Integer.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Boolean", Boolean.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Long", Long.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Float", Float.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Double", Double.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("String", String.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Date", Date.class);
        GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.put("Time", Time.class);
    }

    // One map per column of every table, in schema iteration order.
    private final List<Map<String, Object>> rows = new ArrayList<>();
    private boolean wasNull = false;

    /**
     * ResultSetGetColumns constructor, initializes super class and builds one row per column
     * of every table in the schema.
     *
     * @param statement                Statement Object.
     * @param gremlinSchema            GremlinSchema Object.
     * @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
     * @throws SQLException If a generated row does not contain exactly the expected columns.
     */
    public ResultSetGetColumns(final Statement statement, final GremlinSchema gremlinSchema,
                               final ResultSetInfoWithoutRows resultSetInfoWithoutRows)
            throws SQLException {
        super(statement, resultSetInfoWithoutRows.getColumns(), resultSetInfoWithoutRows.getRowCount());
        for (final GremlinTableBase gremlinTableBase : gremlinSchema.getAllTables()) {
            int ordinalPosition = 1;
            for (final Map.Entry<String, GremlinProperty> property : gremlinTableBase.getColumns().entrySet()) {
                // Add defaults.
                final Map<String, Object> map = new HashMap<>(CONVERSION_MAP);
                // Set table name.
                map.put("TABLE_NAME", gremlinTableBase.getLabel());
                // Get column type.
                final String dataType = property.getValue().getType();
                map.put("TYPE_NAME", dataType);
                // Case-insensitive lookup of the backing Java class; unknown types fall back
                // to String. (Replaces the previous isPresent()/get() ternary with orElse.)
                final Class<?> javaClass = GREMLIN_STRING_TYPE_TO_JAVA_TYPE_CONVERTER_MAP.entrySet().stream()
                        .filter(entry -> entry.getKey().equalsIgnoreCase(dataType))
                        .map(Map.Entry::getValue)
                        .findFirst()
                        .orElse(String.class);
                map.put("CHAR_OCTET_LENGTH", (javaClass == String.class) ? Integer.MAX_VALUE : null);
                final int jdbcType = JavaToJdbcTypeConverter.CLASS_TO_JDBC_ORDINAL
                        .getOrDefault(javaClass, JdbcType.VARCHAR.getJdbcCode());
                map.put("DATA_TYPE", jdbcType);
                // NOTE(review): SQL_DATA_TYPE is "unused" per the JDBC contract above, yet it is
                // overwritten here with the JDBC code — confirm whether this is intentional.
                map.put("SQL_DATA_TYPE", jdbcType);
                map.put("COLUMN_NAME", property.getKey());
                // Redundant with the CONVERSION_MAP defaults; kept for explicitness.
                map.put("NULLABLE", DatabaseMetaData.columnNullable);
                map.put("IS_NULLABLE", "YES");
                // TODO: These need to be verified for Tableau.
                map.put("DECIMAL_DIGITS", null);
                map.put("NUM_PREC_RADIX", 10);
                map.put("ORDINAL_POSITION", ordinalPosition++);
                // TODO AN-839: Fix COLUMN_SIZE.
                map.put("COLUMN_SIZE", 10);
                // Sanity check: every row must carry exactly the advertised column set.
                if (!map.keySet().equals(new HashSet<>(ORDERED_COLUMNS))) {
                    throw SqlError.createSQLException(
                            LOGGER,
                            SqlState.DATA_TYPE_TRANSFORM_VIOLATION,
                            SqlError.UNSUPPORTED_TYPE, map.keySet().toString());
                }
                rows.add(map);
            }
        }
    }

    public static List<String> getColumns() {
        return ORDERED_COLUMNS;
    }

    /**
     * Returns the raw value of the given column for the current row.
     *
     * @param columnIndex 1-based column index into ORDERED_COLUMNS.
     * @return The stored value (may be null; wasNull is updated accordingly).
     * @throws SQLException If the result set is closed or an index is out of range.
     */
    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        verifyOpen();
        final int index = getRowIndex();
        if ((index < 0) || (index >= rows.size())) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_INDEX, getRowIndex() + 1,
                            rows.size());
        }
        if ((columnIndex <= 0) || (columnIndex > ORDERED_COLUMNS.size())) {
            throw SqlError
                    .createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX, columnIndex,
                            ORDERED_COLUMNS.size());
        }
        final String key = ORDERED_COLUMNS.get(columnIndex - 1);
        if (rows.get(index).containsKey(key)) {
            final Object data = rows.get(index).get(key);
            wasNull = (data == null);
            return data;
        } else {
            throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
        }
    }

    @Override
    public boolean wasNull() throws SQLException {
        return wasNull;
    }

    @Override
    protected void doClose() throws SQLException {
        // No driver resources to release; rows are held in memory.
    }

    @Override
    protected int getDriverFetchSize() throws SQLException {
        return 0;
    }

    @Override
    protected void setDriverFetchSize(final int rows) {
        // Intentionally a no-op.
    }
}
| 7,494 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetString.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.jdbc.utilities.SqlError;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Map;
/**
* Base ResultSet for String types.
*/
public abstract class ResultSetGetString extends GenericResultSet {
    private static final Logger LOGGER = LoggerFactory.getLogger(ResultSetGetString.class);
    private final List<String> columns;
    private final List<Map<String, String>> constantReturns;
    private boolean wasNull = false;

    /**
     * ResultSetGetString constructor, initializes super class.
     *
     * @param statement       Statement Object.
     * @param columns         Ordered column names for the result.
     * @param rowCount        Number of rows in the result.
     * @param constantReturns One map per row, giving the fixed value for each column name.
     */
    public ResultSetGetString(final Statement statement,
                              final List<String> columns,
                              final int rowCount,
                              final List<Map<String, String>> constantReturns) {
        super(statement, columns, rowCount);
        this.columns = columns;
        this.constantReturns = constantReturns;
    }

    /**
     * Looks up the fixed value of the given column in the current row.
     *
     * @param columnIndex 1-based column index.
     * @return The stored value for the column (may be null; wasNull is updated).
     * @throws SQLException If the result set is closed, an index is out of range,
     *                      or the column has no stored value.
     */
    @Override
    protected Object getConvertedValue(final int columnIndex) throws SQLException {
        verifyOpen();
        final int rowIdx = getRowIndex();
        final boolean rowInRange = (rowIdx >= 0) && (rowIdx < constantReturns.size());
        final boolean columnInRange = (columnIndex > 0) && (columnIndex <= columns.size());
        if (!rowInRange || !columnInRange) {
            throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
        }
        final Map<String, String> row = constantReturns.get(rowIdx);
        final String columnName = columns.get(columnIndex - 1);
        if (!row.containsKey(columnName)) {
            throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
        }
        final String value = row.get(columnName);
        wasNull = (value == null);
        return value;
    }

    @Override
    public boolean wasNull() throws SQLException {
        return wasNull;
    }
}
| 7,495 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetCatalogs.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import com.google.common.collect.ImmutableList;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// TODO: Things extending this should switch to dependency injection to reduce the number of classes that have the same code.
/**
* Base ResultSet for getCatalogs.
*/
public abstract class ResultSetGetCatalogs extends ResultSetGetString {
    /**
     * TABLE_CAT String => catalog name (always null here; no rows are reported)
     */
    private static final List<String> COLUMNS = ImmutableList.of("TABLE_CAT");
    private static final Map<String, String> CONVERSION_MAP = new HashMap<>();

    static {
        CONVERSION_MAP.put("TABLE_CAT", null);
    }

    /**
     * ResultSetGetCatalogs constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public ResultSetGetCatalogs(final Statement statement) {
        // Reuse COLUMNS so the constructor's column list cannot drift from getColumns();
        // previously the same literal list was duplicated inline. Row count is 0.
        super(statement, COLUMNS, 0, ImmutableList.of(CONVERSION_MAP));
    }

    protected List<String> getColumns() {
        return COLUMNS;
    }
}
| 7,496 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/GenericResultSet.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import software.aws.neptune.jdbc.ResultSet;
import java.sql.SQLException;
import java.util.List;
/**
* Generic ResultSet class.
*/
public abstract class GenericResultSet extends ResultSet implements java.sql.ResultSet {
    /**
     * GenericResultSet constructor, initializes super class.
     *
     * @param statement Statement Object.
     * @param columns   Columns for result.
     * @param rowCount  Row count for result.
     */
    public GenericResultSet(final java.sql.Statement statement, final List<String> columns, final int rowCount) {
        super(statement, columns, rowCount);
    }

    // No driver-side resources to release.
    @Override
    protected void doClose() {
    }

    @Override
    protected int getDriverFetchSize() {
        // TODO: Decide whether fetch size should be tracked here or delegated to the statement.
        return 0;
    }

    @Override
    protected void setDriverFetchSize(final int rows) {
        // TODO: Decide whether fetch size should be tracked here or delegated to the statement.
    }

    @Override
    public boolean wasNull() throws SQLException {
        // Subclasses that can return SQL NULL override this with real tracking.
        return false;
    }
}
| 7,497 |
0 | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel | Create_ds/amazon-neptune-jdbc-driver/src/main/java/software/aws/neptune/common/gremlindatamodel/resultset/ResultSetGetTables.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.aws.neptune.gremlin.adapter.converter.schema.calcite.GremlinSchema;
import software.aws.neptune.gremlin.adapter.converter.schema.gremlin.GremlinTableBase;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import software.aws.neptune.jdbc.utilities.SqlError;
import software.aws.neptune.jdbc.utilities.SqlState;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Base ResultSet for getTables.
*/
public abstract class ResultSetGetTables extends GenericResultSet implements java.sql.ResultSet {
private static final Logger LOGGER = LoggerFactory.getLogger(ResultSetGetTables.class);
/**
* TABLE_CAT String => table catalog (may be null)
* TABLE_SCHEM String => table schema (may be null)
* TABLE_NAME String => table name
* TABLE_TYPE String => table type. Typical types are "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY",
* "LOCAL TEMPORARY", "ALIAS", "SYNONYM".
* REMARKS String => explanatory comment on the table
* TYPE_CAT String => the types catalog (may be null)
* TYPE_SCHEM String => the types schema (may be null)
* TYPE_NAME String => type name (may be null)
* SELF_REFERENCING_COL_NAME String => name of the designated "identifier" column of a typed table (may be null)
* REF_GENERATION String => specifies how values in SELF_REFERENCING_COL_NAME are created. Values are "SYSTEM", "USER", "DERIVED". (may be null)
*/
private static final List<String> ORDERED_COLUMNS = ImmutableList.of(
"TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS", "TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME",
"SELF_REFERENCING_COL_NAME", "REF_GENERATION");
private static final Map<String, Object> MAPPED_KEYS = new HashMap<>();
private static final String TABLE_NAME = "TABLE_NAME";
static {
MAPPED_KEYS.put("TABLE_CAT", null);
MAPPED_KEYS.put("TABLE_SCHEM", "gremlin");
MAPPED_KEYS.put("TABLE_TYPE", "TABLE");
MAPPED_KEYS.put("REMARKS", "");
MAPPED_KEYS.put("TYPE_CAT", null);
MAPPED_KEYS.put("TYPE_SCHEM", null);
MAPPED_KEYS.put("TYPE_NAME", null);
MAPPED_KEYS.put("SELF_REFERENCING_COL_NAME", null);
MAPPED_KEYS.put("REF_GENERATION", null);
}
private final List<Map<String, Object>> rows = new ArrayList<>();
private boolean wasNull = false;
/**
* ResultSetGetTables constructor, initializes super class.
*
* @param statement Statement Object.
* @param gremlinSchema GremlinSchema Object.
* @param resultSetInfoWithoutRows ResultSetInfoWithoutRows Object.
*/
public ResultSetGetTables(final Statement statement,
final GremlinSchema gremlinSchema,
final ResultSetInfoWithoutRows resultSetInfoWithoutRows) {
super(statement, resultSetInfoWithoutRows.getColumns(), resultSetInfoWithoutRows.getRowCount());
for (final GremlinTableBase gremlinTableBase : gremlinSchema.getAllTables()) {
// Add defaults, table name, and push into List.
final Map<String, Object> map = new HashMap<>(MAPPED_KEYS);
map.put(TABLE_NAME, gremlinTableBase.getLabel());
rows.add(map);
}
}
/**
* Function to sort nodes so that node sorting is consistent so that table names which are concatenated node labels
* are also sorted.
*
* @param nodes List of nodes to sort and Stringify.
* @return Return String joined list after sorting.
*/
public static String nodeListToString(final List<String> nodes) {
// Don't overly care how it is sorted as long as it is consistent.
// Need to copy list in case it is an ImmutableList underneath.
final List<String> sortedNodes = new ArrayList<>(nodes);
java.util.Collections.sort(sortedNodes);
return String.join(":", sortedNodes);
}
    /**
     * Gets the ordered list of column names for the getTables result set.
     *
     * @return Immutable, ordered List of the JDBC getTables column names.
     */
    public static List<String> getColumns() {
        return ORDERED_COLUMNS;
    }
@Override
protected Object getConvertedValue(final int columnIndex) throws SQLException {
verifyOpen();
final int index = getRowIndex();
if ((index < 0) || (index >= rows.size())) {
throw SqlError.createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_INDEX, index + 1,
rows.size());
}
if ((columnIndex <= 0) || (columnIndex > ORDERED_COLUMNS.size())) {
throw SqlError
.createSQLException(LOGGER, SqlState.DATA_EXCEPTION, SqlError.INVALID_COLUMN_INDEX, columnIndex,
ORDERED_COLUMNS.size());
}
final String key = ORDERED_COLUMNS.get(columnIndex - 1);
if (rows.get(index).containsKey(key)) {
final Object data = rows.get(index).get(key);
wasNull = (data == null);
return data;
} else {
throw SqlError.createSQLFeatureNotSupportedException(LOGGER);
}
}
    /**
     * Reports whether the last column value read was SQL NULL.
     *
     * @return True if the previous getConvertedValue call returned null.
     * @throws SQLException Declared by the JDBC interface; not thrown here.
     */
    @Override
    public boolean wasNull() throws SQLException {
        return wasNull;
    }
    /**
     * No-op: this metadata result set holds no driver resources to release.
     */
    @Override
    protected void doClose() {
    }
    /**
     * Fetch size is not applicable to this in-memory metadata result set.
     *
     * @return Always 0.
     */
    @Override
    protected int getDriverFetchSize() {
        return 0;
    }
    /**
     * No-op: fetch size hints are ignored for this in-memory metadata result set.
     *
     * @param rows Requested fetch size; ignored.
     */
    @Override
    protected void setDriverFetchSize(final int rows) {
    }
}
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*
*/
package software.aws.neptune.common.gremlindatamodel.resultset;
import com.google.common.collect.ImmutableList;
import software.aws.neptune.common.ResultSetInfoWithoutRows;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Base ResultSet for getTableTypes.
*/
public abstract class ResultSetGetTableTypes extends ResultSetGetString {
    /**
     * The single column returned: TABLE_TYPE String => table type. Typical types are "TABLE",
     * "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY", "LOCAL TEMPORARY", "ALIAS", "SYNONYM".
     */
    private static final List<String> COLUMNS = ImmutableList.of("TABLE_TYPE");
    // Only one table type is exposed, so the result set always contains exactly one row.
    private static final Map<String, String> CONVERSION_MAP =
            java.util.Collections.singletonMap("TABLE_TYPE", "TABLE");
    private static final ResultSetInfoWithoutRows RESULT_SET_INFO_WITHOUT_ROWS =
            new ResultSetInfoWithoutRows(1, COLUMNS);

    /**
     * ResultSetGetTableTypes constructor, initializes super class.
     *
     * @param statement Statement Object.
     */
    public ResultSetGetTableTypes(final Statement statement) {
        super(statement, RESULT_SET_INFO_WITHOUT_ROWS.getColumns(), RESULT_SET_INFO_WITHOUT_ROWS.getRowCount(),
                ImmutableList.of(CONVERSION_MAP));
    }

    /**
     * Gets the list of column names for the getTableTypes result set.
     *
     * @return Immutable List containing the single column name.
     */
    public static List<String> getColumns() {
        return COLUMNS;
    }
}