index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal | Create_ds/airpal/src/main/java/com/airbnb/airpal/modules/AirpalModule.java | package com.airbnb.airpal.modules;
import com.airbnb.airlift.http.client.ForQueryInfoClient;
import com.airbnb.airpal.AirpalConfiguration;
import com.airbnb.airpal.api.output.PersistentJobOutputFactory;
import com.airbnb.airpal.api.output.builders.OutputBuilderFactory;
import com.airbnb.airpal.api.output.persistors.CSVPersistorFactory;
import com.airbnb.airpal.api.output.persistors.PersistorFactory;
import com.airbnb.airpal.core.AirpalUserFactory;
import com.airbnb.airpal.core.execution.ExecutionClient;
import com.airbnb.airpal.core.health.PrestoHealthCheck;
import com.airbnb.airpal.core.store.files.ExpiringFileStore;
import com.airbnb.airpal.core.store.history.JobHistoryStore;
import com.airbnb.airpal.core.store.history.JobHistoryStoreDAO;
import com.airbnb.airpal.core.store.jobs.ActiveJobsStore;
import com.airbnb.airpal.core.store.jobs.InMemoryActiveJobsStore;
import com.airbnb.airpal.core.store.queries.QueryStore;
import com.airbnb.airpal.core.store.queries.QueryStoreDAO;
import com.airbnb.airpal.core.store.usage.CachingUsageStore;
import com.airbnb.airpal.core.store.usage.SQLUsageStore;
import com.airbnb.airpal.core.store.usage.UsageStore;
import com.airbnb.airpal.presto.ClientSessionFactory;
import com.airbnb.airpal.presto.ForQueryRunner;
import com.airbnb.airpal.presto.QueryInfoClient;
import com.airbnb.airpal.presto.QueryInfoClient.BasicQueryInfo;
import com.airbnb.airpal.presto.metadata.ColumnCache;
import com.airbnb.airpal.presto.metadata.PreviewTableCache;
import com.airbnb.airpal.presto.metadata.SchemaCache;
import com.airbnb.airpal.resources.ExecuteResource;
import com.airbnb.airpal.resources.FilesResource;
import com.airbnb.airpal.resources.HealthResource;
import com.airbnb.airpal.resources.PingResource;
import com.airbnb.airpal.resources.QueryResource;
import com.airbnb.airpal.resources.ResultsPreviewResource;
import com.airbnb.airpal.resources.S3FilesResource;
import com.airbnb.airpal.resources.SessionResource;
import com.airbnb.airpal.resources.TablesResource;
import com.airbnb.airpal.resources.sse.SSEEventSourceServlet;
import com.airbnb.airpal.sql.DbType;
import com.airbnb.airpal.sql.beans.TableRow;
import com.airbnb.airpal.sql.jdbi.QueryStoreMapper;
import com.airbnb.airpal.sql.jdbi.URIArgumentFactory;
import com.airbnb.airpal.sql.jdbi.UUIDArgumentFactory;
import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.BasicAWSCredentials;
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.AmazonS3EncryptionClient;
import com.amazonaws.services.s3.model.EncryptionMaterialsProvider;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Strings;
import com.google.common.eventbus.AsyncEventBus;
import com.google.common.eventbus.EventBus;
import com.google.inject.AbstractModule;
import com.google.inject.Provider;
import com.google.inject.Provides;
import com.google.inject.Scopes;
import com.google.inject.Singleton;
import com.google.inject.name.Names;
import io.airlift.configuration.ConfigDefaults;
import io.airlift.configuration.ConfigurationFactory;
import io.airlift.http.client.HttpClient;
import io.airlift.http.client.HttpClientConfig;
import io.airlift.units.DataSize;
import io.airlift.units.Duration;
import io.dropwizard.jdbi.DBIFactory;
import io.dropwizard.setup.Environment;
import lombok.extern.slf4j.Slf4j;
import org.apache.shiro.web.env.EnvironmentLoaderListener;
import org.skife.jdbi.v2.DBI;
import javax.annotation.Nullable;
import javax.inject.Named;
import javax.validation.constraints.Null;
import java.net.URI;
import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import static com.airbnb.airpal.presto.QueryRunner.QueryRunnerFactory;
import static io.airlift.http.client.HttpClientBinder.httpClientBinder;
import static io.airlift.json.JsonCodec.jsonCodec;
/**
 * Primary Guice module for Airpal: wires the Jersey resources, the airlift
 * HTTP clients used to talk to Presto, the metadata caches, the SQL-backed
 * stores, and the (optional) S3 output support, all driven by the Dropwizard
 * {@link AirpalConfiguration}.
 */
@Slf4j
public class AirpalModule extends AbstractModule
{
    // Shared defaults for both airlift HTTP clients (query-info and query-runner):
    // a 10 second connect timeout.
    private static final ConfigDefaults<HttpClientConfig> HTTP_CLIENT_CONFIG_DEFAULTS = d -> new HttpClientConfig()
            .setConnectTimeout(new Duration(10, TimeUnit.SECONDS));

    private final AirpalConfiguration config;
    private final Environment environment;

    public AirpalModule(AirpalConfiguration config, Environment environment)
    {
        this.config = config;
        this.environment = environment;
    }

    @Override
    protected void configure()
    {
        // REST resources, all singletons.
        bind(TablesResource.class).in(Scopes.SINGLETON);
        bind(ExecuteResource.class).in(Scopes.SINGLETON);
        bind(QueryResource.class).in(Scopes.SINGLETON);
        bind(HealthResource.class).in(Scopes.SINGLETON);
        bind(PingResource.class).in(Scopes.SINGLETON);
        bind(SessionResource.class).in(Scopes.SINGLETON);
        bind(SSEEventSourceServlet.class).in(Scopes.SINGLETON);
        bind(FilesResource.class).in(Scopes.SINGLETON);
        bind(ResultsPreviewResource.class).in(Scopes.SINGLETON);
        bind(S3FilesResource.class).in(Scopes.SINGLETON);
        // Two qualified airlift HTTP clients sharing the same connect-timeout defaults.
        httpClientBinder(binder()).bindHttpClient("query-info", ForQueryInfoClient.class)
                .withConfigDefaults(HTTP_CLIENT_CONFIG_DEFAULTS);
        httpClientBinder(binder()).bindHttpClient("query-runner", ForQueryRunner.class)
                .withConfigDefaults(HTTP_CLIENT_CONFIG_DEFAULTS);
        bind(EnvironmentLoaderListener.class).in(Scopes.SINGLETON);
        // Simple named String constants sourced from configuration.
        bind(String.class).annotatedWith(Names.named("createTableDestinationSchema")).toInstance(config.getCreateTableDestinationSchema());
        // nullToEmpty: S3 may be unconfigured; Guice cannot bind a null instance.
        bind(String.class).annotatedWith(Names.named("s3Bucket")).toInstance(Strings.nullToEmpty(config.getS3Bucket()));
        bind(PrestoHealthCheck.class).in(Scopes.SINGLETON);
        bind(ExecutionClient.class).in(Scopes.SINGLETON);
        bind(PersistentJobOutputFactory.class).in(Scopes.SINGLETON);
        bind(JobHistoryStore.class).to(JobHistoryStoreDAO.class).in(Scopes.SINGLETON);
    }

    /**
     * Maps the configured JDBC driver class to a {@link DbType} so callers can
     * pick dialect-specific SQL. Anything that is not MySQL or H2 falls back
     * to {@link DbType#Default}.
     */
    @Singleton
    @Provides
    public DbType provideDbType()
    {
        String driverClass = config.getDataSourceFactory().getDriverClass();
        if (driverClass.equalsIgnoreCase("com.mysql.jdbc.Driver")) {
            return DbType.MySQL;
        }
        else if (driverClass.equalsIgnoreCase("org.h2.Driver")) {
            return DbType.H2;
        }
        else {
            return DbType.Default;
        }
    }

    /**
     * Builds the JDBI instance with the custom row mappers and argument
     * factories this project uses.
     * NOTE(review): calls provideDbType() directly rather than injecting it;
     * harmless here since that method is a pure function of config.
     */
    @Singleton
    @Provides
    public DBI provideDBI(ObjectMapper objectMapper)
            throws ClassNotFoundException
    {
        final DBIFactory factory = new DBIFactory();
        final DBI dbi = factory.build(environment, config.getDataSourceFactory(), provideDbType().name());
        dbi.registerMapper(new TableRow.TableRowMapper(objectMapper));
        dbi.registerMapper(new QueryStoreMapper(objectMapper));
        dbi.registerArgumentFactory(new UUIDArgumentFactory());
        dbi.registerArgumentFactory(new URIArgumentFactory());
        return dbi;
    }

    /** Airlift configuration factory with no property overrides. */
    @Singleton
    @Provides
    public ConfigurationFactory provideConfigurationFactory()
    {
        return new ConfigurationFactory(Collections.<String, String>emptyMap());
    }

    /** URI of the Presto coordinator, from configuration. */
    @Named("coordinator-uri")
    @Provides
    public URI providePrestoCoordinatorURI()
    {
        return config.getPrestoCoordinator();
    }

    /** Default Presto catalog, from configuration. */
    @Singleton
    @Named("default-catalog")
    @Provides
    public String provideDefaultCatalog()
    {
        return config.getPrestoCatalog();
    }

    /**
     * Factory for Presto client sessions. The final null argument is the
     * session's time-zone/locale slot — presumably "use server default";
     * confirm against ClientSessionFactory's constructor.
     */
    @Provides
    @Singleton
    public ClientSessionFactory provideClientSessionFactory(@Named("coordinator-uri") Provider<URI> uriProvider)
    {
        return new ClientSessionFactory(uriProvider,
                config.getPrestoUser(),
                config.getPrestoSource(),
                config.getPrestoCatalog(),
                config.getPrestoSchema(),
                config.isPrestoDebug(),
                null);
    }

    /** Factory for query runners, backed by the "query-runner" HTTP client. */
    @Provides
    public QueryRunnerFactory provideQueryRunner(ClientSessionFactory sessionFactory,
            @ForQueryRunner HttpClient httpClient)
    {
        return new QueryRunnerFactory(sessionFactory, httpClient);
    }

    /** Client for fetching basic query info, backed by the "query-info" HTTP client. */
    @Provides
    public QueryInfoClient provideQueryInfoClient(@ForQueryInfoClient HttpClient httpClient)
    {
        return new QueryInfoClient(httpClient, jsonCodec(BasicQueryInfo.class));
    }

    /**
     * Schema cache, eagerly populated for the configured catalog at startup.
     */
    @Singleton
    @Provides
    public SchemaCache provideSchemaCache(QueryRunnerFactory queryRunnerFactory,
            @Named("presto") ExecutorService executorService)
    {
        final SchemaCache cache = new SchemaCache(queryRunnerFactory, executorService);
        cache.populateCache(config.getPrestoCatalog());
        return cache;
    }

    /** Column metadata cache (5 min refresh / 60 min expiry). */
    @Singleton
    @Provides
    public ColumnCache provideColumnCache(QueryRunnerFactory queryRunnerFactory,
            @Named("presto") ExecutorService executorService)
    {
        return new ColumnCache(queryRunnerFactory,
                new Duration(5, TimeUnit.MINUTES),
                new Duration(60, TimeUnit.MINUTES),
                executorService);
    }

    /** Table preview cache (20 min expiry, 100 preview rows). */
    @Singleton
    @Provides
    public PreviewTableCache providePreviewTableCache(QueryRunnerFactory queryRunnerFactory,
            @Named("presto") ExecutorService executorService)
    {
        return new PreviewTableCache(queryRunnerFactory,
                new Duration(20, TimeUnit.MINUTES),
                executorService,
                100);
    }

    /** Daemon-thread pool backing the async event bus. */
    @Singleton
    @Named("event-bus")
    @Provides
    public ExecutorService provideEventBusExecutorService()
    {
        return Executors.newCachedThreadPool(SchemaCache.daemonThreadsNamed("event-bus-%d"));
    }

    /** Daemon-thread pool for Presto metadata/query work. */
    @Singleton
    @Named("presto")
    @Provides
    public ExecutorService provideCompleterExecutorService()
    {
        return Executors.newCachedThreadPool(SchemaCache.daemonThreadsNamed("presto-%d"));
    }

    /** Single-threaded scheduler for periodic table-cache updates. */
    @Singleton
    @Named("hive")
    @Provides
    public ScheduledExecutorService provideTableCacheUpdater()
    {
        return Executors.newSingleThreadScheduledExecutor();
    }

    /** Daemon-thread pool for server-sent-events delivery. */
    @Singleton
    @Named("sse")
    @Provides
    public ExecutorService provideSSEExecutorService()
    {
        return Executors.newCachedThreadPool(SchemaCache.daemonThreadsNamed("sse-%d"));
    }

    /** Asynchronous Guava event bus using the "event-bus" executor. */
    @Singleton
    @Provides
    public EventBus provideEventBus(@Named("event-bus") ExecutorService executor)
    {
        return new AsyncEventBus(executor);
    }

    /**
     * Static AWS credentials from configuration, or null when either key is
     * missing (callers then fall back to instance-profile credentials).
     */
    @Provides
    @Nullable
    public AWSCredentials provideAWSCredentials()
    {
        if ((config.getS3AccessKey() == null) || (config.getS3SecretKey() == null)) {
            return null;
        }
        else {
            return new BasicAWSCredentials(config.getS3AccessKey(),
                    config.getS3SecretKey());
        }
    }

    /**
     * S3 client chosen from the 2x2 matrix of (static credentials vs instance
     * profile) x (client-side encryption vs plain).
     */
    @Singleton
    @Provides
    @Nullable
    public AmazonS3 provideAmazonS3Client(@Nullable AWSCredentials awsCredentials, @Nullable EncryptionMaterialsProvider encryptionMaterialsProvider)
    {
        if (awsCredentials == null) {
            if (encryptionMaterialsProvider == null) {
                return new AmazonS3Client(new InstanceProfileCredentialsProvider());
            }
            else {
                return new AmazonS3EncryptionClient(new InstanceProfileCredentialsProvider(), encryptionMaterialsProvider);
            }
        }
        if (encryptionMaterialsProvider == null) {
            return new AmazonS3Client(awsCredentials);
        }
        else {
            return new AmazonS3EncryptionClient(awsCredentials, encryptionMaterialsProvider);
        }
    }

    /**
     * Reflectively instantiates the configured EncryptionMaterialsProvider
     * class, or returns null when none is configured. Any reflection failure
     * is rethrown as a RuntimeException with the original cause attached.
     * NOTE(review): private @Provides methods are supported by Guice's
     * reflective invocation.
     */
    @Nullable
    @Singleton
    @Provides
    private EncryptionMaterialsProvider provideEncryptionMaterialsProvider()
    {
        String empClassName = config.getS3EncryptionMaterialsProvider();
        if (empClassName != null) {
            try {
                Class<?> empClass = Class.forName(empClassName);
                Object instance = empClass.newInstance();
                if (instance instanceof EncryptionMaterialsProvider) {
                    return (EncryptionMaterialsProvider) instance;
                }
                else {
                    throw new IllegalArgumentException("Class " + empClassName + " must implement EncryptionMaterialsProvider");
                }
            }
            catch (Exception x) {
                throw new RuntimeException("Unable to initialize EncryptionMaterialsProvider class " + empClassName + ": " + x, x);
            }
        }
        return null;
    }

    /** SQL usage store wrapped in a 6-minute cache. */
    @Singleton
    @Provides
    public UsageStore provideUsageCache(DBI dbi)
    {
        UsageStore delegate = new SQLUsageStore(config.getUsageWindow(), dbi, provideDbType());
        return new CachingUsageStore(delegate, io.dropwizard.util.Duration.minutes(6));
    }

    /** On-demand JDBI SQL-object DAO for saved queries. */
    @Provides
    public QueryStore provideQueryStore(DBI dbi)
    {
        return dbi.onDemand(QueryStoreDAO.class);
    }

    /** User factory with a 15-minute query timeout and "default" access level. */
    @Provides
    @Singleton
    public AirpalUserFactory provideAirpalUserFactory()
    {
        return new AirpalUserFactory(config.getPrestoSchema(), org.joda.time.Duration.standardMinutes(15), "default");
    }

    /** In-memory store of currently running jobs. */
    @Provides
    @Singleton
    public ActiveJobsStore provideActiveJobsStore()
    {
        return new InMemoryActiveJobsStore();
    }

    /** In-memory result-file store capped at 100 MB. */
    @Provides
    @Singleton
    public ExpiringFileStore provideExpiringFileStore()
    {
        return new ExpiringFileStore(new DataSize(100, DataSize.Unit.MEGABYTE));
    }

    /** CSV persistor factory; writes to S3 or the local file store per config. */
    @Provides
    @Singleton
    public CSVPersistorFactory provideCSVPersistorFactory(ExpiringFileStore fileStore, AmazonS3 s3Client, @Named("s3Bucket") String s3Bucket)
    {
        return new CSVPersistorFactory(config.isUseS3(), s3Client, s3Bucket, fileStore, config.isCompressedOutput());
    }

    /** Top-level persistor factory delegating to the CSV factory. */
    @Provides
    @Singleton
    public PersistorFactory providePersistorFactory(CSVPersistorFactory csvPersistorFactory)
    {
        return new PersistorFactory(csvPersistorFactory);
    }

    /** Output builder factory capped at the configured max output size in bytes. */
    @Provides
    @Singleton
    public OutputBuilderFactory provideOutputBuilderFactory()
    {
        long maxFileSizeInBytes = Math.round(Math.floor(config.getMaxOutputSize().getValue(DataSize.Unit.BYTE)));
        return new OutputBuilderFactory(maxFileSizeInBytes, config.isCompressedOutput());
    }
}
| 7,400 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/DbType.java | package com.airbnb.airpal.sql;
// Backing-database dialect for Airpal's metadata store. Chosen from the JDBC
// driver class in AirpalModule.provideDbType() and used to select
// dialect-specific SQL (see Util.getQueryFinishedCondition).
public enum DbType {
    MySQL,   // com.mysql.jdbc.Driver
    H2,      // org.h2.Driver
    Default  // any other driver; shares the MySQL date-arithmetic branch
}
| 7,401 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/Util.java | package com.airbnb.airpal.sql;
import com.airbnb.airpal.presto.Table;
import com.google.common.base.Joiner;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.HashSet;
import java.util.Set;
import static java.lang.String.format;
/**
 * SQL-fragment helpers for queries over the {@code tables} metadata table.
 */
public class Util
{
    private static final Joiner OR_JOINER = Joiner.on(" OR ").skipNulls();
    private static final Joiner DOT_JOINER = Joiner.on(".").skipNulls();

    /**
     * Builds an OR-joined predicate matching any of the given tables, with
     * unqualified column names.
     */
    public static String getTableCondition(Iterable<Table> tables)
    {
        return getTableCondition(null, tables);
    }

    /**
     * Builds an OR-joined predicate matching any of the given tables. When
     * {@code alias} is non-null it qualifies the column names (e.g.
     * {@code a.connector_id = '...'}).
     *
     * Fix: the alias was previously joined onto the value literals
     * ({@code connector_id = 'a.hive'}), producing conditions that could never
     * match; it now qualifies the column names as intended.
     *
     * NOTE(review): values are interpolated directly into the SQL text, so
     * this is only safe for trusted connector/schema/table names.
     */
    public static String getTableCondition(String alias, Iterable<Table> tables)
    {
        Set<String> tablesUsedByQuery = new HashSet<>(Iterables.size(tables));
        for (Table table : tables) {
            tablesUsedByQuery.add(
                    format("(%s = '%s' AND %s = '%s' AND %s = '%s')",
                            DOT_JOINER.join(alias, "connector_id"), table.getConnectorId(),
                            DOT_JOINER.join(alias, "schema_"), table.getSchema(),
                            DOT_JOINER.join(alias, "table_"), table.getTable()));
        }
        return OR_JOINER.join(tablesUsedByQuery);
    }

    /**
     * Returns the dialect-specific predicate restricting {@code query_finished}
     * to the last {@code :day_interval} days (H2 vs MySQL date arithmetic).
     */
    public static String getQueryFinishedCondition(DbType type)
    {
        if (type == DbType.H2) {
            return "query_finished > DATEADD('DAY', -:day_interval, CURRENT_TIMESTAMP())";
        } else {
            return "query_finished > DATE_SUB(UTC_TIMESTAMP(), INTERVAL :day_interval day)";
        }
    }
}
| 7,402 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/jdbi/UUIDArgument.java | package com.airbnb.airpal.sql.jdbi;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.Argument;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.UUID;
/**
 * JDBI {@link Argument} that binds a {@link UUID} parameter as its canonical
 * 36-character string form.
 */
public class UUIDArgument
        implements Argument
{
    private final UUID uuid;

    public UUIDArgument(UUID uuid)
    {
        this.uuid = uuid;
    }

    /** Binds the UUID's textual representation at the given parameter slot. */
    @Override
    public void apply(int position, PreparedStatement statement, StatementContext ctx)
            throws SQLException
    {
        final String textual = uuid.toString();
        statement.setString(position, textual);
    }
}
| 7,403 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/jdbi/URIArgument.java | package com.airbnb.airpal.sql.jdbi;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.Argument;
import java.net.URI;
import java.sql.PreparedStatement;
import java.sql.SQLException;
/**
 * JDBI {@link Argument} that binds a {@link URI} parameter as its string form.
 */
public class URIArgument implements Argument
{
    private final URI uri;

    public URIArgument(URI uri)
    {
        this.uri = uri;
    }

    /** Binds the URI's textual representation at the given parameter slot. */
    @Override
    public void apply(int position, PreparedStatement statement, StatementContext ctx)
            throws SQLException
    {
        final String textual = uri.toString();
        statement.setString(position, textual);
    }
}
| 7,404 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/jdbi/URIArgumentFactory.java | package com.airbnb.airpal.sql.jdbi;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.Argument;
import org.skife.jdbi.v2.tweak.ArgumentFactory;
import java.net.URI;
/**
 * JDBI {@link ArgumentFactory} recognizing {@link URI} values and binding
 * them via {@link URIArgument}.
 */
public class URIArgumentFactory implements ArgumentFactory<URI>
{
    /** Accepts only non-null URI instances (isInstance is false for null). */
    @Override
    public boolean accepts(Class<?> expectedType, Object value, StatementContext ctx)
    {
        return URI.class.isInstance(value);
    }

    /** Wraps the value in a URIArgument for binding. */
    @Override
    public Argument build(Class<?> expectedType, URI value, StatementContext ctx)
    {
        final Argument argument = new URIArgument(value);
        return argument;
    }
}
| 7,405 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/jdbi/UUIDArgumentFactory.java | package com.airbnb.airpal.sql.jdbi;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.Argument;
import org.skife.jdbi.v2.tweak.ArgumentFactory;
import java.util.UUID;
/**
 * JDBI {@link ArgumentFactory} recognizing {@link UUID} values and binding
 * them via {@link UUIDArgument}.
 */
public class UUIDArgumentFactory implements ArgumentFactory<UUID>
{
    /** Accepts only non-null UUID instances (isInstance is false for null). */
    @Override
    public boolean accepts(Class<?> expectedType, Object value, StatementContext ctx)
    {
        return UUID.class.isInstance(value);
    }

    /** Wraps the value in a UUIDArgument for binding. */
    @Override
    public Argument build(Class<?> expectedType, UUID value, StatementContext ctx)
    {
        final Argument argument = new UUIDArgument(value);
        return argument;
    }
}
| 7,406 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/jdbi/QueryStoreMapper.java | package com.airbnb.airpal.sql.jdbi;
import com.airbnb.airpal.api.queries.FeaturedQuery;
import com.airbnb.airpal.api.queries.SavedQuery;
import com.airbnb.airpal.api.queries.UserSavedQuery;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.UUID;
/**
 * JDBI row mapper turning a saved-queries row into a {@link UserSavedQuery}.
 * The "query" column holds JSON deserialized with Jackson.
 */
@Slf4j
public class QueryStoreMapper implements ResultSetMapper<SavedQuery>
{
    private final ObjectMapper objectMapper;

    public QueryStoreMapper(ObjectMapper objectMapper)
    {
        this.objectMapper = objectMapper;
    }

    /**
     * Maps one result-set row. On a JSON parse failure the error is logged and
     * null is returned — NOTE(review): JDBI will then include a null element
     * in the mapped list; confirm callers tolerate that.
     */
    @Override
    public SavedQuery map(int index, ResultSet r, StatementContext ctx)
            throws SQLException
    {
        try {
            return new UserSavedQuery(
                    objectMapper.readValue(r.getString("query"), FeaturedQuery.QueryWithPlaceholders.class),
                    r.getString("user"),
                    r.getString("name"),
                    r.getString("description"),
                    null, // not read from this row — TODO confirm which constructor slot this is
                    UUID.fromString(r.getString("uuid")),
                    false);
        }
        catch (IOException e) {
            log.error("Caught exception mapping SavedQuery", e);
        }
        return null;
    }
}
| 7,407 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/dao/JobTableDAO.java | package com.airbnb.airpal.sql.dao;
import com.airbnb.airpal.sql.beans.JobTableRow;
import org.skife.jdbi.v2.sqlobject.BindBean;
import org.skife.jdbi.v2.sqlobject.SqlBatch;
public interface JobTableDAO
{
@SqlBatch(
"INSERT INTO job_tables (job_id, table_id) " +
"VALUES (:jobId, :tableId)")
public void createJobTables(@BindBean Iterable<JobTableRow> jobTableRows);
} | 7,408 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/dao/TableDAO.java | package com.airbnb.airpal.sql.dao;
import com.airbnb.airpal.presto.Table;
import com.airbnb.airpal.sql.Util;
import com.airbnb.airpal.sql.beans.TableRow;
import com.hubspot.rosetta.jdbi.RosettaBinder;
import lombok.extern.slf4j.Slf4j;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.sqlobject.GetGeneratedKeys;
import org.skife.jdbi.v2.sqlobject.SqlBatch;
import org.skife.jdbi.v2.sqlobject.mixins.GetHandle;
import java.util.Collections;
import java.util.List;
import static java.lang.String.format;
/**
 * DAO for the {@code tables} metadata table.
 */
@Slf4j
public abstract class TableDAO
        implements GetHandle
{
    /**
     * Batch-inserts the given tables.
     * NOTE(review): @GetGeneratedKeys on a void batch method has no observable
     * effect here — confirm whether generated keys were meant to be returned.
     */
    @SqlBatch(
            "INSERT INTO tables (connector_id, schema_, table_, columns) " +
            "VALUES (:connectorId, :schema, :table, :columns)")
    @GetGeneratedKeys
    public abstract void createTables(@RosettaBinder Iterable<Table> tables);

    /**
     * Fetches the stored rows matching the given tables. Best-effort: any
     * failure is logged and an empty list is returned.
     * NOTE(review): the WHERE clause is built by string interpolation via
     * Util.getTableCondition — safe only for trusted table identifiers.
     */
    public List<TableRow> getTables(List<Table> tables)
    {
        try (Handle handle = getHandle()) {
            return handle
                    .createQuery(format("SELECT * FROM tables WHERE %s", Util.getTableCondition(tables)))
                    .mapTo(TableRow.class)
                    .list();
        } catch (Exception e) {
            log.error("getTables caught exception", e);
            return Collections.emptyList();
        }
    }
}
| 7,409 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/dao/JobOutputDAO.java | package com.airbnb.airpal.sql.dao;
import com.airbnb.airpal.api.output.PersistentJobOutput;
import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.BindBean;
import org.skife.jdbi.v2.sqlobject.GetGeneratedKeys;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;
/**
 * DAO for the {@code job_outputs} table, recording where a job's output was
 * persisted.
 */
public interface JobOutputDAO
{
    /**
     * Inserts one output record for the given job.
     *
     * @param output bound by bean properties: type, description, location
     * @param jobId  id of the owning row in {@code jobs}
     * @return the generated primary key of the new job_outputs row
     */
    @SqlUpdate(
            "INSERT INTO job_outputs (type, description, location, job_id) " +
            "VALUES (:type, :description, :location, :jobId)")
    @GetGeneratedKeys
    long createJobOutput(
            @BindBean PersistentJobOutput output,
            @Bind("jobId") long jobId
    );
}
| 7,410 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/dao/JobDAO.java | package com.airbnb.airpal.sql.dao;
import com.airbnb.airpal.api.Job;
import com.hubspot.rosetta.jdbi.RosettaBinder;
import org.skife.jdbi.v2.sqlobject.GetGeneratedKeys;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;
public interface JobDAO
{
@SqlUpdate(
"INSERT INTO jobs (query, user, uuid, queryStats, state, columns, query_finished, query_started, error) " +
"VALUES (" +
":query, " +
":user, " +
":uuid, " +
":queryStats, " +
":state, " +
":columns, " +
":queryFinished, " +
":queryStarted, " +
":error)")
@GetGeneratedKeys
long createJob(
@RosettaBinder Job job
);
} | 7,411 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/beans/JobUsageCountRow.java | package com.airbnb.airpal.sql.beans;
import com.airbnb.airpal.presto.Table;
import lombok.Data;
import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.StatementContext;
import java.sql.SQLException;
import java.util.Map;
/**
 * Row bean for a per-table job-usage count, plus a JDBI folder that
 * aggregates the counts into a {@code Map<Table, Long>}.
 */
@Data
public class JobUsageCountRow
{
    private long count;         // usage count for this (connector, schema, table) row
    private String connectorId;
    private String schema;
    private String table;

    /** Converts this row's identifying columns into a {@link Table}. */
    public Table toTable()
    {
        return new Table(getConnectorId(), getSchema(), getTable());
    }

    /** Folds rows into a map, summing counts for rows that map to the same table. */
    public static class CountFolder implements Folder3<Map<Table, Long>, JobUsageCountRow>
    {
        @Override
        public Map<Table, Long> fold(Map<Table, Long> accumulator, JobUsageCountRow rs, FoldController control, StatementContext ctx)
                throws SQLException
        {
            // merge() replaces the original containsKey/get/put double lookup
            // with a single map operation; behavior is identical.
            accumulator.merge(rs.toTable(), rs.getCount(), Long::sum);
            return accumulator;
        }
    }
}
| 7,412 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/beans/JobTableOutputJoinRow.java | package com.airbnb.airpal.sql.beans;
import com.airbnb.airpal.api.Job;
import com.airbnb.airpal.api.JobState;
import com.airbnb.airpal.api.output.PersistentJobOutput;
import com.airbnb.airpal.api.output.PersistentJobOutputFactory;
import com.airbnb.airpal.presto.Table;
import com.facebook.presto.client.Column;
import com.facebook.presto.client.QueryError;
import com.facebook.presto.execution.QueryStats;
import com.hubspot.rosetta.StoredAsJson;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.joda.time.DateTime;
import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.StatementContext;
import java.net.URI;
import java.sql.SQLException;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Flattened row bean for the jobs / job tables / job outputs join. The job
 * columns repeat on every joined row; {@link JobFolder} de-duplicates them
 * into one {@link Job} per id while accumulating the tables used.
 */
@Slf4j
@Data
public class JobTableOutputJoinRow
{
    private long id;
    private String query;
    private String user;
    private UUID uuid;
    @StoredAsJson
    private QueryStats queryStats;
    private JobState state;
    @StoredAsJson
    private List<Column> columns;
    private DateTime queryFinished;
    private DateTime queryStarted;
    @StoredAsJson
    private QueryError error;
    // Columns from the joined table row; may all be null when the job used no tables.
    private String connectorId;
    private String schema;
    private String table;
    // Columns from the joined job_outputs row.
    private String type;
    private String description;
    private URI location;

    /** Reconstructs the persistent output descriptor from the output columns. */
    public PersistentJobOutput getJobOutput()
    {
        return PersistentJobOutputFactory.create(getType(), getDescription(), getLocation());
    }

    /**
     * JDBI folder collapsing join rows into one Job per id. The first row for
     * an id creates the Job (with an empty table set); every row with
     * non-null table columns adds a Table to that Job's tablesUsed set.
     */
    public static class JobFolder implements Folder3<Map<Long, Job>, JobTableOutputJoinRow>
    {
        @Override
        public Map<Long, Job> fold(
                Map<Long, Job> accumulator,
                JobTableOutputJoinRow rs,
                FoldController control,
                StatementContext ctx)
                throws SQLException
        {
            if (!accumulator.containsKey(rs.getId())) {
                accumulator.put(
                        rs.getId(),
                        new Job(
                                rs.getUser(),
                                rs.getQuery(),
                                rs.getUuid(),
                                rs.getJobOutput(),
                                rs.getQueryStats(),
                                rs.getState(),
                                rs.getColumns(),
                                new HashSet<Table>(),
                                rs.getQueryStarted(),
                                rs.getError(),
                                rs.getQueryFinished()));
            }
            Job job = accumulator.get(rs.getId());
            // Only add a table when the joined columns are all present
            // (LEFT-JOIN rows with no table leave them null).
            if (rs.getConnectorId() != null && rs.getSchema() != null && rs.getTable() != null) {
                job.getTablesUsed().add(new Table(rs.getConnectorId(), rs.getSchema(), rs.getTable()));
            }
            return accumulator;
        }
    }
}
| 7,413 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/beans/TableRow.java | package com.airbnb.airpal.sql.beans;
import com.airbnb.airpal.presto.Table;
import com.facebook.presto.client.Column;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Function;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;
import javax.annotation.Nullable;
import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;
import static com.google.common.base.Preconditions.checkNotNull;
/**
 * Row bean for the {@code tables} metadata table, plus a converter to
 * {@link Table} and the JDBI row mapper.
 */
@Data
@AllArgsConstructor
public class TableRow
{
    private long id;
    private String connectorId;
    private String schema;
    private String table;
    private List<Column> columns;

    // Fix: made final — a public static mutable field could be reassigned by
    // any caller; the shared function instance is intended to be a constant.
    public static final MapToTable MAP_TO_TABLE = new MapToTable();

    /** Guava function converting a TableRow into its {@link Table} identity. */
    public static class MapToTable implements Function<TableRow, Table>
    {
        @Nullable
        @Override
        public Table apply(@Nullable TableRow input)
        {
            checkNotNull(input, "input was null");
            return new Table(input.getConnectorId(), input.getSchema(), input.getTable());
        }
    }

    /**
     * JDBI row mapper; the "columns" column holds a JSON list of
     * {@link Column} deserialized with Jackson.
     */
    @Slf4j
    public static class TableRowMapper implements ResultSetMapper<TableRow>
    {
        private final ObjectMapper objectMapper;
        private final TypeReference<List<Column>> columnTypeReference;

        public TableRowMapper(ObjectMapper objectMapper)
        {
            this.objectMapper = objectMapper;
            this.columnTypeReference = new TypeReference<List<Column>>() {};
        }

        /**
         * Maps one result-set row. On a JSON parse failure the error is logged
         * and null is returned — NOTE(review): JDBI will then include a null
         * element in the mapped list; confirm callers tolerate that.
         */
        @Override
        public TableRow map(int index, ResultSet r, StatementContext ctx)
                throws SQLException
        {
            try {
                return new TableRow(
                        r.getLong("id"),
                        r.getString("connector_id"),
                        r.getString("schema_"),
                        r.getString("table_"),
                        objectMapper.<List<Column>>readValue(r.getString("columns"), columnTypeReference));
            }
            catch (IOException e) {
                log.error("Caught exception mapping TableRow", e);
                return null;
            }
        }
    }
}
| 7,414 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql | Create_ds/airpal/src/main/java/com/airbnb/airpal/sql/beans/JobTableRow.java | package com.airbnb.airpal.sql.beans;
import lombok.AllArgsConstructor;
import lombok.Data;
/**
 * Row bean for the {@code job_tables} join table linking a job to a table it
 * used (see JobTableDAO's INSERT of job_id/table_id).
 */
@Data
@AllArgsConstructor
public class JobTableRow
{
    private long id;      // surrogate primary key
    private long jobId;   // references the jobs row
    private long tableId; // references the tables row
}
| 7,415 |
0 | Create_ds/airpal/src/main/java/com/airbnb/airlift/http | Create_ds/airpal/src/main/java/com/airbnb/airlift/http/client/ForQueryInfoClient.java | package com.airbnb.airlift.http.client;
import javax.inject.Qualifier;
import java.lang.annotation.Retention;
import java.lang.annotation.Target;
import static java.lang.annotation.ElementType.FIELD;
import static java.lang.annotation.ElementType.METHOD;
import static java.lang.annotation.ElementType.PARAMETER;
import static java.lang.annotation.RetentionPolicy.RUNTIME;
@Retention(RUNTIME)
@Target({FIELD, PARAMETER, METHOD})
@Qualifier
public @interface ForQueryInfoClient
{
} | 7,416 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/AllowAllUser.java | package com.airbnb.shiro;
import com.airbnb.airpal.core.AirpalUser;
import lombok.Data;
import org.joda.time.Duration;
import java.io.Serializable;
import java.util.Set;
@Data
public class AllowAllUser
implements AirpalUser, Serializable
{
private static final long serialVersionUID = 2138145047434723791L;
private final String userName;
private final Set<String> groups;
private final String defaultSchema;
private final Duration queryTimeout;
private final String accessLevel;
@Override
public boolean isPermitted(String permission)
{
return true;
}
} | 7,417 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/AllowAllToken.java | package com.airbnb.shiro;
import com.google.common.base.Charsets;
import com.google.common.collect.Sets;
import com.google.common.hash.Funnel;
import com.google.common.hash.Hashing;
import com.google.common.hash.PrimitiveSink;
import lombok.Getter;
import org.apache.shiro.authc.HostAuthenticationToken;
import org.apache.shiro.authc.RememberMeAuthenticationToken;
import org.joda.time.Duration;
import java.util.Set;
/**
 * Shiro authentication token for the allow-all scheme: carries the user name,
 * group memberships, default schema, query timeout and access level supplied
 * at login, and is accepted by {@link AllowAllRealm}.
 */
public class AllowAllToken
        implements RememberMeAuthenticationToken, HostAuthenticationToken
{
    private final boolean rememberMe;
    private final String host;
    @Getter
    private final String userName;
    @Getter
    private final Set<String> groups;
    @Getter
    private final String defaultSchema;
    @Getter
    private final Duration queryTimeout;
    @Getter
    private final String accessLevel;

    public AllowAllToken(String host,
            boolean rememberMe,
            String userName,
            Iterable<String> groups,
            String defaultSchema,
            Duration queryTimeout,
            String accessLevel)
    {
        this.host = host;
        this.rememberMe = rememberMe;
        this.userName = userName;
        // Defensive copy into a mutable HashSet.
        this.groups = Sets.newHashSet(groups);
        this.defaultSchema = defaultSchema;
        this.queryTimeout = queryTimeout;
        this.accessLevel = accessLevel;
    }

    @Override
    public String getHost()
    {
        return host;
    }

    @Override
    public boolean isRememberMe()
    {
        return rememberMe;
    }

    /** Builds a fresh AllowAllUser principal from the token's fields on every call. */
    @Override
    public Object getPrincipal()
    {
        return new AllowAllUser(getUserName(), getGroups(), getDefaultSchema(), getQueryTimeout(), getAccessLevel());
    }

    /**
     * Derives the "credentials" as a SHA-256 hash over the user name and
     * group names.
     * NOTE(review): the funnel iterates the HashSet of groups, whose order is
     * unspecified, so the hash may differ between runs for the same user —
     * confirm whether a stable credential value is required.
     */
    @Override
    public Object getCredentials()
    {
        AllowAllUser user = (AllowAllUser) getPrincipal();
        if (user != null) {
            return Hashing.sha256().hashObject(user, new Funnel<AllowAllUser>()
            {
                @Override
                public void funnel(AllowAllUser from, PrimitiveSink into)
                {
                    Set<String> fromGroups = from.getGroups();
                    String fromName = from.getUserName();
                    into.putString(fromName, Charsets.UTF_8);
                    for (String fromGroup : fromGroups) {
                        into.putString(fromGroup, Charsets.UTF_8);
                    }
                }
            });
        }
        return null;
    }
}
| 7,418 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/AllowAllRealm.java | package com.airbnb.shiro;
import com.google.common.base.Strings;
import com.google.common.collect.Sets;
import lombok.Setter;
import org.apache.shiro.authc.AuthenticationException;
import org.apache.shiro.authc.AuthenticationInfo;
import org.apache.shiro.authc.AuthenticationToken;
import org.apache.shiro.authc.SimpleAuthenticationInfo;
import org.apache.shiro.authz.AuthorizationException;
import org.apache.shiro.authz.AuthorizationInfo;
import org.apache.shiro.authz.Permission;
import org.apache.shiro.authz.SimpleAuthorizationInfo;
import org.apache.shiro.realm.AuthorizingRealm;
import org.apache.shiro.subject.PrincipalCollection;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
/**
 * Shiro realm for the allow-all scheme: authenticates any
 * {@link AllowAllToken} carrying a user name and at least one group, and
 * authorizes by matching the user's groups against configured
 * {@link UserGroup} entries.
 */
public class AllowAllRealm
        extends AuthorizingRealm
{
    // Fix: made final — the realm name is a constant and was previously a
    // mutable static field.
    private static final String REALM_NAME = AllowAllRealm.class.getSimpleName();

    @Setter
    private List<UserGroup> groups = Collections.emptyList();

    /** Intentionally a no-op: this realm's name is fixed to REALM_NAME. */
    @Override
    public void setName(String name)
    {}

    @Override
    public String getName()
    {
        return REALM_NAME;
    }

    /** This realm only handles AllowAllToken instances. */
    @Override
    public boolean supports(AuthenticationToken token)
    {
        return (token instanceof AllowAllToken);
    }

    /**
     * Grants the "user" role plus the permissions of the first configured
     * UserGroup matched by each principal's group strings.
     *
     * @throws AuthorizationException when the principal collection carries no
     *         AllowAllUser
     */
    @Override
    protected AuthorizationInfo doGetAuthorizationInfo(PrincipalCollection principals)
    {
        Set<String> roles = Sets.newHashSet("user");
        Set<Permission> permissions = Sets.newHashSet();
        Collection<AllowAllUser> principalsCollection = principals.byType(AllowAllUser.class);
        if (principalsCollection.isEmpty()) {
            throw new AuthorizationException("No principals!");
        }
        for (AllowAllUser user : principalsCollection) {
            for (UserGroup userGroup : groups) {
                // First matching group wins for this principal.
                if (userGroup.representedByGroupStrings(user.getGroups())) {
                    permissions.addAll(userGroup.getPermissions());
                    break;
                }
            }
        }
        SimpleAuthorizationInfo authorizationInfo = new SimpleAuthorizationInfo(roles);
        authorizationInfo.setObjectPermissions(permissions);
        return authorizationInfo;
    }

    /**
     * Accepts any AllowAllToken with a non-empty user name and at least one
     * group; credentials are the token's own derived hash.
     *
     * @throws AuthenticationException on a foreign token type, a blank user
     *         name, or an empty group set
     */
    @Override
    protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException
    {
        if (!(token instanceof AllowAllToken)) {
            throw new AuthenticationException("Incorrect token provided");
        }
        AllowAllToken authToken = (AllowAllToken) token;
        if (Strings.isNullOrEmpty(authToken.getUserName())) {
            throw new AuthenticationException("No valid username");
        } else if ((authToken.getGroups() == null) || authToken.getGroups().isEmpty()) {
            throw new AuthenticationException("No valid groups");
        }
        return new SimpleAuthenticationInfo(authToken.getPrincipal(),
                authToken.getCredentials(),
                REALM_NAME);
    }
}
| 7,419 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/UserGroup.java | package com.airbnb.shiro;
import com.google.common.collect.ImmutableSet;
import lombok.Getter;
import lombok.Setter;
import org.apache.shiro.authz.Permission;
import org.apache.shiro.authz.permission.WildcardPermission;
import org.joda.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.Set;
/**
 * Configuration bean describing a user group: its default connector/schema,
 * the permissions of its members, the group names that identify membership,
 * its query timeout, and its access level.
 */
public class UserGroup
{
    @Getter @Setter
    private String defaultConnector;

    @Getter @Setter
    private String defaultSchema;

    /** Permissions granted to members; populated from strings via {@link #setPermissions(Set)}. */
    @Getter
    private Set<Permission> permissions = Collections.emptySet();

    /** Group names that identify membership in this user group. */
    @Setter
    private Set<String> groups = Collections.emptySet();

    /** Raw timeout string (Dropwizard duration syntax); parsed lazily by {@link #getQueryTimeout()}. */
    @Setter
    private String timeout;

    /** Cached parsed form of {@link #timeout}. */
    private Duration queryTimeout;

    @Getter @Setter
    private String accessLevel;

    /**
     * @param groups candidate group names
     * @return true if any of the given names matches this user group
     */
    public boolean representedByGroupStrings(Collection<String> groups)
    {
        for (String group : groups) {
            if (this.representedByGroupString(group)) {
                return true;
            }
        }
        return false;
    }

    /**
     * @param group a group name
     * @return true if this user group contains the given name
     */
    public boolean representedByGroupString(String group)
    {
        return this.groups.contains(group);
    }

    /** Converts permission strings into Shiro {@link WildcardPermission}s. */
    public void setPermissions(Set<String> permissions)
    {
        ImmutableSet.Builder<Permission> builder = ImmutableSet.builder();
        for (String permission : permissions) {
            builder.add(new WildcardPermission(permission));
        }
        this.permissions = builder.build();
    }

    /**
     * Lazily parses and caches the configured timeout.
     *
     * @return the query timeout as a Joda {@link Duration}
     * @throws IllegalStateException if no timeout string was configured
     */
    public Duration getQueryTimeout()
    {
        if (queryTimeout == null) {
            if (timeout == null) {
                // Fail fast with a clear message instead of an opaque NPE
                // from deep inside the duration parser.
                throw new IllegalStateException("No timeout configured for user group");
            }
            io.dropwizard.util.Duration duration = io.dropwizard.util.Duration.parse(timeout);
            queryTimeout = Duration.millis(duration.toMilliseconds());
        }
        return queryTimeout;
    }
}
| 7,420 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/ExampleLDAPRealm.java | package com.airbnb.shiro;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.shiro.authc.SimpleAuthenticationInfo;
import org.apache.shiro.authc.AuthenticationToken;
import org.apache.shiro.authc.AuthenticationInfo;
import org.apache.shiro.authc.UsernamePasswordToken;
import org.apache.shiro.authc.AuthenticationException;
import org.apache.shiro.authz.AuthorizationException;
import org.apache.shiro.authz.AuthorizationInfo;
import org.apache.shiro.authz.Permission;
import org.apache.shiro.authz.SimpleAuthorizationInfo;
import org.apache.shiro.ldap.UnsupportedAuthenticationMechanismException;
import org.apache.shiro.realm.ldap.JndiLdapContextFactory;
import org.apache.shiro.realm.ldap.JndiLdapRealm;
import org.apache.shiro.subject.PrincipalCollection;
import org.joda.time.Duration;
import javax.naming.AuthenticationNotSupportedException;
import javax.naming.NamingException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
/**
 * Example Shiro realm that authenticates users against LDAP and authorizes
 * them from the configured {@link UserGroup} list. The connection settings in
 * the constructor are placeholders to be replaced for a real deployment.
 */
@Slf4j
public class ExampleLDAPRealm extends JndiLdapRealm
{
    /** Groups used to resolve permissions during authorization; injected via setter. */
    @Setter
    private List<UserGroup> groups = Collections.emptyList();

    private JndiLdapContextFactory lcf;

    // Fixed realm name; was a mutable "static" field, now a proper constant.
    private static final String REALM_NAME = ExampleLDAPRealm.class.getSimpleName();

    public ExampleLDAPRealm()
    {
        this.setUserDnTemplate("Default DN template");
        lcf = new JndiLdapContextFactory();
        lcf.setSystemUsername("System username that will be used when creating an LDAP connection");
        lcf.setSystemPassword("System Password for the username used to create LDAP connection ");
        lcf.setAuthenticationMechanism("simple");
        lcf.setUrl("LDAP URL");
        this.setContextFactory(lcf);
    }

    /** Deliberately a no-op so the realm name stays fixed to the class's simple name. */
    @Override
    public void setName(String name)
    {}

    @Override
    public String getName()
    {
        return REALM_NAME;
    }

    /** Only username/password tokens are handled by this realm. */
    @Override
    public boolean supports(AuthenticationToken token)
    {
        return (token instanceof UsernamePasswordToken);
    }

    /**
     * Grants the "user" role plus the permissions of the first configured
     * {@link UserGroup} that matches each principal's group names.
     *
     * @throws AuthorizationException if no {@link AllowAllUser} principal is present
     */
    @Override
    protected AuthorizationInfo doGetAuthorizationInfo(PrincipalCollection principals)
    {
        Set<String> roles = Sets.newHashSet("user");
        Set<Permission> permissions = Sets.newHashSet();
        Collection<AllowAllUser> principalsCollection = principals.byType(AllowAllUser.class);
        if (principalsCollection.isEmpty()) {
            throw new AuthorizationException("No principals!");
        }
        for (AllowAllUser user : principalsCollection) {
            for (UserGroup userGroup : groups) {
                if (userGroup.representedByGroupStrings(user.getGroups())) {
                    // Only the first matching group contributes permissions.
                    permissions.addAll(userGroup.getPermissions());
                    break;
                }
            }
        }
        SimpleAuthorizationInfo authorizationInfo = new SimpleAuthorizationInfo(roles);
        authorizationInfo.setObjectPermissions(permissions);
        return authorizationInfo;
    }

    /**
     * Authenticates the token against LDAP, translating naming failures into
     * Shiro authentication exceptions.
     *
     * @throws AuthenticationException if the token type is wrong or LDAP rejects the credentials
     */
    @Override
    protected AuthenticationInfo doGetAuthenticationInfo(AuthenticationToken token) throws AuthenticationException
    {
        if (!(token instanceof UsernamePasswordToken)) {
            // Mirror AllowAllRealm: raise an authentication error instead of
            // letting an unchecked cast throw ClassCastException.
            throw new AuthenticationException("Incorrect token provided");
        }
        UsernamePasswordToken usernamePasswordToken = (UsernamePasswordToken) token;
        try {
            queryForAuthenticationInfo(usernamePasswordToken, getContextFactory());
        } catch (AuthenticationNotSupportedException e) {
            throw new UnsupportedAuthenticationMechanismException("Unsupported configured authentication mechanism", e);
        } catch (javax.naming.AuthenticationException e) {
            throw new AuthenticationException("LDAP authentication failed.", e);
        } catch (NamingException e) {
            throw new AuthenticationException("LDAP naming error while attempting to authenticate user.", e);
        }
        // Same value as before (class simple name), now via the shared constant.
        return new SimpleAuthenticationInfo(token.getPrincipal(),
                token.getCredentials(),
                REALM_NAME);
    }
}
| 7,421 |
0 | Create_ds/airpal/src/main/java/com/airbnb | Create_ds/airpal/src/main/java/com/airbnb/shiro/SessionListenerLogger.java | package com.airbnb.shiro;
import lombok.extern.slf4j.Slf4j;
import org.apache.shiro.session.Session;
import org.apache.shiro.session.SessionListenerAdapter;
/**
 * Logs Shiro session lifecycle events (start, stop, expiration) at WARN level.
 */
@Slf4j
public class SessionListenerLogger extends SessionListenerAdapter
{
    // The explicit session.toString() calls were removed: SLF4J renders the
    // argument lazily via {} (only when the level is enabled), and passing the
    // object avoids an NPE should a null session ever be delivered.

    @Override
    public void onStart(Session session)
    {
        log.warn("Saw START of Session: [{}]", session);
    }

    @Override
    public void onStop(Session session)
    {
        log.warn("Saw STOP of Session: [{}]", session);
    }

    @Override
    public void onExpiration(Session session)
    {
        log.warn("Saw EXPIRATION of Session: [{}]", session);
    }
}
| 7,422 |
0 | Create_ds/airpal/src/main/java/com/airbnb/shiro | Create_ds/airpal/src/main/java/com/airbnb/shiro/filter/AllowAllFilter.java | package com.airbnb.shiro.filter;
import com.airbnb.shiro.AllowAllToken;
import com.airbnb.shiro.UserGroup;
import com.google.common.collect.ImmutableSet;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.shiro.SecurityUtils;
import org.apache.shiro.authc.AuthenticationToken;
import org.apache.shiro.session.Session;
import org.apache.shiro.web.filter.authc.AuthenticatingFilter;
import org.apache.shiro.web.util.WebUtils;
import org.joda.time.Duration;
import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.net.URI;
import java.util.Collections;
import java.util.List;
/**
 * Shiro filter that logs every request in as an anonymous user belonging to
 * the "all" group, effectively disabling authentication.
 */
@Slf4j
public class AllowAllFilter
        extends AuthenticatingFilter
{
    /** Name of the servlet session cookie cleared before login redirects. */
    public static final String JSESSIONID = "JSESSIONID";

    /** Configured user groups; injected via setter. */
    @Setter
    private List<UserGroup> groups = Collections.emptyList();

    public AllowAllFilter() {}

    /**
     * Creates an {@link AllowAllToken} for an anonymous user in the "all"
     * group with the "default" schema/access level and a one-hour timeout.
     */
    @Override
    protected AuthenticationToken createToken(ServletRequest request, ServletResponse response) throws Exception
    {
        log.info("createToken called");
        return new AllowAllToken(request.getRemoteHost(), true, "anonymous", ImmutableSet.of("all"), "default", Duration.standardHours(1), "default");
    }

    // The executeLogin override that merely delegated to super was removed:
    // the inherited implementation is invoked unchanged.

    /** Any denied access simply triggers a login attempt, which always succeeds here. */
    @Override
    protected boolean onAccessDenied(ServletRequest request, ServletResponse response) throws Exception
    {
        log.info("onAccessDenied called");
        return executeLogin(request, response);
    }

    // NOTE(review): the two private helpers below are not referenced anywhere
    // in this class — presumably kept for future login redirection; confirm
    // before deleting.

    private boolean userIsLoggedIn()
    {
        Session session = SecurityUtils.getSubject().getSession(false);
        return (session != null);
    }

    /** Clears the session cookie and redirects to the configured login URL. */
    private void redirectToInternalLogin(ServletRequest request, ServletResponse response) throws IOException
    {
        Cookie sessionCookie = new Cookie(JSESSIONID, "");
        sessionCookie.setMaxAge(0); // max-age 0 deletes the cookie on the client
        HttpServletResponse httpResponse = WebUtils.toHttp(response);
        httpResponse.addCookie(sessionCookie);
        WebUtils.issueRedirect(request,
                response,
                getLoginUrl(),
                Collections.emptyMap(),
                !(URI.create(getLoginUrl()).isAbsolute())); // relative login URLs stay context-relative
    }
}
| 7,423 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/Csv2RdfConfigTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.regex.Pattern;
import org.junit.jupiter.api.Test;
/**
 * Tests loading of {@link PropertyGraph2RdfConverter} configuration files:
 * default values, a complete configuration, and rejection of invalid inputs.
 */
public class Csv2RdfConfigTest {

    /** Directory holding the test configuration property files. */
    private static final Path CONFIG_DIR = Paths.get("src", "test", "configurationTests");

    /** Loads a converter from a properties file located in {@link #CONFIG_DIR}. */
    private static PropertyGraph2RdfConverter load(String fileName) {
        return new PropertyGraph2RdfConverter(CONFIG_DIR.resolve(fileName).toFile());
    }

    /** Asserts that loading the given configuration fails with exactly the expected message. */
    private static void assertLoadingFails(String fileName, String expectedMessage) {
        Exception exception = assertThrows(Csv2RdfException.class, () -> load(fileName));
        assertEquals(expectedMessage, exception.getMessage());
    }

    @Test
    public void defaultValues() {
        // A null configuration file means all defaults apply.
        PropertyGraph2RdfConverter converter = new PropertyGraph2RdfConverter(null);
        assertEquals("csv", converter.getInputFileExtension());
        assertTrue(converter.getMapper().isAlwaysAddPropertyStatements());
        assertNotNull(converter.getMapper().getMapping());
        assertNotNull(converter.getTransformer().getUriPostTransformations());
    }

    @Test
    public void allConfigurationFields() throws Exception {
        PropertyGraph2RdfConverter converter = load("complete.properties");
        assertEquals("ife", converter.getInputFileExtension());
        assertEquals("tn", converter.getMapper().getMapping().getTypeNamespace());
        assertEquals("vn", converter.getMapper().getMapping().getVertexNamespace());
        assertEquals("en", converter.getMapper().getMapping().getEdgeNamespace());
        assertEquals("ec", converter.getMapper().getMapping().getEdgeContextNamespace());
        assertEquals("vpn", converter.getMapper().getMapping().getVertexPropertyNamespace());
        assertEquals("epn", converter.getMapper().getMapping().getEdgePropertyNamespace());
        assertEquals("dng:a", converter.getMapper().getMapping().getDefaultNamedGraph().toString());
        assertEquals("dt:a", converter.getMapper().getMapping().getDefaultType().toString());
        assertEquals("dp:a", converter.getMapper().getMapping().getDefaultPredicate().toString());
        assertFalse(converter.getMapper().isAlwaysAddPropertyStatements());
        assertEquals(2, converter.getMapper().getMapping().getPgVertexType2PropertyForRdfsLabel().size());
        assertEquals("A", converter.getMapper().getMapping().getPgVertexType2PropertyForRdfsLabel().get("a"));
        assertEquals("B", converter.getMapper().getMapping().getPgVertexType2PropertyForRdfsLabel().get("b"));
        assertEquals(1, converter.getMapper().getMapping().getPgProperty2RdfResourcePattern().size());
        assertEquals("A{{VALUE}}", converter.getMapper().getMapping().getPgProperty2RdfResourcePattern().get("a"));
        // Transformations are kept in their configured index order.
        assertEquals(4, converter.getTransformer().getUriPostTransformations().size());
        Iterator<UriPostTransformation> iterator = converter.getTransformer().getUriPostTransformations().iterator();
        assertEquals(new UriPostTransformation("sp0", "tu0", "vp0", "dp0{{VALUE}}"), iterator.next());
        assertEquals(new UriPostTransformation("sp1", "tu1", "vp1", "dp1{{VALUE}}"), iterator.next());
        assertEquals(new UriPostTransformation("sp2", "tu2", "vp2", "dp2{{VALUE}}"), iterator.next());
        assertEquals(new UriPostTransformation("sp6", "tu6", "vp6", "dp6{{VALUE}}"), iterator.next());
    }

    @Test
    public void duplicateIntOverridesFirst() throws Exception {
        // A duplicate transformation index replaces the earlier definition.
        PropertyGraph2RdfConverter converter = load("duplicate-int.properties");
        assertEquals(2, converter.getTransformer().getUriPostTransformations().size());
        Iterator<UriPostTransformation> iterator = converter.getTransformer().getUriPostTransformations().iterator();
        assertEquals(new UriPostTransformation("sp2", "tu2", "vp2", "dp2{{VALUE}}"), iterator.next());
        assertEquals(new UriPostTransformation("sp4", "tu3", "vp3", "dp3{{VALUE}}"), iterator.next());
    }

    @Test
    public void missingRequiredPropertyFails() throws Exception {
        assertLoadingFails("missing-property.properties",
                "Loading configuration failed because of invalid input at srcPattern: Missing required creator property 'srcPattern' (index 0)");
    }

    @Test
    public void unknownPropertyFails() throws Exception {
        assertLoadingFails("unknown-property.properties",
                "Loading configuration failed because of unknown property: _anything_");
    }

    @Test
    public void outputFileExtensionDisallowed() throws Exception {
        assertLoadingFails("output-file-extension.properties",
                "Loading configuration failed because of unknown property: outputFileExtension");
    }

    @Test
    public void unknownNestedPropertyFails() throws Exception {
        assertLoadingFails("unknown-nested-property.properties",
                "Loading configuration failed because of unknown property: prop");
    }

    @Test
    public void invalidTransformationDestinationPatternFails() throws Exception {
        assertLoadingFails("invalid-pattern.properties",
                "Loading configuration failed because of invalid input at uriPostTransformations: The pattern <dp6> for the new URI must contain the replacement variable {{VALUE}}.");
    }

    @Test
    public void invalidTransformationSourceRegexFails() throws Exception {
        // The regex-engine part of the message is not stable, so only the
        // prefix and the offending pattern are checked.
        Exception exception = assertThrows(Csv2RdfException.class, () -> load("invalid-regex.properties"));
        assertTrue(
                exception.getMessage().startsWith("Loading configuration failed because of invalid input at uriPostTransformations:")
                        && exception.getMessage().contains("{sp6}/resource/(\\d+)"),
                "{sp6}/resource/(\\d+) is an invalid regex and should have caused an exception when parsed");
    }

    @Test
    public void invalidDefaultNamedGraphIsRejected() throws Exception {
        assertLoadingFails("invalid-defaultNamedGraph.properties",
                "Loading configuration failed because of invalid input at defaultNamedGraph: Not a valid (absolute) IRI: dng");
    }

    @Test
    public void invalidDefaultTypeIsRejected() throws Exception {
        assertLoadingFails("invalid-defaultType.properties",
                "Loading configuration failed because of invalid input at defaultType: Not a valid (absolute) IRI: dt");
    }

    @Test
    public void invalidDefaultPredicateIsRejected() throws Exception {
        assertLoadingFails("invalid-defaultPredicate.properties",
                "Loading configuration failed because of invalid input at defaultPredicate: Not a valid (absolute) IRI: dp");
    }

    @Test
    public void missingConfigurationFileFails() throws Exception {
        File nonExistingProperties = Paths.get("target", "non-existing.properties").toFile();
        Exception exception = assertThrows(Csv2RdfException.class,
                () -> new PropertyGraph2RdfConverter(nonExistingProperties));
        assertTrue(exception.getMessage()
                .matches("Configuration file not found: .*" + Pattern.quote(nonExistingProperties.getPath())));
    }
}
| 7,424 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/NeptunePropertyGraphElementTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.io.StringReader;
import java.util.Iterator;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSetValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSingleValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvUserDefinedArrayProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphEdge;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphVertex;
/**
 * Tests parsing of Neptune CSV records into property graph vertices, edges,
 * and user-defined (single-valued, set-valued, and array) properties.
 */
public class NeptunePropertyGraphElementTest {

    private CSVFormat csvFormat;

    @BeforeEach
    public void init() {
        csvFormat = NeptuneCsvInputParser.createCSVFormat();
    }

    /** Parses a single CSV line and returns its first record. */
    private CSVRecord record(String line) throws IOException {
        return csvFormat.parse(new StringReader(line)).iterator().next();
    }

    /** Parses a header line plus a data line into a property graph element. */
    private NeptunePropertyGraphElement element(String headerLine, String dataLine) throws IOException {
        NeptuneCsvHeader header = NeptuneCsvHeader.parse(record(headerLine));
        return NeptuneCsvInputParser.create(header, record(dataLine));
    }

    @Test
    public void vertex() throws IOException {
        NeptunePropertyGraphVertex vertex = (NeptunePropertyGraphVertex) element("~id,~label,relation", "1,name,next");
        assertEquals("1", vertex.getId());
        assertEquals(1, vertex.getLabels().size());
        assertEquals("name", vertex.getLabels().get(0));
        assertEquals(1, vertex.getUserDefinedProperties().size());
        NeptuneCsvUserDefinedProperty columnValue = vertex.getUserDefinedProperties().get(0);
        assertEquals("relation", columnValue.getName());
        assertEquals(1, columnValue.getValues().size());
        assertEquals("next", columnValue.getValues().iterator().next());
    }

    @Test
    public void vertexWithArrayProperty() throws IOException {
        // Array columns split their cell content on ';'.
        NeptunePropertyGraphVertex vertex = (NeptunePropertyGraphVertex) element(
                "~id,~label,relation:string[],costs:int(set)[]", "1,name,next;prev,30;45;93");
        assertEquals("1", vertex.getId());
        assertEquals(1, vertex.getLabels().size());
        assertEquals("name", vertex.getLabels().get(0));
        assertEquals(2, vertex.getUserDefinedProperties().size());
        NeptuneCsvUserDefinedProperty relation = vertex.getUserDefinedProperties().get(0);
        assertEquals("relation", relation.getName());
        assertEquals(2, relation.getValues().size());
        Iterator<String> ri = relation.getValues().iterator();
        assertEquals("next", ri.next());
        assertEquals("prev", ri.next());
        NeptuneCsvUserDefinedProperty costs = vertex.getUserDefinedProperties().get(1);
        assertEquals("costs", costs.getName());
        assertEquals(3, costs.getValues().size());
        Iterator<String> ci = costs.getValues().iterator();
        assertEquals("30", ci.next());
        assertEquals("45", ci.next());
        assertEquals("93", ci.next());
    }

    @Test
    public void missingVertexIdIsRejected() throws IOException {
        NeptuneCsvHeader header = NeptuneCsvHeader.parse(record("~id,~label"));
        CSVRecord data = record(",name");
        Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvInputParser.create(header, data));
        assertEquals("Vertex or edge ID must not be null or empty.", exception.getMessage());
    }

    @Test
    public void edge() throws IOException {
        NeptunePropertyGraphEdge edge = (NeptunePropertyGraphEdge) element("~id,~label,~from,~to,relation",
                "1,name,2,3,next");
        assertEquals("1", edge.getId());
        assertEquals("2", edge.getFrom());
        assertEquals("3", edge.getTo());
        assertTrue(edge.hasLabel());
        assertEquals("name", edge.getLabel());
        assertEquals(1, edge.getUserDefinedProperties().size());
        NeptuneCsvSingleValuedUserDefinedProperty columnValue = edge.getUserDefinedProperties().get(0);
        assertEquals("relation", columnValue.getName());
        assertEquals("next", columnValue.getValue());
    }

    @Test
    public void missingEdgeIdIsRejected() throws IOException {
        NeptuneCsvHeader header = NeptuneCsvHeader.parse(record("~id,~label,~from,~to"));
        CSVRecord data = record(",name,2,3");
        Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvInputParser.create(header, data));
        assertEquals("Vertex or edge ID must not be null or empty.", exception.getMessage());
    }

    @Test
    public void missingFromIsRejected() throws IOException {
        NeptuneCsvHeader header = NeptuneCsvHeader.parse(record("~id,~label,~from,~to"));
        CSVRecord data = record("1,name,,3");
        Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvInputParser.create(header, data));
        assertEquals("Value for ~from is missing at edge 1.", exception.getMessage());
    }

    @Test
    public void missingToIsRejected() throws IOException {
        NeptuneCsvHeader header = NeptuneCsvHeader.parse(record("~id,~label,~from,~to"));
        CSVRecord data = record("1,name,2,");
        Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvInputParser.create(header, data));
        assertEquals("Value for ~to is missing at edge 1.", exception.getMessage());
    }

    @Test
    public void vertexLabelIsOptional() throws IOException {
        NeptunePropertyGraphVertex vertex = (NeptunePropertyGraphVertex) element("~id,~label", "1,");
        assertEquals("1", vertex.getId());
        assertTrue(vertex.getLabels().isEmpty());
        assertTrue(vertex.getUserDefinedProperties().isEmpty());
    }

    @Test
    public void edgeLabelIsOptional() throws IOException {
        NeptunePropertyGraphEdge edge = (NeptunePropertyGraphEdge) element("~id,~label,~from,~to", "1,,2,3");
        assertEquals("1", edge.getId());
        assertEquals("2", edge.getFrom());
        assertEquals("3", edge.getTo());
        assertFalse(edge.hasLabel());
        assertTrue(edge.getUserDefinedProperties().isEmpty());
    }

    @Test
    public void vertexRejectsNullLabels() {
        NeptunePropertyGraphVertex vertex = new NeptunePropertyGraphVertex("test");
        String label = null;
        Exception exception = assertThrows(Csv2RdfException.class, () -> vertex.add(label));
        assertEquals("Vertex labels must not be null or empty.", exception.getMessage());
    }

    @Test
    public void vertexRejectsEmptyLabels() {
        NeptunePropertyGraphVertex vertex = new NeptunePropertyGraphVertex("test");
        String label = "";
        Exception exception = assertThrows(Csv2RdfException.class, () -> vertex.add(label));
        assertEquals("Vertex labels must not be null or empty.", exception.getMessage());
    }

    @Test
    public void arrayIgnoresNullAndEmptyValues() {
        assertTrue(new NeptuneCsvUserDefinedArrayProperty("test", DataType.INT, "").getValues().isEmpty());
        // Leading, trailing, and inner empty segments are dropped.
        NeptuneCsvUserDefinedArrayProperty multivaluedProperty = new NeptuneCsvUserDefinedArrayProperty("test",
                DataType.INT, ";a;b;;c;");
        assertEquals(3, multivaluedProperty.getValues().size());
        Iterator<String> it = multivaluedProperty.getValues().iterator();
        assertEquals("a", it.next());
        assertEquals("b", it.next());
        assertEquals("c", it.next());
    }

    @Test
    public void setValuedPropertyHasUniqueValues() {
        // Duplicates are dropped; insertion order of first occurrences is kept.
        NeptuneCsvSetValuedUserDefinedProperty property = new NeptuneCsvSetValuedUserDefinedProperty("test",
                DataType.INT, "1");
        property.add("3");
        property.add("3");
        property.add("4");
        property.add("2");
        property.add("4");
        assertEquals(4, property.getValues().size());
        Iterator<String> it = property.getValues().iterator();
        assertEquals("1", it.next());
        assertEquals("3", it.next());
        assertEquals("4", it.next());
        assertEquals("2", it.next());
    }

    @Test
    public void arrayPropertySplitsMultipleValues() {
        NeptuneCsvUserDefinedArrayProperty property = new NeptuneCsvUserDefinedArrayProperty("test", DataType.INT,
                "1;2");
        property.add("3;4");
        assertEquals(4, property.getValues().size());
        Iterator<String> it = property.getValues().iterator();
        assertEquals("1", it.next());
        assertEquals("2", it.next());
        assertEquals("3", it.next());
        assertEquals("4", it.next());
    }

    @Test
    public void arrayPropertyAddsSingleValues() {
        // Re-adding "1" does not create a duplicate entry.
        NeptuneCsvUserDefinedArrayProperty property = new NeptuneCsvUserDefinedArrayProperty("test", DataType.INT, "1");
        property.add("2");
        property.add("1");
        assertEquals(2, property.getValues().size());
        Iterator<String> it = property.getValues().iterator();
        assertEquals("1", it.next());
        assertEquals("2", it.next());
    }

    @Test
    public void singleValuedPropertyHasOneValue() {
        NeptuneCsvSingleValuedUserDefinedProperty property = new NeptuneCsvSingleValuedUserDefinedProperty("test",
                DataType.INT, "1");
        assertEquals("1", property.getValue());
        assertEquals(1, property.getValues().size());
        Iterator<String> it = property.getValues().iterator();
        assertEquals("1", it.next());
    }
}
| 7,425 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/NeptuneCsvHeaderTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptuneCsvHeader.NeptuneCsvEdgeHeader;
import software.amazon.neptune.csv2rdf.NeptuneCsvHeader.NeptuneCsvVertexHeader;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
public class NeptuneCsvHeaderTest {
private CSVFormat csvFormat;
// Fresh CSV format per test, mirroring the converter's own parsing setup.
@BeforeEach
public void init() {
csvFormat = NeptuneCsvInputParser.createCSVFormat();
}
@Test
public void invalidColumnIsRejected() throws IOException {
    // "~id:person" is a system column with an illegal type suffix.
    CSVRecord headerRecord = csvFormat.parse(new StringReader("~id:person")).iterator().next();
    Csv2RdfException thrown = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(headerRecord));
    assertEquals("Invalid system column encountered: ~id:person", thrown.getMessage());
}
@Test
public void nullColumnIsRejected() throws IOException {
    // The empty column between ~id and ~label must be rejected.
    CSVRecord headerRecord = csvFormat.parse(new StringReader("~id,,~label")).iterator().next();
    Csv2RdfException thrown = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(headerRecord));
    assertEquals("Empty column header encountered.", thrown.getMessage());
}
@Test
public void parseVertexHeaderIsSuccessful() throws IOException {
    // " ~iD " exercises case-insensitive, whitespace-tolerant system-column
    // matching; "code:" and ":name;~id" are user columns with odd but legal names.
    CSVRecord record = csvFormat.parse(new StringReader("code:, ~iD , :name;~id:ByTE")).iterator().next();
    NeptuneCsvHeader header = NeptuneCsvHeader.parse(record);
    assertNull(header.getLabel());
    assertTrue(header instanceof NeptuneCsvVertexHeader);
    assertEquals(2, header.getUserDefinedTypes().size());
    NeptuneCsvUserDefinedColumn code = header.getUserDefinedTypes().get(0);
    assertEquals("code:", code.getName());
    assertEquals(DataType.STRING, code.getDataType());
    assertEquals(1, header.getId());
    NeptuneCsvUserDefinedColumn name = header.getUserDefinedTypes().get(1);
    assertEquals(":name;~id", name.getName());
    assertEquals(DataType.BYTE, name.getDataType());
}
@Test
public void vertexHeaderWithEmptyColumnIsRejected() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,,name")).iterator().next();
Exception exception = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
Assertions.assertEquals("Empty column header encountered.", exception.getMessage());
CSVRecord record2 = csvFormat.parse(new StringReader("~id,~label, ,name")).iterator().next();
Exception exception2 = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record2));
Assertions.assertEquals("Empty column header encountered.", exception2.getMessage());
}
@Test
public void fromAndToMustBothBePresent() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,name")).iterator().next();
Exception exception = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
Assertions.assertEquals("An edge requires a ~to field.", exception.getMessage());
CSVRecord record2 = csvFormat.parse(new StringReader("~id,~label,~to,name")).iterator().next();
Exception exception2 = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record2));
Assertions.assertEquals("An edge requires a ~from field.", exception2.getMessage());
}
@Test
public void parseEdgeHeaderIsSucsessful() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("code:int, ~Label , ~id,~to, name:String, ~from"))
.iterator().next();
NeptuneCsvHeader header = NeptuneCsvHeader.parse(record);
assertTrue(header instanceof NeptuneCsvEdgeHeader);
assertEquals(2, header.getUserDefinedTypes().size());
NeptuneCsvUserDefinedColumn code = header.getUserDefinedTypes().get(0);
assertEquals("code", code.getName());
assertEquals(DataType.INT, code.getDataType());
assertEquals(2, header.getId());
assertEquals(1, header.getLabel());
assertEquals(3, ((NeptuneCsvEdgeHeader) header).getTo());
NeptuneCsvUserDefinedColumn name = header.getUserDefinedTypes().get(1);
assertEquals("name", name.getName());
assertEquals(DataType.STRING, name.getDataType());
assertEquals(5, ((NeptuneCsvEdgeHeader) header).getFrom());
}
@Test
public void edgeHeaderWithEmptyColumnIsRejected() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,~to,,name")).iterator().next();
Exception exception = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
Assertions.assertEquals("Empty column header encountered.", exception.getMessage());
CSVRecord record2 = csvFormat.parse(new StringReader("~id,~label,~from,~to, ,name")).iterator().next();
Exception exception2 = Assertions.assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record2));
Assertions.assertEquals("Empty column header encountered.", exception2.getMessage());
}
@Test
public void multipleIdNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~id,name")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: ~id", exception.getMessage());
}
@Test
public void multipleFromNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,name,~from,~to")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: ~from", exception.getMessage());
}
@Test
public void multipleToNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,name,~to,~to")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: ~to", exception.getMessage());
}
@Test
public void multipleVertexLabelNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name,~label")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: ~label", exception.getMessage());
}
@Test
public void multipleEdgeLabelNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name,~label,~from,~to")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: ~label", exception.getMessage());
}
@Test
public void duplicateVertexSimpleFieldsNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name,code,name")).iterator().next();
Csv2RdfException exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: name", exception.getMessage());
}
@Test
public void duplicateVertexTypedFieldsNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name:string,code,name")).iterator().next();
Csv2RdfException exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: name", exception.getMessage());
}
@Test
public void duplicateEdgeSimpleFieldsNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name,code,name,~from,~to")).iterator().next();
Csv2RdfException exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: name", exception.getMessage());
}
@Test
public void duplicateEdgeTypedFieldsNotAllowed() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,name:string,code,name:byte,~from,~to"))
.iterator().next();
Csv2RdfException exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Found duplicate field: name:byte", exception.getMessage());
}
@Test
public void arrayTypesNotAllowedForEdges() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,~to,name:string[]")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Array types are not allowed for edges: name", exception.getMessage());
}
@Test
public void setValuedTypesNotAllowedForEdges() throws IOException {
CSVRecord record = csvFormat.parse(new StringReader("~id,~label,~from,~to,name:string(set)")).iterator().next();
Exception exception = assertThrows(Csv2RdfException.class, () -> NeptuneCsvHeader.parse(record));
assertEquals("Set-valued types are not allowed for edges: name", exception.getMessage());
}
}
| 7,426 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/PropertyGraph2RdfMappingTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
@SuppressWarnings("serial")
public class PropertyGraph2RdfMappingTest {
private PropertyGraph2RdfMapping mapping;
@BeforeEach
public void init() {
mapping = new PropertyGraph2RdfMapping();
mapping.setTypeNamespace("tn:");
mapping.setVertexNamespace("vn:");
mapping.setEdgeNamespace("en:");
mapping.setVertexPropertyNamespace("vpn:");
mapping.setEdgePropertyNamespace("epn:");
mapping.setDefaultNamedGraph("dng:a");
mapping.setDefaultType("dt:a");
mapping.setEdgeContextNamespace("ec:");
}
@Test
public void invalidJavaUriSetDefaultNamedGraph() {
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.setDefaultNamedGraph("dgn:"));
assertEquals("Invalid resource URI <dgn:> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme-specific part at index 4: dgn:", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriSetDefaultNamedGraph() {
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.setDefaultNamedGraph("dgn"));
assertEquals("Invalid resource URI <dgn> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: dgn", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriSetDefaultType() {
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.setDefaultType("dt:"));
assertEquals("Invalid resource URI <dt:> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme-specific part at index 3: dt:", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriSetDefaultType() {
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.setDefaultType("dt"));
assertEquals("Invalid resource URI <dt> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: dt", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriTypeIri() {
mapping.setTypeNamespace(":tn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.typeIri("type"));
assertEquals("Invalid resource URI <:tnType> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :tnType", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriTypeIri() {
mapping.setTypeNamespace("tn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.typeIri("type"));
assertEquals("Invalid resource URI <tnType> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: tnType", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriVertexIri() {
mapping.setVertexNamespace(":vn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.vertexIri("vertex"));
assertEquals("Invalid resource URI <:vnvertex> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :vnvertex", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriVertexIri() {
mapping.setVertexNamespace("vn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.vertexIri("vertex"));
assertEquals("Invalid resource URI <vnvertex> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: vnvertex", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriEdgeIri() {
mapping.setEdgeNamespace(":en");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.edgeIri("edge"));
assertEquals("Invalid resource URI <:enedge> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :enedge", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriEdgeIri() {
mapping.setEdgeNamespace("en");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.edgeIri("edge"));
assertEquals("Invalid resource URI <enedge> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: enedge", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriVertexPopertyIri() {
mapping.setVertexPropertyNamespace(":vpn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.vertexPropertyIri("vprop"));
assertEquals("Invalid resource URI <:vpnvprop> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :vpnvprop", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriVertexPropertyIri() {
mapping.setVertexPropertyNamespace("vpn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.vertexPropertyIri("vprop"));
assertEquals("Invalid resource URI <vpnvprop> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: vpnvprop", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriEdgePopertyIri() {
mapping.setEdgePropertyNamespace(":epn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.edgePropertyIri("eprop"));
assertEquals("Invalid resource URI <:epneprop> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :epneprop", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriEdgePropertyIri() {
mapping.setEdgePropertyNamespace("epn");
Exception exception = assertThrows(Csv2RdfException.class, () -> mapping.edgePropertyIri("eprop"));
assertEquals("Invalid resource URI <epneprop> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: epneprop", exception.getCause().getMessage());
}
@Test
public void invalidJavaUriMapPropertyValue2RdfResource() {
String property = "word";
mapping.getPgProperty2RdfResourcePattern().put(property, ":bad{{VALUE}}");
Exception exception = assertThrows(Csv2RdfException.class,
() -> mapping.getVertex2RdfMapping().mapPropertyValue2RdfResource(property, "deleyite"));
assertEquals("Invalid resource URI <:baddeleyite> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof URISyntaxException);
assertEquals("Expected scheme name at index 0: :baddeleyite", exception.getCause().getMessage());
}
@Test
public void invalidRdf4jIriMapPropertyValue2RdfResource() {
String property = "word";
mapping.getPgProperty2RdfResourcePattern().put(property, "bad{{VALUE}}");
Exception exception = assertThrows(Csv2RdfException.class,
() -> mapping.getVertex2RdfMapping().mapPropertyValue2RdfResource(property, "deleyite"));
assertEquals("Invalid resource URI <baddeleyite> generated when mapping to RDF.", exception.getMessage());
assertTrue(exception.getCause() instanceof IllegalArgumentException);
assertEquals("Not a valid (absolute) IRI: baddeleyite", exception.getCause().getMessage());
}
@Test
public void emptyType() {
mapping.setTypeNamespace("tn://types/");
assertEquals("tn://types/", mapping.typeIri("").stringValue());
}
@Test
public void tinyType() {
mapping.setTypeNamespace("tn://types/");
assertEquals("tn://types/T", mapping.typeIri("t").stringValue());
}
@Test
public void irisAreEncoded() {
assertEquals("tn:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D",
mapping.typeIri("{Heizölrückstoßabdämpfung}").stringValue());
assertEquals("vn:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D",
mapping.vertexIri("{Heizölrückstoßabdämpfung}").stringValue());
assertEquals("en:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D",
mapping.edgeIri("{Heizölrückstoßabdämpfung}").stringValue());
assertEquals("vpn:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D",
mapping.vertexPropertyIri("{Heizölrückstoßabdämpfung}").stringValue());
assertEquals("epn:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D",
mapping.edgePropertyIri("{Heizölrückstoßabdämpfung}").stringValue());
assertEquals("dng:a", mapping.getDefaultNamedGraph().stringValue());
assertEquals("dt:a", mapping.getDefaultType().stringValue());
}
@Test
public void literalsAreNotEncoded() {
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#boolean>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.BOOL).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#byte>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.BYTE).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#date>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.DATETIME).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#double>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.DOUBLE).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#float>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.FLOAT).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#integer>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.INT).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#long>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.LONG).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"^^<http://www.w3.org/2001/XMLSchema#short>",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.SHORT).toString());
assertEquals("\"{Heizölrückstoßabdämpfung}\"",
mapping.value("{Heizölrückstoßabdämpfung}", DataType.STRING).toString());
}
@Test
public void propertyGraphVertexType2InstanceLabelMapping() {
String vertexType = "country";
String instanceLabelProperty = "code";
Map<String, String> pgVertexType2InstanceLabelProperty = new HashMap<String, String>() {
{
put(vertexType, instanceLabelProperty);
}
};
assertNull(mapping.getVertex2RdfMapping().getPropertyForRdfsLabel(vertexType));
mapping.setPgVertexType2PropertyForRdfsLabel(pgVertexType2InstanceLabelProperty);
assertEquals(instanceLabelProperty, mapping.getVertex2RdfMapping().getPropertyForRdfsLabel(vertexType));
assertNull(mapping.getVertex2RdfMapping().getPropertyForRdfsLabel("city"));
}
@Test
public void convertPropertyValue2RdfResource() {
String property = "word";
String value = "{Heizölrückstoßabdämpfung}";
String pattern = "http://example.org/resource/word/{{VALUE}}";
String resource = "http://example.org/resource/word/%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D";
Map<String, String> pgProperty2RdfResourcePattern = new HashMap<String, String>() {
{
put(property, pattern);
}
};
assertFalse(mapping.getVertex2RdfMapping().containsRdfResourcePatternForProperty(property));
assertNull(mapping.getVertex2RdfMapping().mapPropertyValue2RdfResource(property, value));
mapping.setPgProperty2RdfResourcePattern(pgProperty2RdfResourcePattern);
assertTrue(mapping.getVertex2RdfMapping().containsRdfResourcePatternForProperty(property));
assertEquals(resource,
mapping.getVertex2RdfMapping().mapPropertyValue2RdfResource(property, value).stringValue());
}
@Test
public void invalidPatternIsRejected() {
Map<String, String> pgProperty2RdfResourcePattern = new HashMap<String, String>() {
{
put("country", "something");
}
};
Csv2RdfException exception = assertThrows(Csv2RdfException.class,
() -> mapping.setPgProperty2RdfResourcePattern(pgProperty2RdfResourcePattern));
assertEquals("The pattern <something> for the new URI must contain the replacement variable {{VALUE}}.",
exception.getMessage());
}
@Test
public void testGetEdgeContextNamespaceFallback() {
final String eContext = "econtext";
mapping.setVertexNamespace("vns");
mapping.setEdgeContextNamespace(null);
assertEquals(mapping.getVertexNamespace(), mapping.getEdgeContextNamespace());
mapping.setEdgeContextNamespace(eContext);
assertEquals(eContext, mapping.getEdgeContextNamespace());
}
}
| 7,427 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/UriPostTransformerTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.util.ArrayList;
import java.util.Arrays;
import org.eclipse.rdf4j.model.Literal;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@code UriPostTransformer}: registration of type and replacement
 * statements and the subsequent rewriting of resource URIs in subjects,
 * predicates, objects, and contexts.
 */
public class UriPostTransformerTest {

    private static final SimpleValueFactory VF = SimpleValueFactory.getInstance();

    // statements containing the numeric URI "j:12" in every position
    private static final Statement[] TEST = { relation("j:12", "j:b", "j:c", "j:d"),
            relation("j:a", "j:12", "j:c", "j:d"), relation("j:a", "j:b", "j:12", "j:d"),
            relation("j:a", "j:b", "j:c", "j:12"), relation("j:12", "j:b", "j:12", "j:d"),
            relation("j:12", "j:12", "j:12", "j:12") };

    // the same statements after "j:12" has been replaced by "j:xy"
    private static final Statement[] EXPECTED = { relation("j:xy", "j:b", "j:c", "j:d"),
            relation("j:a", "j:xy", "j:c", "j:d"), relation("j:a", "j:b", "j:xy", "j:d"),
            relation("j:a", "j:b", "j:c", "j:xy"), relation("j:xy", "j:b", "j:xy", "j:d"),
            relation("j:xy", "j:xy", "j:xy", "j:xy") };

    // declares "j:12" an instance of the matching type
    private static final Statement TYPE_STATEMENT = relation("j:12", RDF.TYPE.stringValue(), "j:type", "j:d");

    // supplies the replacement value "xy" for "j:12"
    private static final Statement PROPERTY_STATEMENT = literal("j:12", "j:prop", "xy", "j:d");

    private UriPostTransformation transformation;
    private UriPostTransformer transformer;

    /**
     *
     * Create an RDF statement with resource object.
     *
     * @param subject
     * @param predicate
     * @param object
     * @param context
     * @return new statement
     */
    private static Statement relation(String subject, String predicate, String object, String context) {
        return VF.createStatement(VF.createIRI(subject), VF.createIRI(predicate), VF.createIRI(object),
                VF.createIRI(context));
    }

    /**
     *
     * Create an RDF statement with literal object.
     *
     * @param subject
     * @param predicate
     * @param object
     * @param context
     * @return new statement
     */
    private static Statement literal(String subject, String predicate, String object, String context) {
        return VF.createStatement(VF.createIRI(subject), VF.createIRI(predicate), VF.createLiteral(object),
                VF.createIRI(context));
    }

    @BeforeEach
    public void init() {
        transformation = new UriPostTransformation("j:(\\d+)", "j:type", "j:prop", "j:{{VALUE}}");
        transformer = new UriPostTransformer();
        transformer.setUriPostTransformations(new ArrayList<>(Arrays.asList(transformation)));
    }

    /**
     * Assert that transforming every statement of {@link #TEST} yields the
     * corresponding statement of the given expectation array.
     *
     * @param expected expected transformation results, parallel to {@link #TEST}
     */
    private void assertAllTransformTo(Statement[] expected) {
        for (int i = 0; i < TEST.length; ++i) {
            assertEquals(expected[i], transformer.transform(TEST[i]));
        }
    }

    @Test
    public void noTransformationBeforeInitialization() {
        // a transformer without registered transformations passes statements through
        transformer = new UriPostTransformer();
        assertAllTransformTo(TEST);
    }

    @Test
    public void transformStatementUsingPreparedTransformation() {
        transformation.registerResource("j:12", RDF.TYPE.stringValue(), "j:type");
        transformation.registerReplacementValue("j:12", "j:prop", "xy");
        assertAllTransformTo(EXPECTED);
    }

    @Test
    public void prepareTransformerAndTransformStatement() {
        // a literal-valued type statement must not register the resource,
        // so no replacement happens
        transformer.register(literal("j:12", RDF.TYPE.stringValue(), "j:type", "j:d"));
        transformer.register(PROPERTY_STATEMENT);
        assertAllTransformTo(TEST);
    }

    @Test
    public void literalDoesNotPrepareTransformer() {
        transformer.register(TYPE_STATEMENT);
        transformer.register(PROPERTY_STATEMENT);
        assertAllTransformTo(EXPECTED);
        // the registration statements themselves are rewritten, too
        assertEquals(relation("j:xy", RDF.TYPE.stringValue(), "j:type", "j:d"), transformer.transform(TYPE_STATEMENT));
        assertEquals(literal("j:xy", "j:prop", "xy", "j:d"), transformer.transform(PROPERTY_STATEMENT));
    }

    @Test
    public void registerInAnyOrderAndTransformStatement() {
        // replacement value may be registered before the type statement
        transformer.register(PROPERTY_STATEMENT);
        transformer.register(TYPE_STATEMENT);
        assertAllTransformTo(EXPECTED);
    }

    @Test
    public void firstComeFirstServed() {
        // when two transformations match, the one listed first wins
        UriPostTransformation transformation2 = new UriPostTransformation("j:(\\d+)", "j:type", "j:prop",
                "j:z/{{VALUE}}");
        transformer.setUriPostTransformations(new ArrayList<>(Arrays.asList(transformation2, transformation)));
        transformer.register(TYPE_STATEMENT);
        transformer.register(PROPERTY_STATEMENT);
        assertEquals(relation("j:z/xy", "j:b", "j:c", "j:d"), transformer.transform(TEST[0]));
        assertEquals(relation("j:z/xy", "j:z/xy", "j:z/xy", "j:z/xy"), transformer.transform(TEST[5]));
    }

    @Test
    public void differentTransformationsOnSameStatement() {
        UriPostTransformation transformation2 = new UriPostTransformation("j:r/(\\d+)", "j:type2", "j:prop",
                "j:z/{{VALUE}}");
        transformer.setUriPostTransformations(new ArrayList<>(Arrays.asList(transformation2, transformation)));
        transformer.register(TYPE_STATEMENT);
        transformer.register(PROPERTY_STATEMENT);
        transformer.register(relation("j:r/10", RDF.TYPE.stringValue(), "j:type2", "j:g"));
        transformer.register(literal("j:r/10", "j:prop", "ab", "j:d"));
        assertEquals(EXPECTED[0], transformer.transform(TEST[0]));
        // both transformations may fire on different URIs of one statement
        assertEquals(relation("j:z/ab", "j:r/b", "j:r/c", "j:z/ab"),
                transformer.transform(relation("j:r/10", "j:r/b", "j:r/c", "j:r/10")));
        assertEquals(relation("j:xy", "j:z/ab", "j:r/c", "j:r/34"),
                transformer.transform(relation("j:12", "j:r/10", "j:r/c", "j:r/34")));
    }

    @Test
    public void literalsAreNotTransformed() {
        transformer.register(TYPE_STATEMENT);
        transformer.register(PROPERTY_STATEMENT);
        assertEquals(relation("j:a", "j:b", "j:xy", "j:d"),
                transformer.transform(relation("j:a", "j:b", "j:12", "j:d")));
        // a literal object that textually matches the pattern is left alone
        assertEquals(literal("j:a", "j:b", "j:12", "j:d"), transformer.transform(literal("j:a", "j:b", "j:12", "j:d")));
    }

    @Test
    public void specialCharactersAreEncoded() {
        transformer.register(TYPE_STATEMENT);
        transformer.register(literal("j:12", "j:prop", " { very späcial } ", "j:d"));
        // replacement values are URL-encoded before insertion into the URI
        assertEquals(relation("j:a", "j:b", "j:+%7B+very+sp%C3%A4cial+%7D+", "j:d"),
                transformer.transform(relation("j:a", "j:b", "j:12", "j:d")));
    }

    @Test
    public void datatypeIsRemovedFromReplacementValue() {
        transformer.register(TYPE_STATEMENT);
        Literal integer = VF.createLiteral(1001);
        transformer.register(VF.createStatement(PROPERTY_STATEMENT.getSubject(), PROPERTY_STATEMENT.getPredicate(),
                integer, PROPERTY_STATEMENT.getContext()));
        // only the lexical form "1001" is used, not the typed literal serialization
        assertEquals("\"1001\"^^<http://www.w3.org/2001/XMLSchema#int>", integer.toString());
        assertEquals(relation("j:a", "j:b", "j:1001", "j:d"),
                transformer.transform(relation("j:a", "j:b", "j:12", "j:d")));
    }
}
| 7,428 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/Csv2RdfLogOutputTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.util.List;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import picocli.CommandLine;
@Tag("IntegrationTest")
@Tag("Csv2RdfLogOutputTest")
public class Csv2RdfLogOutputTest {
// original stdout/stderr, restored in tearDown()
private static final PrintStream STDOUT = System.out;
private static final PrintStream STDERR = System.err;
// buffers capturing everything written to stdout/stderr for assertions
private static final ByteArrayOutputStream STDOUT_BAOS = new ByteArrayOutputStream();
private static final ByteArrayOutputStream STDERR_BAOS = new ByteArrayOutputStream();
// dedicated log file for this test class, truncated before each test
private static final Path LOG_FILE = Paths.get("target", "csv2rdf.checkLogOutput.log");
// system property names consumed by the log4j2 configuration
private static final String LOG_FILE_PROPERTY = "software.amazon.neptune.csv2rdf.log.file";
private static final String LOG_IMMEDIATE_FLUSH_PROPERTY = "software.amazon.neptune.csv2rdf.log.immediateFlush";
static {
// set a default log file location before any logging framework class loads;
// setup() overrides it with LOG_FILE for this test class
System.setProperty(LOG_FILE_PROPERTY, Paths.get("target", "csv2rdf.tests.log").toFile().getPath());
}
private static final File TARGET = new File("target");
@BeforeAll
public static void setup() {
// load the real log configuration for this test instead of log4j2-test.xml
System.setProperty("log4j.configurationFile", "classpath:log4j2.xml");
// write log file to a dedicated file in target/
System.setProperty(LOG_FILE_PROPERTY, LOG_FILE.toString());
// flush immediately for writing the log file before checking its content
System.setProperty(LOG_IMMEDIATE_FLUSH_PROPERTY, "true");
// tee stdout/stderr: keep printing to the console while also capturing
// the output in the byte buffers for the assertions
System.setOut(new PrintStream(new MultiOutputStream(STDOUT, STDOUT_BAOS)));
System.setErr(new PrintStream(new MultiOutputStream(STDERR, STDERR_BAOS)));
// touch the logging framework once so it initializes with the
// properties set above
LoggerFactory.getLogger(Csv2RdfLogOutputTest.class);
}
@AfterAll
public static void tearDown() {
// restore the original console streams so later test classes are unaffected
System.setOut(STDOUT);
System.setErr(STDERR);
}
@BeforeEach
public void init() throws IOException {
// start each test with an empty log file and empty capture buffers
if (Files.exists(LOG_FILE)) {
Files.write(LOG_FILE, new byte[0], StandardOpenOption.TRUNCATE_EXISTING);
}
STDOUT_BAOS.reset();
STDERR_BAOS.reset();
}
@Test
public void validateConfigParameter() throws IOException {
// running the CLI with a missing configuration file must fail (-1) and
// report the problem both on stderr and in the log file
File nonExistingProperties = new File(TARGET, "non-existing.properties");
assertEquals(-1, new CommandLine(new Csv2Rdf()).execute("-c", nonExistingProperties.getPath(), "-i",
TARGET.getPath(), "-o", TARGET.getPath()));
// stderr: one summary line plus one detail line
final String[] lines = STDERR_BAOS.toString(StandardCharsets.UTF_8.name()).split("\\r?\\n");
assertEquals(2, lines.length);
assertEquals("CSV to RDF conversion failed.", lines[0]);
assertTrue(lines[1].matches(
"File for parameter <configuration file> does not exist: .*" + nonExistingProperties.getPath()));
// log file: same error at ERROR level, followed by the detail message
// (stack trace lines may follow, hence "> 2")
List<String> logLines = Files.readAllLines(LOG_FILE);
assertTrue(logLines.size() > 2);
assertTrue(logLines.get(0)
.contains("ERROR software.amazon.neptune.csv2rdf.Csv2Rdf - CSV to RDF conversion failed."));
assertTrue(logLines.get(1).contains("File for parameter <configuration file> does not exist:"));
}
/**
*
* Logging should be configured as:
* <ul>
* <li>INFO to stdout (but no DEBUG, no TRACE, no WARN, no ERROR)</li>
* <li>WARN to stderr (but no ERROR)</li>
* <li>ALL to amazon-neptune-csv2rdf.log</li>
* </ul>
*
* This test must be <b>run before any logger</b> has been initialized! Use the
* tag <em>Csv2RdfTest::checkLogOutput</em> to include or exclude this test.
*
* @throws IOException
* @throws InterruptedException
*/
@Test
public void checkLogOutput() throws IOException, InterruptedException {
Logger log = LoggerFactory.getLogger(getClass());
log.trace("trace");
log.debug("debug");
log.info("info");
log.warn("warn");
log.info("info2");
log.error("error");
log.warn("warn2");
assertEquals(String.format("info%ninfo2%n"), STDOUT_BAOS.toString(StandardCharsets.UTF_8.name()));
assertEquals(String.format("warn%nwarn2%n"), STDERR_BAOS.toString(StandardCharsets.UTF_8.name()));
List<String> logLines = Files.readAllLines(LOG_FILE);
assertEquals(5, logLines.size());
assertTrue(logLines.get(0).matches(".*INFO .+? - info"));
assertTrue(logLines.get(1).matches(".*WARN .+? - warn"));
assertTrue(logLines.get(2).matches(".*INFO .+? - info2"));
assertTrue(logLines.get(3).matches(".*ERROR .+? - error"));
assertTrue(logLines.get(4).matches(".*WARN .+? - warn2"));
}
/**
*
* An output stream for multiplying the written bytes to multiple streams
*
*/
public static class MultiOutputStream extends OutputStream {
private final OutputStream[] streams;
public MultiOutputStream(OutputStream... streams) {
this.streams = streams;
}
@Override
public void write(int b) throws IOException {
for (OutputStream s : streams) {
s.write(b);
}
}
@Override
public void flush() throws IOException {
for (OutputStream s : streams) {
s.flush();
}
}
@Override
public void close() throws IOException {
for (OutputStream s : streams) {
s.close();
}
}
}
}
// ===== File: software/amazon/neptune/csv2rdf/UriPostTransformationTest.java =====
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.net.URISyntaxException;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
/**
 * Tests for {@link UriPostTransformation}: a transformation is configured with
 * a source URI pattern, a type URI, a property URI and a destination pattern,
 * and rewrites resource URIs once both the type and a replacement value have
 * been registered.
 */
public class UriPostTransformationTest {

    private static final String TYPE = "http://example.org/class/Country";
    private static final String PROPERTY = "http://example.org/datatypeProperty/code";

    /** Instance under test; re-created before every test method. */
    private UriPostTransformation uriTransformation;

    @BeforeEach
    public void init() {
        uriTransformation = new UriPostTransformation("http://example.org/resource/([0-9]+)", TYPE, PROPERTY,
                "http://example.org/resource/{{VALUE}}");
    }

    /** Registers {@code iri} with the matching type, making it a rewrite candidate. */
    private void registerCountry(String iri) {
        uriTransformation.registerResource(iri, RDF.TYPE.toString(), TYPE);
    }

    @Test
    public void noTransformationBeforeRegistering() {
        // nothing registered yet, so no rewrite may happen
        assertNull(uriTransformation.apply("http://example.org/resource/123"));
    }

    @Test
    public void transformUri() {
        String iri = "http://example.org/resource/123";
        registerCountry(iri);
        // the type alone is not sufficient for a rewrite
        assertNull(uriTransformation.apply(iri));
        uriTransformation.registerReplacementValue(iri, PROPERTY, "FR");
        assertEquals("http://example.org/resource/FR", uriTransformation.apply(iri).stringValue());
    }

    @Test
    public void keyMustBeRegisteredBeforeValue() {
        String iri = "http://example.org/resource/456";
        // value arrives before the type registration; rewrite must still work
        uriTransformation.registerReplacementValue(iri, PROPERTY, "CN");
        registerCountry(iri);
        assertEquals("http://example.org/resource/CN", uriTransformation.apply(iri).stringValue());
    }

    @Test
    public void noTransformationWithoutValue() {
        String iri = "http://example.org/resource/123";
        registerCountry(iri);
        assertNull(uriTransformation.apply(iri));
    }

    @Test
    public void noTransformationIfTypeDoesNotMatch() {
        String iri = "http://example.org/resource/456";
        // registered with a different type than the transformation expects
        uriTransformation.registerResource(iri, RDF.TYPE.toString(), "http://example.org/class/City");
        uriTransformation.registerReplacementValue(iri, PROPERTY, "BU");
        assertNull(uriTransformation.apply(iri));
    }

    @Test
    public void noTransformationIfPropertyDoesNotMatch() {
        String iri = "http://example.org/resource/456";
        registerCountry(iri);
        // the replacement value is supplied for an unrelated property
        uriTransformation.registerReplacementValue(iri, "http://example.org/datatypeProperty/name", "Peru");
        assertNull(uriTransformation.apply(iri));
    }

    @Test
    public void specialCharactersAreEncoded() {
        String iri = "http://example.org/resource/456";
        registerCountry(iri);
        uriTransformation.registerReplacementValue(iri, PROPERTY, "[] {} ß ä ");
        assertEquals("http://example.org/resource/%5B%5D+%7B%7D+%C3%9F+%C3%A4+",
                uriTransformation.apply(iri).stringValue());
    }

    @Test
    public void resourcePrefixIsNotEncoded() {
        // only the {{VALUE}} part is encoded; an invalid prefix must surface as an error
        uriTransformation = new UriPostTransformation("http://example.org/resource/([0-9]+)", TYPE, PROPERTY,
                "{invalid} /resource/{{VALUE}}");
        String iri = "http://example.org/resource/456";
        registerCountry(iri);
        uriTransformation.registerReplacementValue(iri, PROPERTY, "[] {} ß ä ");
        Exception thrown = assertThrows(Csv2RdfException.class, () -> uriTransformation.apply(iri));
        assertEquals(
                "Invalid resource URI <{invalid} /resource/%5B%5D+%7B%7D+%C3%9F+%C3%A4+> generated when applying UriPostTransformation(srcPattern=http://example.org/resource/([0-9]+), typeUri=http://example.org/class/Country, propertyUri=http://example.org/datatypeProperty/code, dstPattern={invalid} /resource/{{VALUE}}).",
                thrown.getMessage());
        assertTrue(thrown.getCause() instanceof URISyntaxException);
        assertEquals("Illegal character in path at index 0: {invalid} /resource/%5B%5D+%7B%7D+%C3%9F+%C3%A4+",
                thrown.getCause().getMessage());
    }

    @Test
    public void rejectDuplicateKeyWithDistinctValues() {
        String iri = "http://example.org/resource/456";
        registerCountry(iri);
        uriTransformation.registerReplacementValue(iri, PROPERTY, "Peru");
        Exception thrown = assertThrows(Csv2RdfException.class,
                () -> uriTransformation.registerReplacementValue(iri, PROPERTY, "Chile"));
        assertEquals("Found duplicate, inconsistent value for <http://example.org/resource/456>: Chile vs. Peru",
                thrown.getMessage());
    }

    @Test
    public void invalidJavaUri() {
        // the destination pattern yields "type:", which java.net.URI rejects
        UriPostTransformation schemeOnly = new UriPostTransformation("j:(\\d+)", "j:type", "j:prop", "{{VALUE}}:");
        schemeOnly.registerResource("j:12", RDF.TYPE.stringValue(), "j:type");
        schemeOnly.registerReplacementValue("j:12", "j:prop", "type");
        Exception thrown = assertThrows(Csv2RdfException.class, () -> schemeOnly.apply("j:12"));
        assertEquals(
                "Invalid resource URI <type:> generated when applying UriPostTransformation(srcPattern=j:(\\d+), typeUri=j:type, propertyUri=j:prop, dstPattern={{VALUE}}:).",
                thrown.getMessage());
        assertTrue(thrown.getCause() instanceof URISyntaxException);
        assertEquals("Expected scheme-specific part at index 5: type:", thrown.getCause().getMessage());
    }

    @Test
    public void invalidRdf4jIri() {
        // the destination pattern yields a relative reference, which RDF4J rejects
        UriPostTransformation relativeIri = new UriPostTransformation("j:(\\d+)", "j:type", "j:prop",
                "{{VALUE}}/resource");
        relativeIri.registerResource("j:12", RDF.TYPE.stringValue(), "j:type");
        relativeIri.registerReplacementValue("j:12", "j:prop", "type");
        Exception thrown = assertThrows(Csv2RdfException.class, () -> relativeIri.apply("j:12"));
        assertEquals(
                "Invalid resource URI <type/resource> generated when applying UriPostTransformation(srcPattern=j:(\\d+), typeUri=j:type, propertyUri=j:prop, dstPattern={{VALUE}}/resource).",
                thrown.getMessage());
        assertTrue(thrown.getCause() instanceof IllegalArgumentException);
        assertEquals("Not a valid (absolute) IRI: type/resource", thrown.getCause().getMessage());
    }
}
// ===== File: software/amazon/neptune/csv2rdf/Csv2RdfTest.java =====
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.junit.jupiter.api.Test;
import org.opentest4j.AssertionFailedError;
import picocli.CommandLine;
import software.amazon.neptune.csv2rdf.Csv2Rdf.MavenVersionProvider;
import software.amazon.neptune.csv2rdf.Csv2RdfLogOutputTest.MultiOutputStream;
/**
 * Unit tests for the {@link Csv2Rdf} command line entry point: version output,
 * file and directory parameter validation, and error reporting of failing CLI
 * invocations.
 */
public class Csv2RdfTest {

    private static final Path INTEGRATION_TEST = Paths.get("src", "test", "integration-test");
    private static final Path TARGET = Paths.get("target");

    /**
     * Runs {@code body}, then always runs {@code cleanUp}. A failure of the
     * body is rethrown as the primary error with any clean-up failure attached
     * as a suppressed exception; a clean-up failure alone is rethrown directly.
     * Extracted because three directory tests repeated this pattern verbatim.
     *
     * @param body    the test logic (may throw {@link AssertionFailedError})
     * @param cleanUp the clean-up logic, always executed
     */
    private static void runWithCleanUp(Runnable body, Runnable cleanUp) {
        AssertionFailedError failure = null;
        try {
            body.run();
        } catch (AssertionFailedError e) {
            failure = e;
        } finally {
            try {
                cleanUp.run();
            } catch (AssertionFailedError cleanUpFailure) {
                if (failure != null) {
                    failure.addSuppressed(cleanUpFailure);
                    throw failure;
                }
                throw cleanUpFailure;
            }
            if (failure != null) {
                throw failure;
            }
        }
    }

    /**
     * Executes the CLI with the given arguments while stderr is captured,
     * asserts that the run fails with exit code -1, and returns the captured
     * stderr output split into lines. Extracted because the three validate*
     * tests repeated this capture boilerplate.
     *
     * @param args CLI arguments
     * @return captured stderr lines
     * @throws UnsupportedEncodingException never on a compliant JVM (UTF-8 is mandatory)
     */
    private static String[] executeExpectingFailure(String... args) throws UnsupportedEncodingException {
        PrintStream stderr = System.err;
        ByteArrayOutputStream stderrBytes = new ByteArrayOutputStream();
        try {
            // keep echoing to the real stderr while capturing a copy
            System.setErr(new PrintStream(new MultiOutputStream(stderr, stderrBytes)));
            assertEquals(-1, new CommandLine(new Csv2Rdf()).execute(args));
        } finally {
            System.setErr(stderr);
        }
        return stderrBytes.toString(StandardCharsets.UTF_8.name()).split("\\r?\\n");
    }

    @Test
    public void printVersionInfo() {
        int exitCode = new CommandLine(new Csv2Rdf()).execute("--version");
        assertEquals(0, exitCode);
    }

    @Test
    public void getVersionInfo() {
        // version string like 1.2.3 or 1.2.3-SNAPSHOT
        assertTrue(new MavenVersionProvider().getVersion()[0].matches("\\d+.\\d+.\\d+(-.+)?"));
    }

    @Test
    public void checkExistingFile() {
        File existingFile = Paths.get("src", "test", "inputDirectoryTest").resolve("ignore.txt").toFile();
        Csv2Rdf.validateFileParam(existingFile, "config");
    }

    @Test
    public void checkNonExistingFile() {
        File nonExistingFile = TARGET.resolve("non-existing-file.csv").toFile();
        Exception exception = assertThrows(Csv2RdfException.class,
                () -> Csv2Rdf.validateFileParam(nonExistingFile, "config"));
        assertTrue(exception.getMessage()
                .matches("File for parameter config does not exist: .*" + nonExistingFile.getPath()));
    }

    @Test
    public void checkFileButIsDirectory() {
        Exception exception = assertThrows(Csv2RdfException.class,
                () -> Csv2Rdf.validateFileParam(TARGET.toFile(), "config"));
        assertTrue(exception.getMessage().matches("Parameter config does not point to a file: .*" + TARGET.toString()));
    }

    @Test
    public void checkExistingDirectory() {
        Csv2Rdf.validateDirectoryParam(TARGET.toFile(), "input", false);
    }

    @Test
    public void checkDirectoryButIsFile() {
        File existingFile = Paths.get("src", "test", "inputDirectoryTest").resolve("ignore.txt").toFile();
        Exception exception = assertThrows(Csv2RdfException.class,
                () -> Csv2Rdf.validateDirectoryParam(existingFile, "input", true));
        assertTrue(exception.getMessage()
                .matches("Parameter input does not point to a directory: .*" + existingFile.getPath()));
    }

    @Test
    public void checkNonExistingDirectory() {
        File nonExistingDir = TARGET.resolve("non-existing-dir").toFile();
        Exception exception = assertThrows(Csv2RdfException.class,
                () -> Csv2Rdf.validateDirectoryParam(nonExistingDir, "input", false));
        assertTrue(exception.getMessage()
                .matches("Directory for parameter input does not exist: .*" + nonExistingDir.getPath()));
    }

    @Test
    public void checkNonExistingDirectoryWithCreate() {
        File directoryToCreate = TARGET.resolve("directory-to-create").toFile();
        assertFalse(directoryToCreate.exists());
        runWithCleanUp(() -> {
            Csv2Rdf.validateDirectoryParam(directoryToCreate, "output", true);
            assertTrue(directoryToCreate.isDirectory());
        }, () -> assertDoesNotThrow(() -> Files.delete(directoryToCreate.toPath())));
    }

    @Test
    public void checkNonExistingNestedDirectoryWithCreate() {
        File directoryToCreate = TARGET.resolve("nested").resolve("directory-to-create").toFile();
        assertFalse(directoryToCreate.getParentFile().exists());
        runWithCleanUp(() -> {
            Csv2Rdf.validateDirectoryParam(directoryToCreate, "output", true);
            assertTrue(directoryToCreate.isDirectory());
        }, () -> {
            assertDoesNotThrow(() -> Files.delete(directoryToCreate.toPath()));
            assertDoesNotThrow(() -> Files.delete(directoryToCreate.toPath().getParent()));
        });
    }

    @Test
    public void checkNonExistingDirectoryWithCreateFails() throws Exception {
        File readOnlyDirectory = TARGET.resolve("read-ony-directory").toFile();
        runWithCleanUp(() -> {
            assertTrue(readOnlyDirectory.mkdir(), "Preparation of a read-only directory failed.");
            assertTrue(readOnlyDirectory.setReadOnly());
            File cannotBeCreated = new File(readOnlyDirectory, "cannot-be-created");
            Exception exception = assertThrows(Csv2RdfException.class,
                    () -> Csv2Rdf.validateDirectoryParam(cannotBeCreated, "output", true));
            assertTrue(exception.getMessage()
                    .matches("Directory for parameter output could not be created: .*" + cannotBeCreated.getPath()));
        }, () -> {
            // make the directory deletable again before removing it
            assertTrue(readOnlyDirectory.setWritable(true));
            assertDoesNotThrow(() -> Files.delete(readOnlyDirectory.toPath()));
        });
    }

    @Test
    public void validateConfigParameter() throws UnsupportedEncodingException {
        File nonExistingProperties = TARGET.resolve("non-existing.properties").toFile();
        String[] lines = executeExpectingFailure("-c", nonExistingProperties.getPath(), "-i", TARGET.toString(), "-o",
                TARGET.toString());
        assertEquals(2, lines.length);
        assertEquals("CSV to RDF conversion failed.", lines[0]);
        assertTrue(lines[1].matches(
                "File for parameter <configuration file> does not exist: .*" + nonExistingProperties.getPath()));
    }

    @Test
    public void validateInputParameter() throws UnsupportedEncodingException {
        File nonExistingDirectory = TARGET.resolve("non-existing-directory").toFile();
        String[] lines = executeExpectingFailure("-i", nonExistingDirectory.getPath(), "-o", TARGET.toString());
        assertEquals(2, lines.length);
        assertEquals("CSV to RDF conversion failed.", lines[0]);
        assertTrue(lines[1].matches(
                "Directory for parameter <input directory> does not exist: .*" + nonExistingDirectory.getPath()));
    }

    @Test
    public void validateOutputParameter() throws UnsupportedEncodingException {
        File integrationTestProperties = INTEGRATION_TEST.resolve("integration-test.properties").toFile();
        String[] lines = executeExpectingFailure("-i", TARGET.toString(), "-o", integrationTestProperties.getPath());
        assertEquals(2, lines.length);
        assertEquals("CSV to RDF conversion failed.", lines[0]);
        assertTrue(lines[1].matches("Parameter <output directory> does not point to a directory: .*"
                + integrationTestProperties.getPath()));
    }
}
// ===== File: software/amazon/neptune/csv2rdf/NeptuneCsvUserDefinedColumnTest.java =====
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.Cardinality;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
/**
 * Tests for parsing user-defined CSV header columns into
 * {@link NeptuneCsvUserDefinedColumn} instances: column names, data types,
 * cardinalities, array markers and malformed headers.
 */
public class NeptuneCsvUserDefinedColumnTest {

    /** Shorthand: parse a header column and return only its data type. */
    private static DataType dataTypeOf(String column) {
        return NeptuneCsvUserDefinedColumn.parse(column).getDataType();
    }

    @Test
    public void userField() {
        NeptuneCsvUserDefinedColumn column = NeptuneCsvUserDefinedColumn.parse("id");
        assertEquals("id", column.getName());
        assertTrue(column instanceof NeptuneCsvUserDefinedColumn);
        // a column without an explicit type defaults to string
        assertEquals(DataType.STRING, column.getDataType());
    }

    @Test
    public void userFieldWithDatatype() {
        NeptuneCsvUserDefinedColumn column = NeptuneCsvUserDefinedColumn.parse("id:byte");
        assertEquals("id", column.getName());
        assertTrue(column instanceof NeptuneCsvUserDefinedColumn);
        assertEquals(DataType.BYTE, column.getDataType());
    }

    @Test
    public void allDataTypes() {
        assertEquals(DataType.BYTE, dataTypeOf("id:byte"));
        assertEquals(DataType.BOOL, dataTypeOf("id:boolean"));
        assertEquals(DataType.BOOL, dataTypeOf("id:bool"));
        assertEquals(DataType.SHORT, dataTypeOf("id:short"));
        assertEquals(DataType.INT, dataTypeOf("id:int"));
        assertEquals(DataType.INT, dataTypeOf("id:integer"));
        assertEquals(DataType.LONG, dataTypeOf("id:long"));
        assertEquals(DataType.FLOAT, dataTypeOf("id:float"));
        assertEquals(DataType.DOUBLE, dataTypeOf("id:double"));
        assertEquals(DataType.STRING, dataTypeOf("id:string"));
        assertEquals(DataType.DATETIME, dataTypeOf("id:date"));
        assertEquals(DataType.DATETIME, dataTypeOf("id:datetime"));
    }

    @Test
    public void invalidDatatypeIsRejected() {
        Exception thrown = assertThrows(Csv2RdfException.class, () -> NeptuneCsvUserDefinedColumn.parse("id:bla"));
        assertEquals("Invalid data type encountered for header: id:bla", thrown.getMessage());
    }

    @Test
    public void allCardinalities() {
        // no cardinality suffix: DEFAULT, not an array; type name is case-insensitive
        NeptuneCsvUserDefinedColumn plain = NeptuneCsvUserDefinedColumn.parse("id:Byte");
        assertEquals(DataType.BYTE, plain.getDataType());
        assertEquals(Cardinality.DEFAULT, plain.getCardinality());
        assertFalse(plain.isArray());
        // explicit (set) cardinality
        NeptuneCsvUserDefinedColumn setColumn = NeptuneCsvUserDefinedColumn.parse("id:byte(set)");
        assertEquals(DataType.BYTE, setColumn.getDataType());
        assertEquals(Cardinality.SET, setColumn.getCardinality());
        assertFalse(setColumn.isArray());
        // [] marker implies set cardinality
        NeptuneCsvUserDefinedColumn arrayColumn = NeptuneCsvUserDefinedColumn.parse("id:byte[]");
        assertEquals(DataType.BYTE, arrayColumn.getDataType());
        assertEquals(Cardinality.SET, arrayColumn.getCardinality());
        assertTrue(arrayColumn.isArray());
        // (Set) and [] may be combined; cardinality keyword is case-insensitive
        NeptuneCsvUserDefinedColumn setArrayColumn = NeptuneCsvUserDefinedColumn.parse("id:byte(Set)[]");
        assertEquals(DataType.BYTE, setArrayColumn.getDataType());
        assertEquals(Cardinality.SET, setArrayColumn.getCardinality());
        assertTrue(setArrayColumn.isArray());
        // explicit (single) cardinality
        NeptuneCsvUserDefinedColumn singleColumn = NeptuneCsvUserDefinedColumn.parse("id:byte(single)");
        assertEquals(DataType.BYTE, singleColumn.getDataType());
        assertEquals(Cardinality.SINGLE, singleColumn.getCardinality());
        assertFalse(singleColumn.isArray());
        // (single) combined with [] is contradictory and must be rejected
        Exception thrown = assertThrows(Csv2RdfException.class,
                () -> NeptuneCsvUserDefinedColumn.parse("id:byte(single)[]"));
        assertEquals("Type definition cannot be single cardinality but array: id", thrown.getMessage());
    }

    @Test
    public void invalidFieldIsRejected() {
        Exception thrown = assertThrows(Csv2RdfException.class,
                () -> NeptuneCsvUserDefinedColumn.parse(" -*__, -*_. "));
        assertEquals("Invalid column encountered while parsing header: -*__, -*_.", thrown.getMessage());
    }

    @Test
    public void invalidField2IsRejected() {
        Exception thrown = assertThrows(Csv2RdfException.class, () -> NeptuneCsvUserDefinedColumn.parse("f:[]bla"));
        assertEquals("Invalid column encountered while parsing header: f:[]bla", thrown.getMessage());
    }
}
// ===== File: software/amazon/neptune/csv2rdf/PropertGraph2RdfMapperTest.java =====
/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import org.eclipse.rdf4j.model.vocabulary.RDFS;
import org.eclipse.rdf4j.model.vocabulary.XMLSchema;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphEdge;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphVertex;
@SuppressWarnings("serial")
public class PropertGraph2RdfMapperTest {
private static SimpleValueFactory VF = SimpleValueFactory.getInstance();
private CSVFormat csvFormat;
private PropertyGraph2RdfMapping mapping;
private PropertyGraph2RdfMapper mapper;
/**
 *
 * Create an RDF statement whose object is a resource.
 *
 * @param subject   subject IRI string
 * @param predicate predicate IRI string
 * @param object    object IRI string
 * @param context   named graph IRI string
 * @return new statement
 */
private static Statement relation(String subject, String predicate, String object, String context) {
    IRI subjectIri = VF.createIRI(subject);
    IRI predicateIri = VF.createIRI(predicate);
    IRI objectIri = VF.createIRI(object);
    IRI graphIri = VF.createIRI(context);
    return VF.createStatement(subjectIri, predicateIri, objectIri, graphIri);
}
/**
 *
 * Create an RDF statement whose object is a plain string literal.
 *
 * @param subject   subject IRI string
 * @param predicate predicate IRI string
 * @param object    literal value
 * @param context   named graph IRI string
 * @return new statement
 */
private static Statement literal(String subject, String predicate, String object, String context) {
    IRI subjectIri = VF.createIRI(subject);
    IRI predicateIri = VF.createIRI(predicate);
    IRI graphIri = VF.createIRI(context);
    return VF.createStatement(subjectIri, predicateIri, VF.createLiteral(object), graphIri);
}
/**
 *
 * Create an RDF statement whose object is a typed literal.
 *
 * @param subject   subject IRI string
 * @param predicate predicate IRI string
 * @param object    literal value
 * @param dataType  literal datatype IRI
 * @param context   named graph IRI string
 * @return new statement
 */
private static Statement literal(String subject, String predicate, String object, IRI dataType, String context) {
    IRI subjectIri = VF.createIRI(subject);
    IRI predicateIri = VF.createIRI(predicate);
    IRI graphIri = VF.createIRI(context);
    return VF.createStatement(subjectIri, predicateIri, VF.createLiteral(object, dataType), graphIri);
}
@BeforeEach
public void init() {
    // fresh parser format and mapping configuration for every test
    csvFormat = NeptuneCsvInputParser.createCSVFormat();
    mapping = new PropertyGraph2RdfMapping();
    // short namespace prefixes make the expected IRIs in the assertions compact:
    // tn: types, vn: vertices, en: edges, vpn/epn: vertex/edge properties
    mapping.setTypeNamespace("tn:");
    mapping.setVertexNamespace("vn:");
    mapping.setEdgeNamespace("en:");
    mapping.setVertexPropertyNamespace("vpn:");
    mapping.setEdgePropertyNamespace("epn:");
    // fallbacks used when the CSV provides no label/predicate
    mapping.setDefaultNamedGraph("dng:a");
    mapping.setDefaultType("dt:a");
    mapping.setDefaultPredicate("dp:a");
    mapping.setEdgeContextNamespace("ec:");
    mapper = new PropertyGraph2RdfMapper();
    mapper.setMapping(mapping);
}
/**
 *
 * Parse the CSV input as a vertex and map it to RDF statements.
 *
 * @param csv CSV formatted input
 * @return list of RDF statements
 * @throws IOException
 * @throws ClassCastException if the CSV did not describe a vertex
 */
private List<Statement> getStatementsForVertex(String csv) throws IOException {
    NeptunePropertyGraphVertex vertex = (NeptunePropertyGraphVertex) parse(csv);
    return mapper.mapToStatements(vertex);
}
/**
 *
 * Parse the CSV input as an edge and map it to RDF statements.
 *
 * @param csv CSV formatted input
 * @return list of RDF statements
 * @throws IOException
 * @throws ClassCastException if the CSV did not describe an edge
 */
private List<Statement> getStatementsForEdge(String csv) throws IOException {
    NeptunePropertyGraphEdge edge = (NeptunePropertyGraphEdge) parse(csv);
    return mapper.mapToStatements(edge);
}
/**
 *
 * Parse a CSV snippet consisting of a header row and one data row.
 *
 * @param csv CSV formatted input
 * @return vertex or edge depending on the CSV header
 * @throws IOException
 */
private NeptunePropertyGraphElement parse(String csv) throws IOException {
    Iterator<CSVRecord> rows = csvFormat.parse(new StringReader(csv)).iterator();
    // first row is the header, second row the element itself
    NeptuneCsvHeader header = NeptuneCsvHeader.parse(rows.next());
    return NeptuneCsvInputParser.create(header, rows.next());
}
@Test
public void vertexWithoutLabel() throws IOException {
    // without a ~label column the configured default type is used
    List<Statement> actual = getStatementsForVertex("~id,name\n1,x");
    assertEquals(2, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "dt:a", "dng:a"), actual.get(0));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), actual.get(1));
}
@Test
public void vertexWithLabel() throws IOException {
    // the label "mister" becomes the capitalized type tn:Mister
    List<Statement> actual = getStatementsForVertex("~id,~label,name\n1,mister,x");
    assertEquals(2, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), actual.get(0));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), actual.get(1));
}
@Test
public void vertexWithWhitespaceLabel() throws IOException {
    // a whitespace-only label is treated like a missing label -> default type
    List<Statement> actual = getStatementsForVertex("~id,~label,name\n1, ,x");
    assertEquals(2, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "dt:a", "dng:a"), actual.get(0));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), actual.get(1));
}
@Test
public void vertexWithMultipleLabels() throws IOException {
    // semicolon-separated labels each yield an rdf:type statement
    List<Statement> actual = getStatementsForVertex("~id,~label,name\n1,mister;mister2;mister3,x");
    assertEquals(4, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), actual.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), actual.get(1));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister3", "dng:a"), actual.get(2));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), actual.get(3));
}
@Test
public void vertexWithNullLabel() throws IOException {
    // an empty label field falls back to the default type
    List<Statement> actual = getStatementsForVertex("~id,~label,name\n1,,x");
    assertEquals(2, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "dt:a", "dng:a"), actual.get(0));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), actual.get(1));
}
@Test
public void nullValueDoesNotCreateVertexProperty() throws IOException {
    // a missing property value produces no property statement
    List<Statement> actual = getStatementsForVertex("~id,name\n1,");
    assertEquals(1, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "dt:a", "dng:a"), actual.get(0));
}
@Test
public void emptyValueDoesNotCreateVertexProperty() throws IOException {
    // a whitespace-only property value produces no property statement
    List<Statement> actual = getStatementsForVertex("~id,~label,name\n1,mister, \t");
    assertEquals(1, actual.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), actual.get(0));
}
@Test
public void nullValueDoesNotCreateRdfsLabel() throws IOException {
    // plain HashMap instead of double-brace initialization (avoids an
    // anonymous HashMap subclass and the suppressed "serial" warning)
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    // a missing value for the configured label property yields no rdfs:label
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister,");
    assertEquals(1, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
}
@Test
public void emptyValueDoesNotCreateRdfsLabel() throws IOException {
    // plain HashMap instead of double-brace initialization (avoids an
    // anonymous HashMap subclass and the suppressed "serial" warning)
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    // a blank value for the configured label property yields no rdfs:label
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister, ");
    assertEquals(1, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
}
@Test
public void createOnlyRdfsLabel() throws IOException {
    // plain HashMap instead of double-brace initialization (avoids an
    // anonymous HashMap subclass and the suppressed "serial" warning)
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    // suppress the vpn:name statement; only rdfs:label remains
    mapper.setAlwaysAddPropertyStatements(false);
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister,x");
    assertEquals(2, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(1));
}
@Test
public void createOnlyRdfsLabelHavingTwoLabels() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperty = new HashMap<>();
    labelProperty.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperty);
    mapper.setAlwaysAddPropertyStatements(false);
    // Two vertex labels produce two type statements; only the configured one adds an rdfs:label.
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister;mister2,x");
    assertEquals(3, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
}
@Test
public void createOnlyRdfsLabelHavingTwoLabelsAndTwoLabelProperties() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    labelProperties.put("mister2", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    mapper.setAlwaysAddPropertyStatements(false);
    // Both labels map to the same property, so the rdfs:label is emitted only once.
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister;mister2,x");
    assertEquals(3, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
}
@Test
public void createOnlyRdfsLabelHavingTwoLabelsAndTwoDifferentLabelProperties() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    labelProperties.put("mister2", "name2");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    mapper.setAlwaysAddPropertyStatements(false);
    // Each label maps to its own property, so two distinct rdfs:label statements are emitted.
    List<Statement> statements = getStatementsForVertex("~id,~label,name,name2\n1,mister;mister2,x,x2");
    assertEquals(4, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x2", "dng:a"), statements.get(3));
}
@Test
public void createRdfsLabelAndProperty() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperty = new HashMap<>();
    labelProperty.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperty);
    // Property statements stay enabled: expect type, rdfs:label, and the property itself.
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister,x");
    assertEquals(3, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), statements.get(2));
}
@Test
public void createRdfsLabelAndPropertyHavingTwoLabels() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperty = new HashMap<>();
    labelProperty.put("mister", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperty);
    // Two labels -> two type statements; rdfs:label plus the property statement follow.
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister;mister2,x");
    assertEquals(4, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), statements.get(3));
}
@Test
public void createRdfsLabelAndPropertyHavingTwoLabelsAndTwoLabelProperties() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    labelProperties.put("mister2", "name");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    // Both labels share one label property, so rdfs:label and the property appear once each.
    List<Statement> statements = getStatementsForVertex("~id,~label,name\n1,mister;mister2,x");
    assertEquals(4, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), statements.get(3));
}
@Test
public void createRdfsLabelAndPropertyHavingTwoLabelsAndTwoDifferentLabelProperties() throws IOException {
    // Plain map construction instead of the double-brace initialization anti-pattern.
    HashMap<String, String> labelProperties = new HashMap<>();
    labelProperties.put("mister", "name");
    labelProperties.put("mister2", "name2");
    mapping.setPgVertexType2PropertyForRdfsLabel(labelProperties);
    // Two labels with distinct label properties: each contributes an rdfs:label and a property.
    List<Statement> statements = getStatementsForVertex("~id,~label,name,name2\n1,mister;mister2,x,x2");
    assertEquals(6, statements.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), statements.get(0));
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister2", "dng:a"), statements.get(1));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x", "dng:a"), statements.get(2));
    assertEquals(literal("vn:1", "vpn:name", "x", "dng:a"), statements.get(3));
    assertEquals(literal("vn:1", RDFS.LABEL.stringValue(), "x2", "dng:a"), statements.get(4));
    assertEquals(literal("vn:1", "vpn:name2", "x2", "dng:a"), statements.get(5));
}
@Test
public void propertyIsEncoded() throws IOException {
    // Reserved and non-ASCII characters in the column name must be percent-encoded
    // when it becomes part of the predicate IRI.
    List<Statement> emitted = getStatementsForVertex("~id,~label,{Heizölrückstoßabdämpfung}\n1,mister,x");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:Mister", "dng:a"), emitted.get(0));
    assertEquals(literal("vn:1", "vpn:%7BHeiz%C3%B6lr%C3%BCcksto%C3%9Fabd%C3%A4mpfung%7D", "x", "dng:a"),
            emitted.get(1));
}
@Test
public void edgeWithLabel() throws IOException {
    // A labeled edge becomes one relation statement plus one statement per edge property.
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1,related,2,3,x");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:2", "en:related", "vn:3", "ec:1"), emitted.get(0));
    assertEquals(literal("ec:1", "epn:name", "x", "dng:a"), emitted.get(1));
}
@Test
public void edgeWithNullLabel() throws IOException {
    // A missing edge label falls back to the default predicate "dp:a".
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1,,2,3,x");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:2", "dp:a", "vn:3", "ec:1"), emitted.get(0));
    assertEquals(literal("ec:1", "epn:name", "x", "dng:a"), emitted.get(1));
}
@Test
public void edgeWithWhitespaceLabel() throws IOException {
    // A whitespace-only edge label is treated like a missing one: default predicate applies.
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1, ,2,3,x");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:2", "dp:a", "vn:3", "ec:1"), emitted.get(0));
    assertEquals(literal("ec:1", "epn:name", "x", "dng:a"), emitted.get(1));
}
@Test
public void edgeDoesNotSplitLabel() throws IOException {
    // Unlike vertex labels, an edge label is not split at semicolons; the whole
    // string is percent-encoded into a single predicate IRI.
    List<Statement> emitted = getStatementsForEdge(
            "~id,~label,~from,~to,name\n1,related;related2;related3,2,3,x");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:2", "en:related%3Brelated2%3Brelated3", "vn:3", "ec:1"), emitted.get(0));
    assertEquals(literal("ec:1", "epn:name", "x", "dng:a"), emitted.get(1));
}
@Test
public void nullValueDoesNotCreateEdgeProperty() throws IOException {
    // A missing edge property value must yield only the relation statement.
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1,related,2,3,");
    assertEquals(1, emitted.size());
    assertEquals(relation("vn:2", "en:related", "vn:3", "ec:1"), emitted.get(0));
}
@Test
public void emptyValueDoesNotCreateEdgeProperty() throws IOException {
    // A whitespace-only edge property value must yield only the relation statement.
    List<Statement> emitted = getStatementsForEdge("~id,~label,name,~from,~to\n1,related, ,2,3");
    assertEquals(1, emitted.size());
    assertEquals(relation("vn:2", "en:related", "vn:3", "ec:1"), emitted.get(0));
}
@Test
public void tolerateTooFewRecordColumns() throws IOException {
    // A record with fewer columns than the header is accepted; the trailing
    // property is simply absent.
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1,a,2,3");
    assertEquals(1, emitted.size());
    assertEquals(relation("vn:2", "en:a", "vn:3", "ec:1"), emitted.get(0));
}
@Test
public void tolerateTooManyRecordColumns() throws IOException {
    // A record with more columns than the header is accepted; surplus columns
    // ("Bob") are silently dropped.
    List<Statement> emitted = getStatementsForEdge("~id,~label,~from,~to,name\n1,a,2,3,Alice,Bob");
    assertEquals(2, emitted.size());
    assertEquals(relation("vn:2", "en:a", "vn:3", "ec:1"), emitted.get(0));
    assertEquals(literal("ec:1", "epn:name", "Alice", "dng:a"), emitted.get(1));
}
// Exercises every Neptune CSV datatype suffix and pins the XSD datatype each one
// is mapped to in the emitted literals.
@Test
public void allDataTypes() throws IOException {
List<Statement> statements = getStatementsForVertex(
"~id,~label,byte:byte,bool:bool,boolean:boolean,short:short,int:int,integer:integer,long:long,float:float,double:double,string:string,datetime:datetime,date:date"
+ "\n1,dataTypes,A0,true,false,32767,2147483647,-2147483648,9223372036854775807,3.14,3.1415926,Hello World!,2020-03-05T13:30:00Z,2020-02-29");
assertEquals(13, statements.size());
assertEquals(relation("vn:1", RDF.TYPE.stringValue(), "tn:DataTypes", "dng:a"), statements.get(0));
assertEquals(literal("vn:1", "vpn:byte", "A0", XMLSchema.BYTE, "dng:a"), statements.get(1));
assertEquals(literal("vn:1", "vpn:bool", "true", XMLSchema.BOOLEAN, "dng:a"), statements.get(2));
assertEquals(literal("vn:1", "vpn:boolean", "false", XMLSchema.BOOLEAN, "dng:a"), statements.get(3));
assertEquals(literal("vn:1", "vpn:short", "32767", XMLSchema.SHORT, "dng:a"), statements.get(4));
// NOTE(review): both "int" and "integer" are asserted as xsd:integer rather than
// xsd:int — presumably the converter widens them intentionally; confirm.
assertEquals(literal("vn:1", "vpn:int", "2147483647", XMLSchema.INTEGER, "dng:a"), statements.get(5));
assertEquals(literal("vn:1", "vpn:integer", "-2147483648", XMLSchema.INTEGER, "dng:a"), statements.get(6));
assertEquals(literal("vn:1", "vpn:long", "9223372036854775807", XMLSchema.LONG, "dng:a"), statements.get(7));
assertEquals(literal("vn:1", "vpn:float", "3.14", XMLSchema.FLOAT, "dng:a"), statements.get(8));
assertEquals(literal("vn:1", "vpn:double", "3.1415926", XMLSchema.DOUBLE, "dng:a"), statements.get(9));
assertEquals(literal("vn:1", "vpn:string", "Hello World!", XMLSchema.STRING, "dng:a"), statements.get(10));
// NOTE(review): the datetime column is asserted as XMLSchema.DATE, not DATETIME —
// this looks like it documents current converter behavior; verify whether intended.
assertEquals(literal("vn:1", "vpn:datetime", "2020-03-05T13:30:00Z", XMLSchema.DATE, "dng:a"),
statements.get(11));
assertEquals(literal("vn:1", "vpn:date", "2020-02-29", XMLSchema.DATE, "dng:a"), statements.get(12));
}
}
| 7,433 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/Csv2RdfIntegrationTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.List;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.opentest4j.AssertionFailedError;
import picocli.CommandLine;
@Tag("IntegrationTest")
public class Csv2RdfIntegrationTest {

    private static final String AIR_ROUTES_VERSION = "0.81";

    private static final Path AIR_ROUTES = Paths.get("src", "test", "air-routes");
    private static final Path INTEGRATION_TEST = Paths.get("src", "test", "integration-test");
    private static final Path CARDINALITY_TEST = Paths.get("src", "test", "cardinality-test");
    private static final Path EXAMPLE = Paths.get("src", "test", "example");
    private static final Path TARGET = Paths.get("target");
    private static final Path UNZIPPED_AIR_ROUTES = TARGET.resolve("air-routes-" + AIR_ROUTES_VERSION);

    /**
     * Converts the zipped air-routes sample data set and compares the generated
     * N-Quads output against the checked-in master files.
     */
    @Test
    public void compareAirRoutesOutputFiles() throws IOException {
        unzip(AIR_ROUTES.resolve("air-routes-" + AIR_ROUTES_VERSION + ".zip"), UNZIPPED_AIR_ROUTES);
        int exitCode = new CommandLine(new Csv2Rdf()).execute("-c",
                AIR_ROUTES.resolve("air-routes.properties").toString(), "-i", UNZIPPED_AIR_ROUTES.toString(), "-o",
                TARGET.toString());
        Assertions.assertEquals(0, exitCode);
        this.assertThatAllLinesInFilesAreEqual(
                UNZIPPED_AIR_ROUTES.resolve("air-routes-" + AIR_ROUTES_VERSION + "-edges.nq.master"),
                TARGET.resolve("air-routes-" + AIR_ROUTES_VERSION + "-edges.nq"));
        this.assertThatAllLinesInFilesAreEqual(
                UNZIPPED_AIR_ROUTES.resolve("air-routes-" + AIR_ROUTES_VERSION + "-nodes.nq.master"),
                TARGET.resolve("air-routes-" + AIR_ROUTES_VERSION + "-nodes.nq"));
    }

    /**
     * Converts a small data set with an explicit configuration file and compares
     * the output against the master files.
     */
    @Test
    public void smallIntegrationTest() throws IOException {
        int exitCode = new CommandLine(new Csv2Rdf()).execute("-c",
                INTEGRATION_TEST.resolve("integration-test.properties").toString(), "-i", INTEGRATION_TEST.toString(),
                "-o", TARGET.toString());
        assertEquals(0, exitCode);
        this.assertThatAllLinesInFilesAreEqual(INTEGRATION_TEST.resolve("integration-test-edges.nq.master"),
                TARGET.resolve("integration-test-edges.nq"));
        this.assertThatAllLinesInFilesAreEqual(INTEGRATION_TEST.resolve("integration-test-nodes.nq.master"),
                TARGET.resolve("integration-test-nodes.nq"));
    }

    /**
     * Runs the conversion without a configuration file into a nested output
     * directory that does not exist yet. The created files and directories are
     * removed afterwards; a cleanup failure is reported without masking an
     * earlier assertion failure (it is attached as a suppressed exception).
     */
    @Test
    public void smallIntegrationTestUsingDefaultConfiguration() throws IOException {
        // fixed typo: "nestedSubdiretcory" -> "nestedSubdirectory"
        Path nestedSubdirectory = TARGET.resolve("nested-integration").resolve("subdirectory");
        assertFalse(Files.exists(nestedSubdirectory.getParent()));
        AssertionFailedError assertionFailedError = null;
        try {
            int exitCode = new CommandLine(new Csv2Rdf()).execute("-i", INTEGRATION_TEST.toString(), "-o",
                    nestedSubdirectory.toString());
            assertEquals(0, exitCode);
            this.assertThatAllLinesInFilesAreEqual(INTEGRATION_TEST.resolve("integration-test-edges.nq.master.default"),
                    nestedSubdirectory.resolve("integration-test-edges.nq"));
            this.assertThatAllLinesInFilesAreEqual(INTEGRATION_TEST.resolve("integration-test-nodes.nq.master.default"),
                    nestedSubdirectory.resolve("integration-test-nodes.nq"));
        } catch (AssertionFailedError e) {
            assertionFailedError = e;
        } finally {
            try {
                assertDoesNotThrow(() -> Files.delete(nestedSubdirectory.resolve("integration-test-edges.nq")));
                assertDoesNotThrow(() -> Files.delete(nestedSubdirectory.resolve("integration-test-nodes.nq")));
                assertDoesNotThrow(() -> Files.delete(nestedSubdirectory));
                assertDoesNotThrow(() -> Files.delete(nestedSubdirectory.getParent()));
            } catch (AssertionFailedError e2) {
                if (assertionFailedError != null) {
                    assertionFailedError.addSuppressed(e2);
                    throw assertionFailedError;
                }
                throw e2;
            }
            if (assertionFailedError != null) {
                throw assertionFailedError;
            }
        }
    }

    /**
     * Converts the small city example and compares the output against the master
     * files.
     */
    @Test
    public void smallExample() throws IOException {
        int exitCode = new CommandLine(new Csv2Rdf()).execute("-c", EXAMPLE.resolve("city.properties").toString(), "-i",
                EXAMPLE.toString(), "-o", TARGET.toString());
        assertEquals(0, exitCode);
        this.assertThatAllLinesInFilesAreEqual(EXAMPLE.resolve("city-edges.nq.master"),
                TARGET.resolve("city-edges.nq"));
        this.assertThatAllLinesInFilesAreEqual(EXAMPLE.resolve("city-nodes.nq.master"),
                TARGET.resolve("city-nodes.nq"));
    }

    /**
     *
     * Currently, each line of the CSV file is parsed individually, so property
     * values defined on different lines for the same ID are not joined and
     * cardinality constraints cannot be checked:
     * <ul>
     * <li>The statement {@code <vertex:1> <eproperty:since> "tomorrow" <dng:/>}
     * should be rejected because edge properties have single cardinality.</li>
     * <li>The result of the test should contain only one
     * {@code <vertex:2> <edge:knows> <vertex:3> <vertex:1>} statement (however, RDF
     * joins multiple equal statements into one).</li>
     * <li>The statement
     * {@code <vertex:3> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <dt:/> <dng:/>}
     * should not be generated because vertex 3 has a label.</li>
     * </ul>
     *
     * @throws IOException
     */
    @Test
    public void cardinalityTest() throws IOException {
        int exitCode = new CommandLine(new Csv2Rdf()).execute("-c", EXAMPLE.resolve("city.properties").toString(), "-i",
                CARDINALITY_TEST.toString(), "-o", TARGET.toString());
        assertEquals(0, exitCode);
        this.assertThatAllLinesInFilesAreEqual(CARDINALITY_TEST.resolve("cardinality-test-edges.nq.master"),
                TARGET.resolve("cardinality-test-edges.nq"));
        this.assertThatAllLinesInFilesAreEqual(CARDINALITY_TEST.resolve("cardinality-test-nodes.nq.master"),
                TARGET.resolve("cardinality-test-nodes.nq"));
    }

    /**
     *
     * Assert that two files are equal comparing line by line.
     *
     * @param expected file with expected content
     * @param actual   file with actual content
     * @throws IOException if either file cannot be read
     */
    private void assertThatAllLinesInFilesAreEqual(Path expected, Path actual) throws IOException {
        List<String> expectedLines = Files.readAllLines(expected);
        List<String> actualLines = Files.readAllLines(actual);
        Iterator<String> expectedIt = expectedLines.iterator();
        Iterator<String> actualIt = actualLines.iterator();
        while (expectedIt.hasNext() && actualIt.hasNext()) {
            assertEquals(expectedIt.next(), actualIt.next());
        }
        // both iterators must be exhausted, i.e. the files have the same length
        assertFalse(expectedIt.hasNext());
        assertFalse(actualIt.hasNext());
    }

    /**
     *
     * Unzip an archive.
     *
     * @param zipFile         input file
     * @param outputDirectory output directory, is created if it does not exist
     * @throws IOException if the archive cannot be read, an entry would escape the
     *                     output directory, or an entry cannot be written
     */
    private void unzip(Path zipFile, Path outputDirectory) throws IOException {
        try {
            Files.createDirectory(outputDirectory);
        } catch (FileAlreadyExistsException e) {
            // the directory may be left over from a previous run; reuse it
        }
        Path normalizedOutputDirectory = outputDirectory.normalize();
        byte[] buffer = new byte[1024];
        try (ZipInputStream zis = new ZipInputStream(new FileInputStream(zipFile.toFile()))) {
            for (ZipEntry zipEntry = zis.getNextEntry(); zipEntry != null; zipEntry = zis.getNextEntry()) {
                Path target = outputDirectory.resolve(zipEntry.getName()).normalize();
                // guard against "zip slip": an entry like "../x" must not escape the
                // output directory
                if (!target.startsWith(normalizedOutputDirectory)) {
                    throw new IOException("Zip entry outside of output directory: " + zipEntry.getName());
                }
                if (zipEntry.isDirectory()) {
                    // create intermediate directories instead of opening a stream on them
                    Files.createDirectories(target);
                } else {
                    try (FileOutputStream fos = new FileOutputStream(target.toFile())) {
                        int len;
                        while ((len = zis.read(buffer)) > 0) {
                            fos.write(buffer, 0, len);
                        }
                    }
                }
                zis.closeEntry();
            }
        }
    }
}
| 7,434 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/PropertyGraph2RdfConverterTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.regex.Pattern;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
public class PropertyGraph2RdfConverterTest {

    private static final Path INPUT_DIRECTORY = Paths.get("src", "test", "inputDirectoryTest");
    private static final Path TARGET = Paths.get("target");

    // fresh converter per test, created without a configuration file
    private PropertyGraph2RdfConverter converter;

    @BeforeEach
    public void init() {
        converter = new PropertyGraph2RdfConverter(null);
    }

    /** Listing CSV files works when the directory path has no trailing separator. */
    @Test
    public void readInputDirectoryWithoutTrailingPathSeparator() {
        String directory = INPUT_DIRECTORY.toString();
        assertFalse(directory.endsWith(File.separator));
        List<File> csvFiles = converter.listPropertyGraphFiles(new File(directory));
        assertEquals(2, csvFiles.size());
        assertTrue(csvFiles.contains(INPUT_DIRECTORY.resolve("test1.csv").toFile()));
        assertTrue(csvFiles.contains(INPUT_DIRECTORY.resolve("test2.csv").toFile()));
    }

    /** Listing CSV files works when the directory path ends with a separator. */
    @Test
    public void readInputDirectoryWithTrailingPathSeparator() {
        String directory = INPUT_DIRECTORY.toString();
        assertFalse(directory.endsWith(File.separator));
        directory += File.separator;
        List<File> csvFiles = converter.listPropertyGraphFiles(new File(directory));
        assertEquals(2, csvFiles.size());
        assertTrue(csvFiles.contains(INPUT_DIRECTORY.resolve("test1.csv").toFile()));
        assertTrue(csvFiles.contains(INPUT_DIRECTORY.resolve("test2.csv").toFile()));
    }

    /** A non-default file extension restricts the listing to matching files. */
    @Test
    public void customInputFileExtension() {
        converter.setInputFileExtension("txt");
        List<File> txtFiles = converter.listPropertyGraphFiles(INPUT_DIRECTORY.toFile());
        assertEquals(1, txtFiles.size());
        assertTrue(txtFiles.contains(INPUT_DIRECTORY.resolve("ignore.txt").toFile()));
    }

    /** An existing but empty input directory is rejected with a clear message. */
    @Test
    public void inputDirectoryEmpty() {
        File emptyDir = TARGET.resolve("empty-dir").toFile();
        emptyDir.mkdir();
        Csv2RdfException exception = assertThrows(Csv2RdfException.class,
                () -> converter.listPropertyGraphFiles(emptyDir));
        assertTrue(exception.getMessage()
                .matches("No files with extension csv found at: .*" + Pattern.quote(emptyDir.getPath())));
    }

    /** A missing input directory is rejected with a clear message. */
    @Test
    public void inputDirectoryDoesNotExist() {
        File missingDir = TARGET.resolve("non-existing-dir").toFile();
        Csv2RdfException exception = assertThrows(Csv2RdfException.class,
                () -> converter.listPropertyGraphFiles(missingDir));
        assertTrue(exception.getMessage()
                .matches("Could not read from input directory: .*" + Pattern.quote(missingDir.getPath())));
    }

    /** Output file name derivation works without a trailing separator. */
    @Test
    public void getOutputFileWithoutTrailingPathSeparator() {
        String outputDirectory = TARGET.toString();
        assertFalse(outputDirectory.endsWith(File.separator));
        File rdfFile = converter.getRdfFile(new File(outputDirectory), new File("test.csv"));
        assertEquals(TARGET.resolve("test.nq").toFile(), rdfFile);
    }

    /** Output file name derivation works with a trailing separator. */
    @Test
    public void getOutputFileWithTrailingPathSeparator() {
        String outputDirectory = TARGET.toString();
        assertFalse(outputDirectory.endsWith(File.separator));
        outputDirectory += File.separator;
        File rdfFile = converter.getRdfFile(new File(outputDirectory), new File("test.csv"));
        assertEquals(TARGET.resolve("test.nq").toFile(), rdfFile);
    }
}
| 7,435 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/test/java/software/amazon/neptune/csv2rdf/NeptuneCsvInputParserTest.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.List;
import org.junit.jupiter.api.Test;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSetValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphVertex;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
public class NeptuneCsvInputParserTest {

    /** A CSV file without a header row must be rejected. */
    @Test
    public void emptyCsvIsRejected() {
        File empty = Paths.get("src", "test", "inputParserTest", "empty.csv").toFile();
        Csv2RdfException exception = assertThrows(Csv2RdfException.class, () -> new NeptuneCsvInputParser(empty));
        assertEquals("No header column found in input CSV file!", exception.getMessage());
    }

    /** Well-formed UTF-8 input is parsed without replacement characters. */
    @Test
    public void validUtf8IsSuccessful() {
        // fixed misleading local name: this file IS valid UTF-8
        File validUtf8 = Paths.get("src", "test", "inputParserTest", "valid-utf8.csv").toFile();
        try (NeptuneCsvInputParser parser = new NeptuneCsvInputParser(validUtf8)) {
            NeptuneCsvUserDefinedProperty property = ((NeptunePropertyGraphVertex) parser.next())
                    .getUserDefinedProperties().get(0);
            assertEquals("Bärbel", ((NeptuneCsvSetValuedUserDefinedProperty) property).getValues().iterator().next());
        }
    }

    /** A byte that is not valid UTF-8 is replaced with U+FFFD instead of failing. */
    @Test
    public void invalidUtf8CharacterIsReplaced() {
        File invalidUtf8 = Paths.get("src", "test", "inputParserTest", "iso-8859-15-with-E4-8bit-character.csv")
                .toFile();
        try (NeptuneCsvInputParser parser = new NeptuneCsvInputParser(invalidUtf8)) {
            NeptuneCsvUserDefinedProperty property = ((NeptunePropertyGraphVertex) parser.next())
                    .getUserDefinedProperties().get(0);
            assertEquals("B�rbel", ((NeptuneCsvSetValuedUserDefinedProperty) property).getValues().iterator().next());
        }
    }

    /** Parsing a missing file must fail with a descriptive exception. */
    @Test
    public void missingFileIsRejected() {
        File notExistingFile = Paths.get("src", "test", "inputParserTest", "not-existing.csv").toFile();
        Csv2RdfException exception = assertThrows(Csv2RdfException.class,
                () -> new NeptuneCsvInputParser(notExistingFile));
        assertTrue(exception.getMessage()
                .matches("Error creating input stream for CSV file .*" + notExistingFile.getPath()));
    }

    /** The parser also accepts a plain {@link java.io.InputStream}. */
    @Test
    public void openFromInputStream() throws FileNotFoundException {
        File validUtf8 = Paths.get("src", "test", "inputParserTest", "valid-utf8.csv").toFile();
        // the stream is handed over to the parser, which is responsible for closing it
        FileInputStream ins = new FileInputStream(validUtf8);
        try (NeptuneCsvInputParser parser = new NeptuneCsvInputParser(ins)) {
            NeptuneCsvUserDefinedProperty property = ((NeptunePropertyGraphVertex) parser.next())
                    .getUserDefinedProperties().get(0);
            assertEquals("Bärbel", ((NeptuneCsvSetValuedUserDefinedProperty) property).getValues().iterator().next());
        }
    }

    /** Escaped semicolons are kept inside labels and values instead of splitting them. */
    @Test
    public void escapedSemicolon() {
        File escapedSemi = Paths.get("src", "test", "inputParserTest", "escaped-semicolon.csv").toFile();
        try (NeptuneCsvInputParser parser = new NeptuneCsvInputParser(escapedSemi)) {
            NeptunePropertyGraphVertex v = (NeptunePropertyGraphVertex) parser.next();
            List<NeptuneCsvUserDefinedProperty> props = v.getUserDefinedProperties();
            Collection<String> namesValues = props.get(0).getValues();
            Collection<String> labels = v.getLabels();
            assertTrue(labels.contains("person;"));
            assertTrue(labels.contains("boss"));
            // fixed argument order: assertEquals expects (expected, actual)
            assertEquals(2, labels.size());
            assertEquals(2, namesValues.size());
            assertTrue(namesValues.contains("John;Smith"));
            assertTrue(namesValues.contains("Jane;Smith"));
        }
    }

    /** Colons in header names can be escaped; the data type suffix is parsed correctly. */
    @Test
    public void escapedColonInHeader() {
        File escapedColon = Paths.get("src", "test", "inputParserTest", "colon-in-header.csv").toFile();
        try (NeptuneCsvInputParser parser = new NeptuneCsvInputParser(escapedColon)) {
            NeptunePropertyGraphVertex v = (NeptunePropertyGraphVertex) parser.next();
            List<NeptuneCsvUserDefinedProperty> props = v.getUserDefinedProperties();
            String propName1 = props.get(0).getName();
            String propName2 = props.get(1).getName();
            String propName3 = props.get(2).getName();
            String propName4 = props.get(3).getName();
            DataType dt1 = props.get(0).getDataType();
            DataType dt2 = props.get(1).getDataType();
            DataType dt3 = props.get(2).getDataType();
            DataType dt4 = props.get(3).getDataType();
            assertEquals("http://example.com/age", propName1);
            assertEquals("multi:backslash\\\\:header", propName2);
            assertEquals("noDataType:", propName3);
            assertEquals("having\\back\\slashes:end", propName4);
            assertEquals(DataType.INT, dt1);
            assertEquals(DataType.STRING, dt2);
            assertEquals(DataType.STRING, dt3);
            assertEquals(DataType.INT, dt4);
        }
    }
}
| 7,436 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/UriPostTransformer.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Literal;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.Value;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.RDFParseException;
import org.eclipse.rdf4j.rio.RDFParser;
import org.eclipse.rdf4j.rio.RDFWriter;
import org.eclipse.rdf4j.rio.Rio;
import org.eclipse.rdf4j.rio.UnsupportedRDFormatException;
import org.eclipse.rdf4j.rio.helpers.AbstractRDFHandler;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
/**
*
* This class applies transformations specified in zero to many
* {@link UriPostTransformation} to the RDF resource IRIs resulting from the
* basic mapping performed by {@link PropertyGraph2RdfMapper}.
*
* The transformations can be defined in the configuration file.
*
*/
@Slf4j
@JsonAutoDetect(fieldVisibility = Visibility.NONE, getterVisibility = Visibility.NONE)
public class UriPostTransformer {
private final SimpleValueFactory vf = SimpleValueFactory.getInstance();
/**
* List of rewriting rules for RDF resource IRIs.
*/
@Getter
@Setter
private Collection<UriPostTransformation> uriPostTransformations = new ArrayList<>();
/**
 * Apply all configured URI rewriting rules to the given RDF files. A first
 * pass over every file collects matching resources and replacement values,
 * a second pass rewrites the files. Does nothing when no rules are configured.
 *
 * @param files the list of RDF files that need to be transformed
 * @param baseUri used for resolving relative URIs
 * @throws Csv2RdfException if the transformation fails
 */
public void applyTo(List<File> files, String baseUri) {
    if (uriPostTransformations.isEmpty()) {
        return;
    }
    log.info("-> Applying URI post transformations...");
    // first pass: collect candidate resources and their replacement values
    files.forEach(file -> extractUriTransformationResourcesAndReplacementValues(file, baseUri));
    // second pass: rewrite the matching IRIs in place
    files.forEach(file -> transformResources(file, baseUri));
}
/**
 * Read each RDF statement and store its resources and/or literal values if they
 * are matching a transformation rule.
 *
 * @param file RDF file, will not be changed
 * @param baseUri used for resolving relative URIs
 * @throws Csv2RdfException if the file cannot be read or parsed
 */
private void extractUriTransformationResourcesAndReplacementValues(File file, String baseUri) {
    // parameterized SLF4J logging instead of eager string concatenation
    log.info("--> Extracting URI transformation resources and replacement values from {}...", file.getName());
    try (FileInputStream fis = new FileInputStream(file)) {
        RDFParser rdfParser = Rio.createParser(PropertyGraph2RdfConverter.RDF_FORMAT);
        rdfParser.setRDFHandler(new AbstractRDFHandler() {
            @Override
            public void handleStatement(Statement statement) {
                // offer every statement to the configured transformation rules
                register(statement);
            }
        });
        rdfParser.parse(fis, baseUri);
    } catch (UnsupportedRDFormatException | RDFHandlerException | RDFParseException | IOException e) {
        throw new Csv2RdfException("Extracting URI transformation resources and replacement values from "
                + file.getAbsolutePath() + " failed.", e);
    }
}
/**
 *
 * Register all resources and the literal of a statement at all transformation
 * rules. Registration is a no-op for rules that do not match.
 *
 * @param statement the statement whose components are offered to the rules
 */
// visible for testing
void register(Statement statement) {
    // only statements with IRI subject and predicate are of interest
    if (!(statement.getSubject() instanceof IRI) || !(statement.getPredicate() instanceof IRI)) {
        return;
    }
    String subject = statement.getSubject().stringValue();
    String predicate = statement.getPredicate().stringValue();
    String object = statement.getObject().stringValue();
    for (UriPostTransformation rule : uriPostTransformations) {
        if (statement.getObject() instanceof IRI) {
            // register the URIs (may be a no-op)
            rule.registerResource(subject, predicate, object);
        } else if (statement.getObject() instanceof Literal) {
            // register the value (may be a no-op)
            rule.registerReplacementValue(subject, predicate, object);
        }
    }
}
/**
* Read all RDF statements and rewrite their IRI if applicable. <br>
* Has no effect for resources whose IRI did not match any transformation rule
* in
* {@link UriPostTransformer#extractUriTransformationResourcesAndReplacementValues}
* or if no replacement value was found.
*
* @param file RDF file, will be changed during the process
* @param baseUri used for resolving relative URIs
*/
private void transformResources(File file, String baseUri) {
log.info("--> Transforming resources in " + file.getName() + "...");
File transformedFile = new File(file.getParentFile(), "transformed." + file.getName());
try (FileOutputStream fos = new FileOutputStream(transformedFile);
FileInputStream fis = new FileInputStream(file)) {
final RDFWriter rdfWriter = Rio.createWriter(PropertyGraph2RdfConverter.RDF_FORMAT, fos);
rdfWriter.startRDF();
RDFParser rdfParser = Rio.createParser(PropertyGraph2RdfConverter.RDF_FORMAT);
rdfParser.setRDFHandler(new AbstractRDFHandler() {
@Override
public void handleStatement(Statement statement) {
Statement statement2 = transform(statement);
rdfWriter.handleStatement(statement2);
}
});
rdfParser.parse(fis, baseUri);
rdfWriter.endRDF();
} catch (UnsupportedRDFormatException | RDFHandlerException | RDFParseException | IOException e) {
throw new Csv2RdfException("Applying URI transformation to file " + file.getAbsolutePath() + " failed.", e);
}
if (!transformedFile.renameTo(file)) {
throw new Csv2RdfException("Transformed file " + transformedFile.getName() + " could not be renamed to: "
+ file.getAbsolutePath());
}
}
/**
*
* Apply the transformation rules to the resources of a statement. The first
* matching rule makes the change.
*
* @param statement
* @return a new statement with transformed resources, resources that do not
* match a rule are not modified; if no resource matched a rule the
* incoming statement is returned
* @throws Csv2RdfException if the IRI cannot be created
*/
// visible for testing
Statement transform(Statement statement) {
Resource newSubject = null;
IRI newPredicate = null;
Value newObject = null;
Resource newContext = null;
for (UriPostTransformation uriPostTransformation : uriPostTransformations) {
// register the URIs (may be a no-op)
if (newSubject == null && statement.getSubject() instanceof IRI) {
newSubject = uriPostTransformation.apply(statement.getSubject().stringValue());
}
if (newPredicate == null && statement.getPredicate() instanceof IRI) {
newPredicate = uriPostTransformation.apply(statement.getPredicate().stringValue());
}
if (newObject == null && statement.getObject() instanceof IRI) {
newObject = uriPostTransformation.apply(statement.getObject().stringValue());
}
if (newContext == null && statement.getContext() instanceof IRI) {
newContext = uriPostTransformation.apply(statement.getContext().stringValue());
}
}
if (newSubject == null && newPredicate == null && newObject == null && newContext == null) {
return statement;
}
Resource subject = newSubject == null ? statement.getSubject() : newSubject;
IRI predicate = newPredicate == null ? statement.getPredicate() : newPredicate;
Value object = newObject == null ? statement.getObject() : newObject;
Resource context = newContext == null ? statement.getContext() : newContext;
return vf.createStatement(subject, predicate, object, context);
}
} | 7,437 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/NeptuneCsvInputParser.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.Iterator;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.csv.QuoteMode;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import software.amazon.neptune.csv2rdf.NeptuneCsvHeader.NeptuneCsvEdgeHeader;
import software.amazon.neptune.csv2rdf.NeptuneCsvHeader.NeptuneCsvVertexHeader;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.Cardinality;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSetValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSingleValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvUserDefinedArrayProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphEdge;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphVertex;
/**
*
* Parser for the Neptune CSV property graph format.
* {@link NeptuneCsvInputParser#next()} iterates over all vertices or edges in
* the source file.
*
*/
@Slf4j
public class NeptuneCsvInputParser implements AutoCloseable, Iterator<NeptunePropertyGraphElement> {

    /**
     * CSV parser for the Neptune property graph file; owns the underlying stream.
     */
    private final CSVParser csvParser;

    /**
     * header of the CSV file (first row)
     */
    private final NeptuneCsvHeader header;

    /**
     * the record iterator, which will return the data (non-header) records of the
     * file
     */
    private final Iterator<CSVRecord> iterator;

    /**
     * Sets up a {@link CSVRecord} iterator over the input file and parses the first
     * row as header.
     *
     * @param file CSV input file
     * @throws Csv2RdfException if the file cannot be opened
     * @throws Csv2RdfException if the CSV parser cannot be created
     * @throws Csv2RdfException if there is no header column in the data
     */
    public NeptuneCsvInputParser(final File file) {
        // Only stream creation can raise an IOException here; the remaining setup
        // throws Csv2RdfException directly, so the catch block is kept narrow.
        InputStream ins;
        try {
            ins = new FileInputStream(file);
        } catch (IOException e) {
            throw new Csv2RdfException("Error creating input stream for CSV file " + file.getAbsolutePath(), e);
        }
        this.csvParser = setupParser(createInputStreamReader(ins));
        this.iterator = this.csvParser.iterator();
        this.header = setupHeaderOrClose();
    }

    /**
     * Sets up a {@link CSVRecord} iterator over the input stream and parse the first
     * row as header.
     *
     * @param ins CSV input stream
     * @throws Csv2RdfException if the CSV parser cannot be created
     * @throws Csv2RdfException if there is no header column in the data
     */
    public NeptuneCsvInputParser(final InputStream ins) {
        this.csvParser = setupParser(createInputStreamReader(ins));
        this.iterator = this.csvParser.iterator();
        this.header = setupHeaderOrClose();
    }

    /**
     * Parses the header row; if that fails, closes the parser (and thereby the
     * underlying stream) before rethrowing, so a failed construction does not leak
     * the input resource — the caller never obtains an instance to close.
     *
     * @return the parsed header
     */
    private NeptuneCsvHeader setupHeaderOrClose() {
        try {
            return setupHeader();
        } catch (RuntimeException e) {
            try {
                this.csvParser.close();
            } catch (IOException e1) {
                e.addSuppressed(e1);
            }
            throw e;
        }
    }

    /**
     * Create the format for parsing the CSV file according to RFC4180 with:
     * <ul>
     * <li>ignore empty lines</li>
     * <li>ignore surrounding spaces</li>
     * <li>empty string means {@code null}</li>
     * <li>minimal quotes</li>
     * </ul>
     *
     * @return CSV format
     */
    // visible for testing
    static CSVFormat createCSVFormat() {
        return CSVFormat.RFC4180.withIgnoreEmptyLines(true).withIgnoreSurroundingSpaces(true).withNullString("")
                .withQuoteMode(QuoteMode.MINIMAL);
    }

    @Override
    public void close() {
        if (this.csvParser != null) {
            try {
                this.csvParser.close();
            } catch (IOException e) {
                throw new Csv2RdfException("Parser could not be closed.", e);
            }
        }
    }

    /**
     * Sets up and returns the record parser, positioned at the beginning of the
     * file. On failure the reader is closed before the exception is propagated.
     *
     * @param reader a reader for the input CSV file
     * @return CSV parser
     * @throws Csv2RdfException if the parser cannot be created
     */
    private CSVParser setupParser(@NonNull final Reader reader) {
        try {
            CSVFormat csvFormat = createCSVFormat();
            return csvFormat.parse(reader);
        } catch (final IOException e) {
            try {
                reader.close();
            } catch (IOException e1) {
                e.addSuppressed(new Csv2RdfException(
                        "Error setting up CSV parser, reader is supposed to close but could not be closed.", e1));
            }
            throw new Csv2RdfException("Error setting up CSV parser.", e);
        }
    }

    /**
     * Initializes the header column, using the iterator's current position. This
     * must be called exactly once at the beginning. The iterator is advanced by one
     * line.
     *
     * @return the parsed header
     * @throws Csv2RdfException if the input contains no rows at all
     */
    private NeptuneCsvHeader setupHeader() {
        if (!iterator.hasNext()) {
            throw new Csv2RdfException("No header column found in input CSV file!");
        }
        final CSVRecord record = iterator.next();
        return NeptuneCsvHeader.parse(record);
    }

    /**
     * Create a UTF-8 input stream reader over the given input stream. On failure
     * the buffered stream is closed before the exception is propagated.
     *
     * @param ins the Neptune CSV property graph input stream
     * @return input stream reader
     * @throws Csv2RdfException if the UTF-8 charset is not supported
     */
    private Reader createInputStreamReader(@NonNull final InputStream ins) {
        BufferedInputStream bufferedStream = null;
        try {
            bufferedStream = new BufferedInputStream(ins);
            return new InputStreamReader(bufferedStream, StandardCharsets.UTF_8.name());
        } catch (UnsupportedEncodingException e) {
            if (bufferedStream != null) {
                try {
                    bufferedStream.close();
                } catch (IOException e1) {
                    e.addSuppressed(new Csv2RdfException(
                            "Encoding not supported for decoding, stream is supposed to close but could not be closed.",
                            e1));
                }
            }
            throw new Csv2RdfException("Encoding not supported for decoding input stream", e);
        }
    }

    @Override
    public boolean hasNext() {
        return iterator.hasNext();
    }

    @Override
    public NeptunePropertyGraphElement next() {
        CSVRecord record = this.iterator.next();
        return create(header, record);
    }

    /**
     * Create a vertex or an edge, dispatching on the header type.
     *
     * @param header parsed CSV header (vertex or edge header)
     * @param record the data record
     * @return the created vertex or edge
     * @throws Csv2RdfException if the vertex or edge is not valid
     */
    // visible for testing
    static NeptunePropertyGraphElement create(NeptuneCsvHeader header, CSVRecord record) {
        if (header instanceof NeptuneCsvEdgeHeader) {
            return create((NeptuneCsvEdgeHeader) header, record);
        }
        if (header instanceof NeptuneCsvVertexHeader) {
            return create((NeptuneCsvVertexHeader) header, record);
        }
        throw new IllegalArgumentException("Header type not recognized: " + header.getClass());
    }

    /**
     * Get a value from the CSV record.
     *
     * @param record the data record
     * @param index  column index
     * @return value at index or {@code null} if the index is out of bounds of the
     *         record
     */
    private static String getValueIfExists(CSVRecord record, int index) {
        if (index >= record.size()) {
            log.debug("CSV record does not contain field {}.", index);
            return null;
        }
        return record.get(index);
    }

    /**
     * Create an edge. Set-valued user-defined columns are rejected because edges
     * only support single-valued properties.
     *
     * @param header edge header
     * @param record the data record
     * @return new edge
     * @throws Csv2RdfException if the edge is not valid
     */
    private static NeptunePropertyGraphEdge create(NeptuneCsvEdgeHeader header, CSVRecord record) {
        String id = header.getId() == null ? null : getValueIfExists(record, header.getId());
        String from = getValueIfExists(record, header.getFrom());
        String to = getValueIfExists(record, header.getTo());
        String label = getValueIfExists(record, header.getLabel());
        NeptunePropertyGraphEdge edge = new NeptunePropertyGraphEdge(id, from, to, label);
        for (NeptuneCsvUserDefinedColumn userDefinedType : header.getUserDefinedTypes()) {
            if (userDefinedType.getCardinality() == Cardinality.SET) {
                throw new Csv2RdfException("Set-valued types are not allowed for edges: " + userDefinedType.getName());
            }
            String fieldValue = getValueIfExists(record, userDefinedType.getIndex());
            if (fieldValue == null || fieldValue.isEmpty()) {
                continue;
            }
            edge.add(new NeptuneCsvSingleValuedUserDefinedProperty(userDefinedType.getName(),
                    userDefinedType.getDataType(), fieldValue));
        }
        return edge;
    }

    /**
     * Create a vertex with its user-defined properties and labels.
     *
     * @param header vertex header
     * @param record the data record
     * @return new vertex
     * @throws Csv2RdfException if the vertex is not valid
     */
    private static NeptunePropertyGraphVertex create(NeptuneCsvVertexHeader header, CSVRecord record) {
        String id = header.getId() == null ? null : getValueIfExists(record, header.getId());
        NeptunePropertyGraphVertex vertex = new NeptunePropertyGraphVertex(id);
        for (NeptuneCsvUserDefinedColumn userDefinedType : header.getUserDefinedTypes()) {
            String fieldValue = getValueIfExists(record, userDefinedType.getIndex());
            if (fieldValue == null || fieldValue.isEmpty()) {
                continue;
            }
            switch (userDefinedType.getCardinality()) {
            case SINGLE:
                vertex.add(new NeptuneCsvSingleValuedUserDefinedProperty(userDefinedType.getName(),
                        userDefinedType.getDataType(), fieldValue));
                break;
            case SET:
            case DEFAULT:
                // DEFAULT behaves like SET for vertices
                if (userDefinedType.isArray()) {
                    vertex.add(new NeptuneCsvUserDefinedArrayProperty(userDefinedType.getName(),
                            userDefinedType.getDataType(), fieldValue));
                } else {
                    vertex.add(new NeptuneCsvSetValuedUserDefinedProperty(userDefinedType.getName(),
                            userDefinedType.getDataType(), fieldValue));
                }
                break;
            default:
                break;
            }
        }
        String labels = header.getLabel() == null ? null : getValueIfExists(record, header.getLabel());
        if (labels == null) {
            return vertex;
        }
        // split on unescaped separators only; "\;" is an escaped literal semicolon
        for (String labelValue : labels.split("(?<!\\\\)" + NeptuneCsvUserDefinedColumn.ARRAY_VALUE_SEPARATOR)) {
            if (labelValue != null && !labelValue.isEmpty()) {
                vertex.add(labelValue.replace("\\;",";"));
            }
        }
        return vertex;
    }
}
| 7,438 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/Csv2RdfException.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
/**
*
* All problems that prevent finishing the CSV to RDF conversion are thrown as
* {@link Csv2RdfException} containing a message helping the user to fix the
* problem.
*
*/
public class Csv2RdfException extends RuntimeException {

    /**
     * Explicit serialization version instead of suppressing the "serial" warning;
     * guarantees a stable serialVersionUID across compilers and JVM versions.
     */
    private static final long serialVersionUID = 1L;

    /**
     * Create an exception with a user-facing message.
     *
     * @param message description of the problem, helping the user to fix it
     */
    public Csv2RdfException(String message) {
        super(message);
    }

    /**
     * Create an exception with a user-facing message and the underlying cause.
     *
     * @param message description of the problem, helping the user to fix it
     * @param cause   the original exception that triggered this one
     */
    public Csv2RdfException(String message, Throwable cause) {
        super(message, cause);
    }
}
| 7,439 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/NeptuneCsvUserDefinedColumn.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
/**
*
* {@link NeptuneCsvUserDefinedColumn} describes a user-defined column of the
* property graph CSV file.
*
*/
public class NeptuneCsvUserDefinedColumn {

    /**
     * Array data types accept multiple values separated by a semicolon
     */
    public static final String ARRAY_VALUE_SEPARATOR = ";";

    /**
     * Field name; immutable once the column has been parsed.
     */
    @Getter
    private final String name;

    /**
     * Field index; assigned by the header parser after construction.
     */
    @Getter
    @Setter
    private int index;

    /**
     * Enumeration of data types. Each is available by its name and an optional
     * alias.
     *
     * @see #BYTE
     * @see #BOOL
     * @see #SHORT
     * @see #INT
     * @see #LONG
     * @see #FLOAT
     * @see #DOUBLE
     * @see #STRING
     * @see #DATETIME
     */
    public enum DataType {
        /**
         * byte
         */
        BYTE,
        /**
         * bool, boolean
         */
        BOOL,
        /**
         * short
         */
        SHORT,
        /**
         * int, integer
         */
        INT,
        /**
         * long
         */
        LONG,
        /**
         * float
         */
        FLOAT,
        /**
         * double
         */
        DOUBLE,
        /**
         * string
         */
        STRING,
        /**
         * datetime, date
         */
        DATETIME;

        // lookup table of lowercase names and aliases, built once at class load
        private static final Map<String, DataType> DATA_TYPES_MAP = new HashMap<>();
        static {
            for (DataType dataType : DataType.values()) {
                DATA_TYPES_MAP.put(dataType.name().toLowerCase(), dataType);
            }
            DATA_TYPES_MAP.put("boolean", BOOL);
            DATA_TYPES_MAP.put("integer", INT);
            DATA_TYPES_MAP.put("date", DATETIME);
        }

        /**
         * Return the data type for the given name, or {@code null} if none exists. Some
         * data types are available under several aliases:
         * <ul>
         * <li>{@link DataType#BOOL}: bool, boolean</li>
         * <li>{@link DataType#INT}: int, integer</li>
         * <li>{@link DataType#DATETIME}: datetime, date</li>
         * </ul>
         *
         * @param name will be lowercased
         * @return data type or {@code null}
         */
        public static DataType fromName(String name) {
            return DATA_TYPES_MAP.get(name.toLowerCase());
        }
    }

    /**
     * Cardinality of a user-defined type
     */
    public enum Cardinality {
        /**
         * Only one value is allowed
         */
        SINGLE,
        /**
         * Multiple distinct values are allowed
         */
        SET,
        /**
         * Default is {@link #SET} for vertices and {@link #SINGLE} for edges.
         */
        DEFAULT;

        /**
         * Case-insensitive lookup; returns {@code null} for unknown or {@code null}
         * names.
         *
         * @param name cardinality name, may be {@code null}
         * @return matching cardinality or {@code null}
         */
        public static Cardinality fromName(String name) {
            for (Cardinality cardinality : values()) {
                if (cardinality.name().equalsIgnoreCase(name)) {
                    return cardinality;
                }
            }
            return null;
        }
    }

    /**
     * An array is declared with trailing brackets
     */
    public static final String ARRAY_DECLARATION = "[]";

    /**
     * A column name consists of two parts: name and optional data type. Name and
     * data type are separated by a colon. Name and data type consist of
     * non-whitespace characters. If a colon appears within the column name,
     * it must be escaped by preceding it with a backslash: {@code \:}.
     */
    private static final Pattern USER_HEADER_PATTERN = Pattern.compile("^(\\S+?)((?<!\\\\):\\S+)?$");
    private static final int GROUPS_IN_HEADER_PATTERN = 2;

    /**
     * A user type pattern consists the type name, an optional cardinality and
     * optional brackets.
     */
    private static final Pattern USER_TYPE_PATTERN = Pattern.compile(":([^" + Pattern.quote(ARRAY_DECLARATION)
            + "\\(\\)]+)(\\((" + Pattern.quote(Cardinality.SINGLE.name().toLowerCase()) + "|"
            + Pattern.quote(Cardinality.SET.name().toLowerCase()) + ")\\))?(" + Pattern.quote(ARRAY_DECLARATION)
            + ")?");
    private static final int GROUPS_IN_TYPE_PATTERN = 4;

    /**
     * Data type of this column
     */
    @Getter
    private final DataType dataType;

    /**
     * Cardinality of this column:
     * <dl>
     * <dt>{@link Cardinality#SINGLE}</dt>
     * <dd>Only one value is allowed</dd>
     * <dt>{@link Cardinality#SET}</dt>
     * <dd>Multiple values are allowed</dd>
     * </dl>
     */
    @Getter
    private final Cardinality cardinality;

    /**
     * Arrays allow multiple values in a field. The values will be separated.
     */
    @Getter
    private final boolean isArray;

    /**
     * Create a user defined non-array type. Cardinality is {@link Cardinality#DEFAULT}
     * and needs to be interpreted as {@link Cardinality#SET} for vertices and
     * {@link Cardinality#SINGLE} for edges.
     *
     * @param name     field name
     * @param dataType data type
     */
    public NeptuneCsvUserDefinedColumn(@NonNull String name, @NonNull DataType dataType) {
        this.name = name;
        this.dataType = dataType;
        this.cardinality = Cardinality.DEFAULT;
        this.isArray = false;
    }

    /**
     * Create a user defined non-array type.
     *
     * @param name        field name
     * @param dataType    data type
     * @param cardinality {@link Cardinality#SET} or {@link Cardinality#SINGLE}
     */
    public NeptuneCsvUserDefinedColumn(@NonNull String name, @NonNull DataType dataType,
            @NonNull Cardinality cardinality) {
        this.name = name;
        this.dataType = dataType;
        this.cardinality = cardinality;
        this.isArray = false;
    }

    /**
     * A user-defined column type.
     *
     * @param name     field name
     * @param dataType data type
     * @param isArray  accepts multiple values if true
     */
    public NeptuneCsvUserDefinedColumn(@NonNull String name, @NonNull DataType dataType, boolean isArray) {
        this.name = name;
        this.dataType = dataType;
        this.isArray = isArray;
        this.cardinality = this.isArray ? Cardinality.SET : Cardinality.DEFAULT;
    }

    /**
     * Parse a user-defined field.
     *
     * @param fieldNameAndDatatype field declaration
     * @return user-defined field with given data type or type string if no data
     *         type given
     * @throws Csv2RdfException if validation of the column definition fails
     */
    public static NeptuneCsvUserDefinedColumn parse(@NonNull String fieldNameAndDatatype) {
        String trimmed = fieldNameAndDatatype.trim();
        // split column name and type definition
        Matcher matcher = USER_HEADER_PATTERN.matcher(trimmed);
        if (!matcher.matches() || matcher.groupCount() < GROUPS_IN_HEADER_PATTERN) {
            throw new Csv2RdfException("Invalid column encountered while parsing header: " + trimmed);
        }
        String columnName = matcher.group(1);
        if (columnName.isEmpty()) {
            throw new Csv2RdfException("Column name is not present for header field: " + trimmed);
        }
        // unescape "\:" to a literal colon in the column name
        columnName = columnName.replace("\\:",":");
        String typeString = matcher.group(2);
        if (typeString == null || typeString.isEmpty()) {
            // no type given: default to string
            return new NeptuneCsvUserDefinedColumn(columnName, DataType.STRING);
        }
        // split type name, cardinality, and array declaration
        Matcher typeMatcher = USER_TYPE_PATTERN.matcher(typeString.toLowerCase());
        if (!typeMatcher.matches() || typeMatcher.groupCount() < GROUPS_IN_TYPE_PATTERN) {
            throw new Csv2RdfException("Invalid column encountered while parsing header: " + trimmed);
        }
        // Parse type from first group
        DataType dataType = DataType.fromName(typeMatcher.group(1));
        if (dataType == null) {
            throw new Csv2RdfException("Invalid data type encountered for header: " + trimmed);
        }
        Cardinality cardinality = Cardinality.fromName(typeMatcher.group(3));
        boolean isArray = ARRAY_DECLARATION.equals(typeMatcher.group(4));
        if (cardinality == null) {
            return new NeptuneCsvUserDefinedColumn(columnName, dataType, isArray);
        }
        if (isArray && cardinality == Cardinality.SINGLE) {
            throw new Csv2RdfException("Type definition cannot be single cardinality but array: " + columnName);
        }
        if (isArray) {
            return new NeptuneCsvUserDefinedColumn(columnName, dataType, isArray);
        }
        return new NeptuneCsvUserDefinedColumn(columnName, dataType, cardinality);
    }
}
| 7,440 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/Csv2Rdf.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.File;
import java.nio.charset.StandardCharsets;
import java.util.Scanner;
import java.util.concurrent.Callable;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.IVersionProvider;
import picocli.CommandLine.Option;
/**
*
* Main class for running the CSV to RDF conversion. If an expected problem
* occurs during the process, a message helping the user to fix it is printed to
* the console and logged. Other exceptions are logged and a general message is
* printed to the console. <br>
* The CSV to RDF conversion is started by calling {@link Csv2Rdf#main}.
*
*/
@Slf4j
@Command(name = "java -jar amazon-neptune-csv2rdf.jar", header = { "*** Amazon Neptune CSV to RDF Converter ***",
		"A tool for Amazon Neptune that converts property graphs stored as comma separated values into RDF graphs." }, footer = {
				"Fork me on GitHub: https://github.com/aws/amazon-neptune-csv-to-rdf-converter",
				"Licensed under Apache License, Version 2.0: https://aws.amazon.com/apache2.0",
				"Copyright Amazon.com Inc. or its affiliates. All Rights Reserved." }, usageHelpAutoWidth = true, mixinStandardHelpOptions = true, versionProvider = Csv2Rdf.MavenVersionProvider.class)
public class Csv2Rdf implements Callable<Integer> {

	// Name of the log file must match the configuration in log4j2.xml.
	private static final String LOG_FILE = "amazon-neptune-csv2rdf.log";

	// Classpath resource holding the Maven-generated version string.
	private static final String VERSION_RESOURCE = "/amazon-neptune-csv2rdf-version.txt";

	// Parameter labels shown in usage text and reused in validation messages.
	private static final String PARAM_LABEL_CONFIGURATION_FILE = "<configuration file>";
	private static final String PARAM_LABEL_INPUT_DIRECTORY = "<input directory>";
	private static final String PARAM_LABEL_OUTPUT_DIRECTORY = "<output directory>";

	@Option(names = { "-c",
			"--config" }, required = false, arity = "1", paramLabel = PARAM_LABEL_CONFIGURATION_FILE, description = "Property file containing the configuration.")
	private File configFile;

	@Option(names = { "-i",
			"--input" }, required = true, arity = "1", paramLabel = PARAM_LABEL_INPUT_DIRECTORY, description = "Directory containing the CSV files (UTF-8 encoded).")
	private File inputDirectory;

	@Option(names = { "-o",
			"--output" }, required = true, arity = "1", paramLabel = PARAM_LABEL_OUTPUT_DIRECTORY, description = "Directory for writing the RDF files (UTF-8 encoded); will be created if it does not exist.")
	private File outputDirectory;

	/**
	 * Exits with code 0 for normal termination. Exit codes less than zero signal
	 * that the conversion failed. See failure codes at {@link Csv2Rdf#call}. Exit
	 * codes greater than 0 mean that executing a command line argument failed.
	 *
	 * @param args command line arguments
	 */
	public static void main(final String[] args) {
		int exitCode = new CommandLine(new Csv2Rdf()).execute(args);
		System.exit(exitCode);
	}

	/**
	 * Load a text file resource as string.
	 *
	 * @param resource classpath resource path
	 * @return text content of the resource
	 */
	private static String getResourceAsString(String resource) {
		// "\A" makes the scanner consume the whole stream in a single token
		try (Scanner scanner = new Scanner(Csv2Rdf.class.getResourceAsStream(resource),
				StandardCharsets.UTF_8.name())) {
			return scanner.useDelimiter("\\A").next();
		}
	}

	/**
	 * Main method for running Csv2Rdf automatically by picocli.
	 *
	 * @return exit code: 0 for normal termination, -1 if an
	 *         {@link Csv2RdfException} occurred, -2 for any other {@link Exception}
	 */
	@Override
	public Integer call() {
		try {
			System.out.println(Csv2Rdf.class.getAnnotationsByType(Command.class)[0].header()[0]);
			validateParameters();
			echoParameters();
			System.out.println("Initializing the converter...");
			PropertyGraph2RdfConverter converter = new PropertyGraph2RdfConverter(configFile);
			System.out.println("Running CSV to RDF conversion...");
			converter.convert(inputDirectory, outputDirectory);
			System.out.println("Your RDF files have been written to: " + outputDirectory.getPath());
			System.out.println("All done.");
			return 0;
		} catch (Csv2RdfException e) {
			// expected failure: the message is user-actionable, print it directly
			log.error("CSV to RDF conversion failed.", e);
			System.err.println("CSV to RDF conversion failed.");
			System.err.println(e.getMessage());
			return -1;
		} catch (Exception e) {
			// unexpected failure: details go to the log only
			log.error("CSV to RDF conversion failed.", e);
			System.err.println("CSV to RDF conversion failed.");
			System.err.println("Please see log file for details: " + LOG_FILE);
			return -2;
		}
	}

	/**
	 * Validate values of command line parameters -c, -i, and -o.
	 *
	 * @throws Csv2RdfException for the first encountered invalid value
	 */
	private void validateParameters() {
		if (configFile != null) {
			validateFileParam(configFile, PARAM_LABEL_CONFIGURATION_FILE);
		}
		validateDirectoryParam(inputDirectory, PARAM_LABEL_INPUT_DIRECTORY, false);
		validateDirectoryParam(outputDirectory, PARAM_LABEL_OUTPUT_DIRECTORY, true);
	}

	/**
	 * Echo the parameter values of -c, -i, and -o to stdout.
	 */
	private void echoParameters() {
		System.out.println("Parameter values:");
		if (configFile != null) {
			System.out.println("* " + PARAM_LABEL_CONFIGURATION_FILE + " : " + configFile.getPath());
		}
		System.out.println("* " + PARAM_LABEL_INPUT_DIRECTORY + " : " + inputDirectory.getPath());
		System.out.println("* " + PARAM_LABEL_OUTPUT_DIRECTORY + " : " + outputDirectory.getPath());
	}

	/**
	 * Check if a file exists.
	 *
	 * @param file  candidate file; must not be {@code null} (enforced by Lombok's
	 *              {@code @NonNull}, which throws {@link NullPointerException})
	 * @param param parameter name appears in the exception message
	 * @throws Csv2RdfException if it is not a file or does not exist.
	 */
	// visible for testing
	static void validateFileParam(@NonNull File file, @NonNull String param) {
		// no explicit null check needed: @NonNull already rejects null arguments
		if (file.isFile()) {
			return;
		}
		if (file.exists()) {
			throw new Csv2RdfException("Parameter " + param + " does not point to a file: " + file.getAbsolutePath());
		}
		throw new Csv2RdfException("File for parameter " + param + " does not exist: " + file.getAbsolutePath());
	}

	/**
	 * Check if a directory exist and optionally try to create it.
	 *
	 * @param directory candidate directory
	 * @param param     parameter name appears in the exception message
	 * @param create    try to create the directory and parent directories if true
	 * @throws Csv2RdfException if it is not a directory or does not exist.
	 */
	// visible for testing
	static void validateDirectoryParam(@NonNull File directory, @NonNull String param, boolean create) {
		if (directory.isDirectory()) {
			return;
		}
		if (directory.exists()) {
			throw new Csv2RdfException(
					"Parameter " + param + " does not point to a directory: " + directory.getAbsolutePath());
		}
		if (!create) {
			throw new Csv2RdfException(
					"Directory for parameter " + param + " does not exist: " + directory.getAbsolutePath());
		}
		if (!directory.mkdirs()) {
			throw new Csv2RdfException(
					"Directory for parameter " + param + " could not be created: " + directory.getAbsolutePath());
		}
	}

	/**
	 * Version provider for picocli. Version number is read from a text file on the
	 * classpath.
	 */
	@Slf4j
	// visible for picocli
	static class MavenVersionProvider implements IVersionProvider {

		@Override
		public String[] getVersion() {
			try {
				return new String[] { getResourceAsString(VERSION_RESOURCE) };
			} catch (Exception e) {
				log.error("Could not read version information.", e);
				throw new Csv2RdfException(
						"Could not read version information. Please see log file for details: " + LOG_FILE);
			}
		}
	}
}
| 7,441 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/UriPostTransformation.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Getter;
import lombok.NonNull;
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;
/**
* <h2>URI Post Transformation</h2>
*
* URI Post Transformations are used to transform RDF resource IRIs into more
* readable ones.
*
* An URI Post Transformation consists of four elements:
*
* <pre>
* {@code
* uriPostTransformation.<ID>.srcPattern=<URI regex patten>
* uriPostTransformation.<ID>.typeUri=<URI>
* uriPostTransformation.<ID>.propertyUri=<URI>
* uriPostTransformation.<ID>.dstPattern=<URI pattern>
* }
* </pre>
*
* A positive integer {@code <ID>} is required to group the elements. The
* grouping numbers of several transformation configurations do not need to be
* consecutive. The transformation rules will be executed in ascending order
* according to the grouping numbers. All four configuration items are required:
*
* <ol>
* <li>{@code srcPattern} is a URI with a single regular expression group, e.g.
* {@code <http://aws.amazon.com/neptune/csv2rdf/resource/([0-9]+)>}, defining
* the URI patterns of RDF resources to which the post transformation applies.
* <li>{@code typeUri} filters out all matched source URIs that do not belong to
* the specified RDF type.
* <li>{@code propertyUri} is the RDF predicate pointing to the replacement
* value.
* <li>{@code dstPattern} is the new URI, which must contain a
* <em>{{VALUE}}</em> substring which is then substituted with the value of
* {@code propertyUri}.
* </ol>
*
* <b>Example:</b>
*
* <pre>
* uriPostTransformation.1.srcPattern=http://example.org/resource/([0-9]+)
* uriPostTransformation.1.typeUri=http://example.org/class/Country
* uriPostTransformation.1.propertyUri=http://example.org/datatypeProperty/code
* uriPostTransformation.1.dstPattern=http://example.org/resource/{{VALUE}}
* </pre>
*
* This configuration transforms the URI {@code http://example.org/resource/123}
* into {@code http://example.org/resource/FR}, given that there are the
* statements: <br>
* {@code http://example.org/resource/123} a
* {@code http://example.org/class/Country}. <br>
* {@code http://example.org/resource/123}
* {@code http://example.org/datatypeProperty/code} "FR".
*
* <p>
* Note that we assume that the property {@code propertyUri} is unique for each
* resource, otherwise a runtime exception will be thrown. Also note that the
* post transformation is applied using a two-pass algorithm over the generated
* data, and the translation mapping is kept fully in memory. This means the
* property is suitable only in cases where the number of mappings is small or
* if the amount of main memory is large.
* </p>
*/
@Slf4j
@ToString(includeFieldNames = true)
public class UriPostTransformation {

	@Getter
	private final Pattern srcPattern;
	@Getter
	private final String typeUri;
	@Getter
	private final String propertyUri;
	@Getter
	private final String dstPattern;

	// URIs that matched both srcPattern and typeUri (collected in phase 1)
	@ToString.Exclude
	private final Set<String> resources = new HashSet<>();
	// matched resource URI -> replacement value (collected in phase 2)
	@ToString.Exclude
	private final Map<String, String> resource2Value = new HashMap<>();
	@ToString.Exclude
	private final SimpleValueFactory vf = SimpleValueFactory.getInstance();

	/**
	 * Create a URI post transformation rule.
	 *
	 * @param srcPattern URI with a single regular expression group, e.g.
	 *                   {@code <http://aws.amazon.com/neptune/csv2rdf/resource/([0-9]+)>},
	 *                   defining the URI patterns of RDF resources to which the
	 *                   post transformation applies.
	 * @param typeUri    RDF type URI to filter out all matched source URIs that do
	 *                   not belong to the specified RDF type.
	 * @param propertyUri An RDF predicate pointing to the replacement value.
	 * @param dstPattern is the new URI, which must contain a <em>{{VALUE}}</em>
	 *                   substring which is then substituted with the value of
	 *                   {@code propertyUri}.
	 * @throws Csv2RdfException if the regex of scrPattern is invalid or dstPattern
	 *                          does not contain {{VALUE}}
	 */
	@JsonCreator
	public UriPostTransformation(@JsonProperty(value = "srcPattern", required = true) @NonNull String srcPattern,
			@JsonProperty(value = "typeUri", required = true) @NonNull String typeUri,
			@JsonProperty(value = "propertyUri", required = true) @NonNull String propertyUri,
			@JsonProperty(value = "dstPattern", required = true) @NonNull String dstPattern) {

		try {
			this.srcPattern = Pattern.compile(srcPattern);
		} catch (PatternSyntaxException e) {
			throw new Csv2RdfException("Regex is bad. " + e.getMessage() + ".", e);
		}
		this.typeUri = typeUri;
		this.propertyUri = propertyUri;
		if (!dstPattern.contains(PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE)) {
			throw new Csv2RdfException(
					"The pattern <" + dstPattern + "> for the new URI must contain the replacement variable "
							+ PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE + ".");
		}
		this.dstPattern = dstPattern;
	}

	/**
	 * Phase 1: register all URIs; this is a no-op if the paramters do not match as
	 * described.
	 *
	 * @param subject   resource to be possibly transformed, must match
	 *                  {@link UriPostTransformation#srcPattern}
	 * @param predicate must match {@link RDF#TYPE}
	 * @param object    must match the {@link UriPostTransformation#typeUri}
	 */
	public void registerResource(String subject, String predicate, String object) {
		if (predicate.equals(RDF.TYPE.toString()) && object.equals(typeUri)) {
			Matcher matcher = srcPattern.matcher(subject);
			if (matcher.matches()) {
				resources.add(subject); // may override (which does not harm)
			}
		}
	}

	/**
	 * Phase 2: register the replacement values; this is a no-op if the predicate
	 * does not match as described.
	 *
	 * @param subject   resource to be possibly transformed
	 * @param predicate must match {@link UriPostTransformation#propertyUri}
	 * @param object    possible replacement value
	 * @throws Csv2RdfException if there was already a replacement value registered
	 *                          for the subject
	 */
	public void registerReplacementValue(String subject, String predicate, String object) {
		if (propertyUri.equals(predicate)) {
			if (resource2Value.containsKey(subject) && !resource2Value.get(subject).equals(object)) {
				throw new Csv2RdfException("Found duplicate, inconsistent value for <" + subject + ">: " + object
						+ " vs. " + resource2Value.get(subject));
			}
			resource2Value.put(subject, object);
		}
	}

	/**
	 * Phase 3: apply transformation, return {@code null} if no transformation has
	 * been applied. <br>
	 * The replacement value will be URI encoded.
	 *
	 * @param uri where the part according to the matching group of
	 *            {@link UriPostTransformation#srcPattern} is going to be replaced
	 * @return the new URI string or {@code null} if the resource did not match the
	 *         type at {@link UriPostTransformation#typeUri} or no replacement value
	 *         was found
	 */
	public IRI apply(String uri) {
		if (!resources.contains(uri)) {
			return null; // type did not match, no op
		}
		if (!resource2Value.containsKey(uri)) {
			log.info("---> No replacement value found for <{}>. Resource was not transformed.", uri);
			return null;
		}
		String value = resource2Value.get(uri);
		String resource = dstPattern.replace(PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE, encode(value));
		return toValidatedIri(resource);
	}

	/**
	 *
	 * URI encode a value using the UTF-8 encoding scheme
	 *
	 * @param value
	 * @return URI encoded value
	 * @throws Csv2RdfException if the value could not be encoded
	 */
	private String encode(String value) {
		try {
			return URLEncoder.encode(value, StandardCharsets.UTF_8.name());
		} catch (UnsupportedEncodingException e) {
			throw new Csv2RdfException("Could not encode '" + value + "' when applying " + toString() + ".", e);
		}
	}

	/**
	 *
	 * Convert a string into an IRI
	 *
	 * @param iri
	 * @return new {@link IRI}
	 * @throws Csv2RdfException if the IRI cannot be created
	 */
	private IRI toValidatedIri(String iri) {
		try {
			return vf.createIRI(new URI(iri).toString());
		} catch (URISyntaxException | IllegalArgumentException e) {
			throw new Csv2RdfException("Invalid resource URI <" + iri + "> generated when applying " + toString() + ".",
					e);
		}
	}

	@Override
	public int hashCode() {
		// Hash the pattern STRING, not the Pattern object: java.util.regex.Pattern
		// does not override hashCode(), so hashing it directly uses identity hash
		// and breaks the equals/hashCode contract (equals() below compares the
		// pattern strings, so equal objects could land in different hash buckets).
		return Objects.hash(srcPattern.pattern(), typeUri, propertyUri, dstPattern);
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (getClass() != obj.getClass())
			return false;
		UriPostTransformation other = (UriPostTransformation) obj;
		// Pattern has no value-based equals(); compare the pattern strings instead.
		if (srcPattern == null) {
			if (other.srcPattern != null)
				return false;
		} else if (other.srcPattern == null) {
			return false;
		} else if (!srcPattern.pattern().equals(other.srcPattern.pattern())) {
			return false;
		}
		return Objects.equals(propertyUri, other.propertyUri) && Objects.equals(typeUri, other.typeUri)
				&& Objects.equals(dstPattern, other.dstPattern);
	}
}
| 7,442 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/NeptuneCsvHeader.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import org.apache.commons.csv.CSVRecord;
import lombok.Getter;
import lombok.NonNull;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.Cardinality;
/**
*
* An {@link NeptuneCsvHeader} is either a {@link NeptuneCsvVertexHeader} for
* describing the the fields of property graph vertex or a
* {@link NeptuneCsvEdgeHeader} for containing the fields types of a property
* graph edge.
*
*/
public abstract class NeptuneCsvHeader {

	public static final String SYSTEM_COLUMN_PREFIX = "~";
	public static final String ID = SYSTEM_COLUMN_PREFIX + "id";
	public static final String LABEL = SYSTEM_COLUMN_PREFIX + "label";
	public static final String FROM = SYSTEM_COLUMN_PREFIX + "from";
	public static final String TO = SYSTEM_COLUMN_PREFIX + "to";

	private static final Set<String> SYSTEM_COLUMNS = new HashSet<>();
	static {
		SYSTEM_COLUMNS.add(ID);
		SYSTEM_COLUMNS.add(LABEL);
		SYSTEM_COLUMNS.add(FROM);
		SYSTEM_COLUMNS.add(TO);
	}

	/**
	 *
	 * ID column (optional)
	 */
	@Getter
	private Integer id;

	/**
	 *
	 * Label field
	 */
	@Getter
	private Integer label;

	/**
	 *
	 * All user-defined fields
	 */
	@Getter
	private List<NeptuneCsvUserDefinedColumn> userDefinedTypes = new ArrayList<>();

	/**
	 *
	 * Constructor is private and can only be called from
	 * {@link NeptuneCsvVertexHeader} and {@link NeptuneCsvEdgeHeader}.
	 *
	 * @param id
	 * @param label
	 * @param userDefinedTypes
	 */
	private NeptuneCsvHeader(Integer id, Integer label, @NonNull List<NeptuneCsvUserDefinedColumn> userDefinedTypes) {
		this.id = id;
		this.label = label;
		this.userDefinedTypes = userDefinedTypes;
	}

	/**
	 *
	 * Parse a vertex or edge header from a CSV record.
	 *
	 * @param record CSV record
	 * @return {@link NeptuneCsvEdgeHeader} when ~from and ~to are present, else
	 *         {@link NeptuneCsvVertexHeader}
	 * @throws Csv2RdfException if the vertex or edge validation fails
	 */
	public static NeptuneCsvHeader parse(@NonNull CSVRecord record) {

		Set<String> names = new HashSet<>();
		Map<String, Integer> system = new HashMap<>();
		List<NeptuneCsvUserDefinedColumn> user = new ArrayList<>();

		for (int i = 0; i < record.size(); ++i) {
			String name = record.get(i);
			if (name == null) {
				throw new Csv2RdfException("Empty column header encountered.");
			}
			// Locale.ROOT guarantees locale-independent case mapping; with the default
			// locale, e.g. Turkish, "~ID" would not lower-case to "~id" and system
			// columns would be silently treated as user-defined columns.
			String normalized = name.trim().toLowerCase(Locale.ROOT);
			if (SYSTEM_COLUMNS.contains(normalized)) {
				system.put(normalized, i);
			} else if (normalized.startsWith(SYSTEM_COLUMN_PREFIX)) {
				throw new Csv2RdfException("Invalid system column encountered: " + normalized);
			} else {
				NeptuneCsvUserDefinedColumn column = NeptuneCsvUserDefinedColumn.parse(name);
				column.setIndex(i);
				user.add(column);
				// duplicate detection for user columns is based on the parsed name
				normalized = column.getName();
			}
			if (!names.add(normalized)) {
				throw new Csv2RdfException("Found duplicate field: " + name);
			}
		}

		NeptuneCsvHeader header;
		if (system.get(FROM) != null || system.get(TO) != null) {
			header = new NeptuneCsvEdgeHeader(system.get(ID), system.get(FROM), system.get(TO), system.get(LABEL),
					user);
		} else {
			header = new NeptuneCsvVertexHeader(system.get(ID), system.get(LABEL), user);
		}
		return header;
	}

	/**
	 *
	 * {@link NeptuneCsvVertexHeader} provides access to types of the id field, the
	 * label fields, and the user-defined fields.
	 *
	 */
	public static class NeptuneCsvVertexHeader extends NeptuneCsvHeader {

		/**
		 *
		 * @param id               optional
		 * @param label            optional
		 * @param userDefinedTypes may be empty
		 */
		public NeptuneCsvVertexHeader(Integer id, Integer label,
				@NonNull List<NeptuneCsvUserDefinedColumn> userDefinedTypes) {
			super(id, label, userDefinedTypes);
		}
	}

	/**
	 *
	 * {@link NeptuneCsvEdgeHeader} provides access to the type of the id field, the
	 * from field, the to field, the label fields, and the user-defined fields.
	 *
	 */
	public static class NeptuneCsvEdgeHeader extends NeptuneCsvHeader {

		@Getter
		private final Integer from;
		@Getter
		private final Integer to;

		/**
		 *
		 * @param id               optional
		 * @param from             required
		 * @param to               required
		 * @param label            required
		 * @param userDefinedTypes may be empty
		 * @throws Csv2RdfException if from or to is missing or there is no label or an
		 *                          user-defined type is an array type
		 */
		public NeptuneCsvEdgeHeader(Integer id, Integer from, Integer to, Integer label,
				@NonNull List<NeptuneCsvUserDefinedColumn> userDefinedTypes) {
			super(id, label, userDefinedTypes);
			this.from = from;
			this.to = to;
			if (this.from == null) {
				throw new Csv2RdfException("An edge requires a " + FROM + " field.");
			}
			if (this.to == null) {
				throw new Csv2RdfException("An edge requires a " + TO + " field.");
			}
			if (this.getLabel() == null) {
				throw new Csv2RdfException("An edge requires a " + LABEL + " field.");
			}
			for (NeptuneCsvUserDefinedColumn userDefinedType : this.getUserDefinedTypes()) {
				if (userDefinedType.isArray()) {
					throw new Csv2RdfException("Array types are not allowed for edges: " + userDefinedType.getName());
				}
				if (userDefinedType.getCardinality() == Cardinality.SET) {
					throw new Csv2RdfException(
							"Set-valued types are not allowed for edges: " + userDefinedType.getName());
				}
			}
		}
	}
}
| 7,443 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/PropertyGraph2RdfMapper.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.RDFWriter;
import org.eclipse.rdf4j.rio.Rio;
import org.eclipse.rdf4j.rio.UnsupportedRDFormatException;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvSingleValuedUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptuneCsvUserDefinedProperty;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphEdge;
import software.amazon.neptune.csv2rdf.NeptunePropertyGraphElement.NeptunePropertyGraphVertex;
import software.amazon.neptune.csv2rdf.PropertyGraph2RdfMapping.PropertyGraphEdge2RdfMapping;
import software.amazon.neptune.csv2rdf.PropertyGraph2RdfMapping.PropertyGraphVertex2RdfMapping;
/**
*
* This class performs the basic mapping specified in
* {@link PropertyGraph2RdfMapping} between property graph vertices and edges
* into RDF. RDF quads are used to represent edges with properties.
*
* The mapping can be defined in the configuration file.
*
* <b>Simple Example</b><br>
*
* Simplified configuration values:
*
* <pre>
* mapper.mapping.typeNamespace=type:
* mapper.mapping.vertexNamespace=vertex:
* mapper.mapping.edgeNamespace=edge:
* mapper.mapping.edgeContextNamespace=econtext:
* mapper.mapping.vertexPropertyNamespace=vproperty:
* mapper.mapping.edgePropertyNamespace=eproperty:
* mapper.mapping.defaultNamedGraph=dng:/
* mapper.mapping.defaultType=dt:/
* mapper.mapping.defaultProperty=dp:/
* </pre>
*
* Vertices:
*
* <pre>
* ~id,~label,name,code,country
* 1,city,Seattle,S,USA
* 2,city,Vancouver,V,CA
* </pre>
*
* Edges:
*
* <pre>
* ~id,~label,~from,~to,distance,type
* a,route,1,2,166,highway
* </pre>
*
* RDF statements:
*
* <pre>
* {@code
* <vertex:1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <type:City> <dng:/> .
* <vertex:1> <vproperty:name> "Seattle" <dng:/> .
* <vertex:1> <vproperty:code> "S" <dng:/> .
* <vertex:1> <vproperty:country> "USA" <dng:/> .
* <vertex:2> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <type:City> <dng:/> .
* <vertex:2> <vproperty:name> "Vancouver" <dng:/> .
* <vertex:2> <vproperty:code> "V" <dng:/> .
* <vertex:2> <vproperty:country> "CA" <dng:/> .
*
* <vertex:1> <edge:route> <vertex:2> <econtext:a> .
* <econtext:a> <eproperty:distance> "166" <dng:/> .
* <econtext:a> <eproperty:type> "highway" <dng:/> .
* }
* </pre>
*/
@Slf4j
@JsonAutoDetect(fieldVisibility = Visibility.NONE, getterVisibility = Visibility.NONE)
public class PropertyGraph2RdfMapper {

	/**
	 * When {@code true}, a property that is selected to create RDFS labels will
	 * create normal property statements, too. Otherwise only the RDFS label
	 * statements are created. <br>
	 * The properties to create RDFS labels are configured in
	 * {@link PropertyGraph2RdfMapping#pgVertexType2PropertyForRdfsLabel}.
	 */
	@Getter
	@Setter
	private boolean alwaysAddPropertyStatements = true;

	/**
	 * The {@link PropertyGraph2RdfMapping} defines how property graph vertices and
	 * edges are mapped to RDF.
	 */
	@Getter
	@Setter
	private PropertyGraph2RdfMapping mapping = new PropertyGraph2RdfMapping();

	/**
	 * Map a property graph file to RDF
	 *
	 * @param propertyGraphInFile a property graph file
	 * @param rdfOutFile          RDF output file
	 * @throws Csv2RdfException if an error occurs during the process
	 */
	public void map(final File propertyGraphInFile, File rdfOutFile) {
		log.info("-> Converting input file {}...", propertyGraphInFile.getName());
		try (NeptuneCsvInputParser inputParser = new NeptuneCsvInputParser(propertyGraphInFile);
				FileOutputStream fos = new FileOutputStream(rdfOutFile)) {
			// The writer is scoped to the try block: it wraps the managed output
			// stream and must not be touched after the stream is closed. (It was
			// previously declared outside the try but never used there.)
			RDFWriter rdfWriter = Rio.createWriter(PropertyGraph2RdfConverter.RDF_FORMAT, fos);
			rdfWriter.startRDF();
			rdfWriter.handleNamespace("vertex", mapping.getVertexNamespace());
			rdfWriter.handleNamespace("edge", mapping.getEdgeNamespace());
			rdfWriter.handleNamespace("vertexprop", mapping.getVertexPropertyNamespace());
			rdfWriter.handleNamespace("edgeprop", mapping.getEdgePropertyNamespace());
			while (inputParser.hasNext()) {
				List<Statement> statements = mapToStatements(inputParser.next());
				for (Statement statement : statements) {
					rdfWriter.handleStatement(statement);
				}
			}
			rdfWriter.endRDF();
		} catch (UnsupportedRDFormatException | RDFHandlerException | IOException e) {
			throw new Csv2RdfException("Conversion of file " + propertyGraphInFile.getAbsolutePath() + " failed.", e);
		}
	}

	/**
	 *
	 * Map a {@link NeptunePropertyGraphElement} to RDF statements according the
	 * configured {@link PropertyGraph2RdfMapper#mapping}.
	 *
	 * @param pgElement
	 * @return list of RDF statements
	 */
	private List<Statement> mapToStatements(NeptunePropertyGraphElement pgElement) {
		if (pgElement instanceof NeptunePropertyGraphEdge) {
			return mapToStatements((NeptunePropertyGraphEdge) pgElement);
		}
		if (pgElement instanceof NeptunePropertyGraphVertex) {
			return mapToStatements((NeptunePropertyGraphVertex) pgElement);
		}
		throw new IllegalArgumentException("Property graph element type not recognized: " + pgElement.getClass());
	}

	/**
	 *
	 * Map a {@link NeptunePropertyGraphEdge} to RDF statements according the
	 * configured {@link PropertyGraph2RdfMapper#mapping}.
	 *
	 * @param edge property graph edge
	 * @return list of RDF statements
	 */
	public List<Statement> mapToStatements(NeptunePropertyGraphEdge edge) {

		List<Statement> statements = new ArrayList<>();
		PropertyGraphEdge2RdfMapping edgeMapper = mapping.getEdge2RdfMapping();

		// the edge itself
		if (edge.hasLabel()) {
			// edge ID goes into graph position
			statements.add(
					edgeMapper.createRelationStatement(edge.getFrom(), edge.getLabel(), edge.getTo(), edge.getId()));
		} else {
			statements.add(edgeMapper.createRelationStatement(edge.getFrom(), edge.getTo(), edge.getId()));
		}

		// append edge properties
		for (NeptuneCsvSingleValuedUserDefinedProperty userDefinedProperty : edge.getUserDefinedProperties()) {
			statements.add(edgeMapper.createLiteralStatement(edge.getId(), userDefinedProperty.getName(),
					userDefinedProperty.getValue(), userDefinedProperty.getDataType()));
		}

		return statements;
	}

	/**
	 *
	 * Map a {@link NeptunePropertyGraphVertex} to RDF statements according the
	 * configured {@link PropertyGraph2RdfMapper#mapping}.
	 *
	 * @param vertex property graph vertex
	 * @return list of RDF statements
	 */
	public List<Statement> mapToStatements(NeptunePropertyGraphVertex vertex) {

		final List<Statement> statements = new ArrayList<>();
		final PropertyGraphVertex2RdfMapping vertexMapper = mapping.getVertex2RdfMapping();

		Set<String> propertiesForRdfsLabel = new HashSet<>();

		// the vertex itself; for now, we always type (falling back on a default if no
		// type is given)
		if (vertex.getLabels().isEmpty()) {
			statements.add(vertexMapper.createTypeStatement(vertex.getId()));
		} else {
			for (String label : vertex.getLabels()) {
				statements.add(vertexMapper.createTypeStatement(vertex.getId(), label));
				String propertyForRdfsLabel = vertexMapper.getPropertyForRdfsLabel(label);
				if (propertyForRdfsLabel != null) {
					propertiesForRdfsLabel.add(propertyForRdfsLabel);
				}
			}
		}

		for (NeptuneCsvUserDefinedProperty userDefinedProperty : vertex.getUserDefinedProperties()) {
			String propertyName = userDefinedProperty.getName();
			if (vertexMapper.containsRdfResourcePatternForProperty(propertyName)) {
				// in this case, we do not write a literal statement but a relation
				for (String value : userDefinedProperty.getValues()) {
					statements.add(vertexMapper.createRelationStatement(vertex.getId(), propertyName, value));
				}
			} else {
				boolean addRdfsLabel = propertiesForRdfsLabel.contains(propertyName);
				// this property has been marked as the property used as the rdfs:label
				if (addRdfsLabel) {
					for (String value : userDefinedProperty.getValues()) {
						statements.add(vertexMapper.createRdfsLabelStatement(vertex.getId(), value));
					}
				}
				// if either this was not written as rdfs:label or the configuration tells us to
				// write label properties
				// redundantly, we also emit the datatype property statement
				if (!addRdfsLabel || alwaysAddPropertyStatements) {
					for (String value : userDefinedProperty.getValues()) {
						statements.add(vertexMapper.createLiteralStatement(vertex.getId(), propertyName, value,
								userDefinedProperty.getDataType()));
					}
				}
			}
		}

		return statements;
	}
}
| 7,444 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/PropertyGraph2RdfConverter.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.eclipse.rdf4j.rio.RDFFormat;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import com.fasterxml.jackson.databind.JsonMappingException;
import com.fasterxml.jackson.databind.JsonMappingException.Reference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.UnrecognizedPropertyException;
import com.fasterxml.jackson.dataformat.javaprop.JavaPropsFactory;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
/**
*
* {@link PropertyGraph2RdfConverter} converts property graph vertices and edges
* stored as comma separated values into RDF N-Quads files. The conversion uses
* two steps: <br>
* First, an {@link PropertyGraph2RdfMapper} applies the configured
* {@link PropertyGraph2RdfMapping} to the property graph data for generating
* RDF resources, predicates, literals, triples, and, in case of edge
* properties, quads. <br>
* Then, an {@link UriPostTransformer} performs the configured
* {@link UriPostTransformation}s on the RDF data. These transformations can be
* used to rewrite resource IRIs into more readable ones by replacing parts of
* them with property values.
*
*/
@JsonAutoDetect(fieldVisibility = Visibility.NONE, getterVisibility = Visibility.NONE)
public class PropertyGraph2RdfConverter {

	public static final RDFFormat RDF_FORMAT = RDFFormat.NQUADS;
	public static final String DEFAULT_PROPERTY_GRAPH_FILE_EXTENSION = "csv";
	public static final String DEFAULT_RDF_FILE_EXTENSION = RDF_FORMAT.getDefaultFileExtension();
	public static final String REPLACEMENT_VARIABLE = "{{VALUE}}";

	/**
	 *
	 * Extension of the property graph input files. Only files matching the
	 * extension are converted.
	 */
	@Getter
	@Setter
	private String inputFileExtension = DEFAULT_PROPERTY_GRAPH_FILE_EXTENSION;

	/**
	 *
	 * Output file suffix, determining the RDF format in which the result is
	 * written. Currently, only N-Quads is supported so this value cannot be
	 * changed.
	 */
	@Getter
	private String outputFileExtension = DEFAULT_RDF_FILE_EXTENSION;

	/**
	 *
	 * The {@link PropertyGraph2RdfMapper} performs the basic mapping defined in
	 * {@link PropertyGraph2RdfMapping} from property graph vertices and edges into
	 * RDF.
	 */
	@Getter
	@Setter
	private PropertyGraph2RdfMapper mapper = new PropertyGraph2RdfMapper();

	/**
	 *
	 * The {@link UriPostTransformer} runs additional transformations defined in
	 * {@link UriPostTransformation}s on RDF resource IRIs.
	 */
	@Getter
	@Setter
	private UriPostTransformer transformer = new UriPostTransformer();

	/**
	 *
	 * @param config property file, can be {@code null}
	 */
	public PropertyGraph2RdfConverter(File config) {
		if (config != null) {
			this.load(config);
		}
	}

	/**
	 *
	 * Convert property graph files into RDF files.
	 *
	 * @param inputDirectory  directory containing the property graph files, must
	 *                        exist, available files must be UTF-8 encoded
	 * @param outputDirectory output directory for the RDF files, must exist, output
	 *                        will be UTF-8 encoded
	 */
	public void convert(File inputDirectory, File outputDirectory) {
		List<File> propertyGraphFiles = this.listPropertyGraphFiles(inputDirectory);
		List<File> rdfFiles = new ArrayList<>();
		for (File propertyGraphFile : propertyGraphFiles) {
			File rdfFile = getRdfFile(outputDirectory, propertyGraphFile);
			mapper.map(propertyGraphFile, rdfFile);
			rdfFiles.add(rdfFile);
		}
		transformer.applyTo(rdfFiles, mapper.getMapping().getVertexNamespace());
	}

	/**
	 *
	 * List files in a directory matching
	 * {@link PropertyGraph2RdfConverter#inputFileExtension}.
	 *
	 * @param directory
	 * @return list of matching files
	 */
	// visible for testing
	List<File> listPropertyGraphFiles(File directory) {
		final File[] files = directory.listFiles((file) -> {
			return file.isFile() && file.getName().endsWith("." + inputFileExtension);
		});
		if (files == null) {
			throw new Csv2RdfException("Could not read from input directory: " + directory.getAbsolutePath());
		}
		if (files.length == 0) {
			throw new Csv2RdfException(
					"No files with extension " + inputFileExtension + " found at: " + directory.getAbsolutePath());
		}
		return new ArrayList<File>(Arrays.asList(files));
	}

	/**
	 *
	 * @param rdfDirectory      the output directory
	 * @param propertyGraphFile
	 * @return a file in the output directory with the name of the property graph
	 *         file but RDF extension of
	 *         {@link PropertyGraph2RdfConverter#outputFileExtension}
	 */
	// visible for testing
	File getRdfFile(File rdfDirectory, File propertyGraphFile) {
		// Quote the replacement string: replaceAll() treats '$' and '\' in the
		// replacement as special (group references/escapes), so a configurable
		// extension containing them would corrupt the output file name.
		String rdfFileName = propertyGraphFile.getName().replaceAll(Pattern.quote(inputFileExtension) + "$",
				Matcher.quoteReplacement(outputFileExtension));
		return new File(rdfDirectory, rdfFileName);
	}

	/**
	 * Load the configuration values and initialize all fields of the
	 * {@link PropertyGraph2RdfConverter} instance and its dependent objects.
	 *
	 * @param config property file
	 */
	private void load(@NonNull File config) {
		try {
			ObjectMapper mapper = new ObjectMapper(new JavaPropsFactory());
			mapper.readerForUpdating(this).readValue(config);
		} catch (UnrecognizedPropertyException e) {
			throw new Csv2RdfException(
					"Loading configuration failed because of unknown property: " + e.getPropertyName(), e);
		} catch (JsonMappingException e) {
			throw new Csv2RdfException(getErrorMessage(e), e);
		} catch (IOException e) {
			throw new Csv2RdfException("Configuration file not found: " + config.getAbsolutePath(), e);
		}
	}

	/**
	 * Try to find the field and the specific cause where the failure occurred. As
	 * {@link PropertyGraph2RdfMapping#setPgProperty2RdfResourcePattern} and
	 * {@link UriPostTransformation#UriPostTransformation} perform consistency
	 * checks, {@link Csv2RdfException} can be the cause of
	 * {@link JsonMappingException}, too.
	 *
	 * @param e
	 * @return error message
	 */
	private String getErrorMessage(JsonMappingException e) {
		List<Reference> path = e.getPath();
		String message;
		if (e.getCause() instanceof Csv2RdfException) {
			message = e.getCause().getMessage();
		} else {
			message = e.getOriginalMessage();
		}
		// walk the mapping path from the innermost reference outwards to find a
		// named field to report
		for (int i = path.size() - 1; i >= 0; --i) {
			String field = path.get(i).getFieldName();
			if (field != null) {
				return "Loading configuration failed because of invalid input at " + field + ": " + message;
			}
		}
		return "Loading configuration failed because of invalid input: " + message;
	}
}
| 7,445 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/NeptunePropertyGraphElement.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.NonNull;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
/**
*
* {@link NeptunePropertyGraphElement} is either a
* {@link NeptunePropertyGraphVertex} representing a vertex or a
* {@link NeptunePropertyGraphEdge} representing an edge.
*
*/
public abstract class NeptunePropertyGraphElement {
/**
*
* The Id of this vertex or edge. Never {@code null} or empty; Lombok
* generates the public getter.
*/
@Getter
private final String id;
/**
*
* Create a vertex or an edge.
*
* @param id required, must not be {@code null} or empty
* @throws Csv2RdfException if the Id is {@code null} or empty
*/
private NeptunePropertyGraphElement(String id) {
if (id == null || id.isEmpty()) {
throw new Csv2RdfException("Vertex or edge ID must not be null or empty.");
} else {
this.id = id;
}
}
/**
 * {@link NeptunePropertyGraphVertex} represents a property graph vertex: an
 * Id, zero or more labels, and the user-defined properties attached to it.
 */
public static class NeptunePropertyGraphVertex extends NeptunePropertyGraphElement {

    /**
     * Labels attached to this vertex. The list may be empty; a contained
     * label is never {@code null} or empty.
     */
    @Getter
    private final List<String> labels = new ArrayList<>();

    /**
     * User-defined properties of this vertex. Their values are never
     * {@code null} or empty.
     */
    @Getter
    private final List<NeptuneCsvUserDefinedProperty> userDefinedProperties = new ArrayList<>();

    /**
     * Create a vertex.
     *
     * @param id required, must not be {@code null} or empty
     */
    public NeptunePropertyGraphVertex(String id) {
        super(id);
    }

    /**
     * Attach a single, set valued, or array property to this vertex.
     *
     * @param property vertex property
     */
    public void add(NeptuneCsvUserDefinedProperty property) {
        userDefinedProperties.add(property);
    }

    /**
     * Attach a label to this vertex.
     *
     * @param label must not be {@code null} or empty
     * @throws Csv2RdfException if the label is {@code null} or empty
     */
    public void add(String label) {
        if (label != null && !label.isEmpty()) {
            labels.add(label);
            return;
        }
        throw new Csv2RdfException("Vertex labels must not be null or empty.");
    }
}
/**
 * {@link NeptunePropertyGraphEdge} represents a property graph edge: an Id,
 * the source and target vertex Ids, an optional label, and single valued
 * user-defined properties.
 */
public static class NeptunePropertyGraphEdge extends NeptunePropertyGraphElement {

    /** Id of the source vertex; never {@code null} or empty. */
    @Getter
    private final String from;

    /** Id of the target vertex; never {@code null} or empty. */
    @Getter
    private final String to;

    /** Label of this edge; may be {@code null} or empty. */
    @Getter
    private final String label;

    /**
     * User-defined properties of this edge. Their values are never
     * {@code null} or empty.
     */
    @Getter
    private final List<NeptuneCsvSingleValuedUserDefinedProperty> userDefinedProperties = new ArrayList<>();

    /**
     * Create an edge.
     *
     * @param id required by the superclass constructor, which rejects
     *        {@code null} or empty Ids
     * @param from required
     * @param to required
     * @param label optional
     * @throws Csv2RdfException if from or to is missing
     */
    public NeptunePropertyGraphEdge(String id, String from, String to, String label) {
        super(id);
        if (from == null || from.isEmpty()) {
            throw new Csv2RdfException(
                    "Value for " + NeptuneCsvHeader.FROM + " is missing at edge " + this.getId() + ".");
        }
        if (to == null || to.isEmpty()) {
            throw new Csv2RdfException(
                    "Value for " + NeptuneCsvHeader.TO + " is missing at edge " + this.getId() + ".");
        }
        this.from = from;
        this.to = to;
        this.label = label;
    }

    /**
     * Attach a single valued property to this edge.
     *
     * @param property edge property
     */
    public void add(NeptuneCsvSingleValuedUserDefinedProperty property) {
        userDefinedProperties.add(property);
    }

    /**
     * Check whether this edge carries a label.
     *
     * @return {@code true} if there is a non-empty label
     */
    public boolean hasLabel() {
        if (label == null) {
            return false;
        }
        return !label.isEmpty();
    }
}
/**
*
* A property of a vertex or an edge. It can be multi or single valued.
* Subclasses decide how many values are held and expose them through
* {@link #getValues()}. Field accessors and the constructor are generated
* by Lombok.
*
*/
@Getter
@AllArgsConstructor
public static abstract class NeptuneCsvUserDefinedProperty {
/**
*
* Name of this property; never {@code null}
*/
@NonNull
private final String name;
/**
*
* Data type of this property; never {@code null}
*/
@NonNull
private final DataType dataType;
/**
*
* All values of this property. Single valued properties return only one value.
*
* @return an unmodifiable collection containing all values
*/
public abstract Collection<String> getValues();
}
/**
*
* {@link NeptuneCsvSetValuedUserDefinedProperty} combines a data value from a
* CSV record with the corresponding header column type
* {@link NeptuneCsvUserDefinedColumn}. It is essentially a property of a vertex
* and can have multiple values. The value must not be {@code null}. Empty or
* {@code null} values of arrays are skipped.
*
*/
public static class NeptuneCsvSetValuedUserDefinedProperty extends NeptuneCsvUserDefinedProperty {
// use linked hash set to make iteration order predictable for tests
private final Set<String> values = new LinkedHashSet<>();
public NeptuneCsvSetValuedUserDefinedProperty(@NonNull String name, @NonNull DataType dataType,
@NonNull String value) {
super(name, dataType);
// Deliberately dispatches to the overridable add(...): subclasses such as
// NeptuneCsvUserDefinedArrayProperty rely on this to split the initial
// value during construction. Safe because the values field initializer
// has already run when this constructor body executes.
this.add(value);
}
/**
*
* Add a value; duplicates are silently collapsed by the backing set.
*
* @param value must not be {@code null}
*/
public void add(@NonNull String value) {
values.add(value);
}
@Override
public Collection<String> getValues() {
// Wrap rather than copy: later additions are reflected, callers cannot mutate.
return Collections.unmodifiableCollection(values);
}
}
/**
 * {@link NeptuneCsvUserDefinedArrayProperty} combines a data value from a CSV
 * record with the corresponding header column type
 * {@link NeptuneCsvUserDefinedColumn}. It is essentially a property of a vertex
 * and can have multiple values. The value must not be {@code null}. Empty
 * elements of arrays are skipped.
 */
public static class NeptuneCsvUserDefinedArrayProperty extends NeptuneCsvSetValuedUserDefinedProperty {

    public NeptuneCsvUserDefinedArrayProperty(@NonNull String name, @NonNull DataType dataType,
            @NonNull String value) {
        // The superclass constructor dispatches to the overridden add(...) below,
        // so the initial value is split into its array elements right away.
        super(name, dataType, value);
    }

    /**
     * Split the value at unescaped separators and add all non-empty elements.
     * Escaped separators ("\;") are unescaped before an element is stored.
     *
     * @param value must not be {@code null}
     */
    @Override
    public void add(@NonNull String value) {
        // Split at separators not preceded by a backslash. String.split never
        // yields null elements and drops trailing empty strings, so only an
        // explicit empty-check for interior empty elements is required (the
        // former null-check on the split result was dead code).
        for (String element : value.split("(?<!\\\\)" + NeptuneCsvUserDefinedColumn.ARRAY_VALUE_SEPARATOR)) {
            if (element.isEmpty()) {
                continue;
            }
            super.add(element.replace("\\;", ";"));
        }
    }
}
/**
 * {@link NeptuneCsvSingleValuedUserDefinedProperty} combines a data value from
 * a CSV record with the corresponding header column type
 * {@link NeptuneCsvUserDefinedColumn}. It is essentially a property of an edge
 * and always carries exactly one value, which must not be {@code null}.
 */
public static class NeptuneCsvSingleValuedUserDefinedProperty extends NeptuneCsvUserDefinedProperty {

    /** The single value of this property; never {@code null}. */
    @Getter
    private final String value;

    public NeptuneCsvSingleValuedUserDefinedProperty(@NonNull String name, @NonNull DataType dataType,
            @NonNull String value) {
        super(name, dataType);
        this.value = value;
    }

    /**
     * @return an immutable collection holding the single value
     */
    @Override
    public Collection<String> getValues() {
        List<String> singleValue = Collections.singletonList(value);
        return singleValue;
    }
}
}
| 7,446 |
0 | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune | Create_ds/amazon-neptune-csv-to-rdf-converter/src/main/java/software/amazon/neptune/csv2rdf/PropertyGraph2RdfMapping.java | /*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package software.amazon.neptune.csv2rdf;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Literal;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.Value;
import org.eclipse.rdf4j.model.ValueFactory;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.model.vocabulary.RDF;
import org.eclipse.rdf4j.model.vocabulary.RDFS;
import org.eclipse.rdf4j.model.vocabulary.XMLSchema;
import com.fasterxml.jackson.annotation.JsonAutoDetect;
import com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import software.amazon.neptune.csv2rdf.NeptuneCsvUserDefinedColumn.DataType;
/**
*
* {@link PropertyGraph2RdfMapping} contains the configuration for mapping
* property graph vertices and edges to RDF statements. The configuration
* consists of several namespaces, a default type, a default named graph,
* {@link PropertyGraph2RdfMapping#pgVertexType2PropertyForRdfsLabel} for
* mapping certain properties to RDFS labels, and
* {@link PropertyGraph2RdfMapping#pgProperty2RdfResourcePattern} for creating
* RDF resources from property values. <br>
* It provides access to {@link PropertyGraphVertex2RdfMapping} for mapping
* vertices and to {@link PropertyGraphEdge2RdfMapping} for mapping edges to RDF
* statements.
*
*/
@JsonAutoDetect(fieldVisibility = Visibility.NONE)
public class PropertyGraph2RdfMapping {
public static final String DEFAULT_TYPE_NAMESPACE = "http://aws.amazon.com/neptune/csv2rdf/class/";
public static final String DEFAULT_VERTEX_NAMESPACE = "http://aws.amazon.com/neptune/csv2rdf/resource/";
public static final String DEFAULT_EDGE_NAMESPACE = "http://aws.amazon.com/neptune/csv2rdf/objectProperty/";
public static final String DEFAULT_VERTEX_PROPERTY_NAMESPACE = "http://aws.amazon.com/neptune/csv2rdf/datatypeProperty/";
public static final String DEFAULT_EDGE_PROPERTY_NAMESPACE = "http://aws.amazon.com/neptune/csv2rdf/datatypeProperty/";
public static final String DEFAULT_TYPE = "http://www.w3.org/2002/07/owl#Thing";
public static final String DEFAULT_PREDICATE = DEFAULT_EDGE_NAMESPACE + "edge";
public static final String DEFAULT_NAMED_GRAPH = "http://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph";
private final ValueFactory vf = SimpleValueFactory.getInstance();
/**
*
* Namespace in which types are stored
*/
@Getter(AccessLevel.PACKAGE)
@Setter
private String typeNamespace = DEFAULT_TYPE_NAMESPACE;
/**
*
* Namespace in which nodes are stored
*/
@Getter
@Setter
private String vertexNamespace = DEFAULT_VERTEX_NAMESPACE;
/**
*
* Namespace in which edges are stored
*/
@Getter
@Setter
private String edgeNamespace = DEFAULT_EDGE_NAMESPACE;
/**
*
* Namespace in which vertex properties are stored
*/
@Getter
@Setter
private String vertexPropertyNamespace = DEFAULT_VERTEX_PROPERTY_NAMESPACE;
/**
*
* Namespace in which edge contexts are stored.
*/
@Setter
private String edgeContextNamespace;
/**
*
* Namespace in which edge properties are stored
*/
@Getter
@Setter
private String edgePropertyNamespace = DEFAULT_EDGE_PROPERTY_NAMESPACE;
@Getter
private IRI defaultType = this.toValidatedIri(DEFAULT_TYPE);
@Getter
private IRI defaultPredicate = this.toValidatedIri(DEFAULT_PREDICATE);
@Getter
private IRI defaultNamedGraph = this.toValidatedIri(DEFAULT_NAMED_GRAPH);
/**
*
* <h1>Mapping from property graph vertex types to instance label
* properties</h1>
*
* Properties that need to be used for RDFS labels are represented as map from
* vertex type to property name.
*
* <br>
* A property that is selected for an RDFS label will be added as normal
* property statement or not depending on the the configuration of
* {@link PropertyGraph2RdfMapper#alwaysAddPropertyStatements}.
*
* <h4>Example:</h4>
*
* {@code pgVertexType2PropertyForRdfsLabel.country=code} <br>
* defines the property value of <em>code</em> as label for vertices of type
* <em>country</em>.
*/
@Getter
@Setter
private Map<String, String> pgVertexType2PropertyForRdfsLabel = new HashMap<>();
/**
*
* <h1>Mapping from property graph properties to RDF resources</h1>
*
* Properties that need to be mapped to resources are represented as a map from
* a property name to an IRI pattern. The pattern must contain the
* <em>{{VALUE}}</em> substring. This will be substituted with the property
* value.
*
* <h4>Example:</h4>
*
* pgProperty2RdfResourcePattern.country=http://example.org/resource/country/{{VALUE}}
* <br>
* converts the property value 'FR' of <em>country</em> into
* <em>http://example.org/resource/country/FR</em>.
*/
@Getter
private Map<String, String> pgProperty2RdfResourcePattern = new HashMap<>();
/**
*
* A {@link PropertyGraphVertex2RdfMapping} exposing methods for creating RDF
* statements for vertices according to this {@link PropertyGraph2RdfMapping}.
*/
@Getter
private PropertyGraphVertex2RdfMapping vertex2RdfMapping = new PropertyGraphVertex2RdfMapping(this);
/**
*
* A {@link PropertyGraphEdge2RdfMapping} exposing methods for creating RDF
* statements for edges according to this {@link PropertyGraph2RdfMapping}.
*/
@Getter
private PropertyGraphEdge2RdfMapping edge2RdfMapping = new PropertyGraphEdge2RdfMapping(this);
/**
 * Set the map from property graph properties to RDF resource patterns. Each
 * RDF resource pattern must contain the string {{VALUE}}.
 *
 * @param pgProperty2RdfResourcePattern a map from properties to RDF resource
 *                                      patterns
 * @throws Csv2RdfException if a pattern in the map does not contain {{VALUE}}
 */
public void setPgProperty2RdfResourcePattern(Map<String, String> pgProperty2RdfResourcePattern) {
    // Validate every pattern up front so an invalid configuration fails fast.
    pgProperty2RdfResourcePattern.values().forEach(pattern -> {
        if (!pattern.contains(PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE)) {
            throw new Csv2RdfException(
                    "The pattern <" + pattern + "> for the new URI must contain the replacement variable "
                            + PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE + ".");
        }
    });
    this.pgProperty2RdfResourcePattern = pgProperty2RdfResourcePattern;
}
/** Set the default named graph, validating the IRI first. */
public void setDefaultNamedGraph(String defaultNamedGraph) {
    this.defaultNamedGraph = this.toValidatedIri(defaultNamedGraph);
}

/** Set the default type, validating the IRI first. */
public void setDefaultType(String defaultType) {
    this.defaultType = this.toValidatedIri(defaultType);
}

/** Set the default predicate, validating the IRI first. */
public void setDefaultPredicate(String defaultProperty) {
    this.defaultPredicate = this.toValidatedIri(defaultProperty);
}

/**
 * @return the configured edge context namespace, or the vertex namespace if
 *         none has been configured
 */
public String getEdgeContextNamespace() {
    if (this.edgeContextNamespace != null) {
        return this.edgeContextNamespace;
    }
    return getVertexNamespace();
}
/**
 * Create an IRI that represents a vertex type as class (aka type) in RDF.
 * The first character of the type is upper-cased before encoding.
 *
 * @param type local name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#typeNamespace} + encoded type
 * @throws Csv2RdfException if the IRI cannot be created
 */
// visible for testing
IRI typeIri(@NonNull String type) {
    String capitalized;
    if (type.isEmpty()) {
        capitalized = type;
    } else {
        capitalized = Character.toUpperCase(type.charAt(0)) + type.substring(1);
    }
    return namespacedIri(typeNamespace, capitalized);
}

/**
 * Create an IRI that represents a vertex in RDF.
 *
 * @param vertex local name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#vertexNamespace} + encoded vertex
 * @throws Csv2RdfException if the IRI cannot be created
 */
// visible for testing
IRI vertexIri(@NonNull String vertex) {
    return namespacedIri(vertexNamespace, vertex);
}

/**
 * Create an IRI that represents an edge in RDF.
 *
 * @param edge local name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#edgeNamespace} + encoded edge
 * @throws Csv2RdfException if the IRI cannot be created
 */
// visible for testing
IRI edgeIri(@NonNull String edge) {
    return namespacedIri(edgeNamespace, edge);
}

/**
 * Create an IRI that represents an edge context in RDF.
 *
 * @param context local context name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#getEdgeContextNamespace()} + encoded edge context
 * @throws Csv2RdfException if the IRI cannot be created
 */
IRI edgeContextIri(@NonNull String context) {
    return namespacedIri(getEdgeContextNamespace(), context);
}

/**
 * Create an IRI that represents a vertex property in RDF.
 *
 * @param vertexProperty local name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#vertexPropertyNamespace} + encoded
 *         vertex property
 * @throws Csv2RdfException if the IRI cannot be created
 */
// visible for testing
IRI vertexPropertyIri(@NonNull String vertexProperty) {
    return namespacedIri(vertexPropertyNamespace, vertexProperty);
}

/**
 * Create an IRI that represents an edge property in RDF.
 *
 * @param edgeProperty local name, will be URI encoded
 * @return {@link PropertyGraph2RdfMapping#edgePropertyNamespace} + encoded
 *         edge property
 * @throws Csv2RdfException if the IRI cannot be created
 */
// visible for testing
IRI edgePropertyIri(@NonNull String edgeProperty) {
    return namespacedIri(edgePropertyNamespace, edgeProperty);
}

/**
 * Concatenate a namespace and a URL-encoded local name and validate the
 * result. Shared by all IRI factory methods above.
 *
 * @param namespace namespace prefix, used verbatim
 * @param localName local name, will be URI encoded
 * @return validated IRI
 * @throws Csv2RdfException if the IRI cannot be created
 */
private IRI namespacedIri(String namespace, String localName) {
    return toValidatedIri(namespace + encode(localName));
}

/**
 * URI encode a value using the UTF-8 encoding scheme.
 *
 * @param value value to encode
 * @return URI encoded value
 * @throws Csv2RdfException if the value could not be encoded
 */
private String encode(String value) {
    try {
        return URLEncoder.encode(value, StandardCharsets.UTF_8.name());
    } catch (UnsupportedEncodingException e) {
        throw new Csv2RdfException("Could not encode '" + value + "' when mapping to RDF.", e);
    }
}

/**
 * Convert a string into an IRI, validating it as a URI first.
 *
 * @param iri candidate IRI string
 * @return new {@link IRI}
 * @throws Csv2RdfException if the IRI cannot be created
 */
private IRI toValidatedIri(String iri) {
    try {
        return vf.createIRI(new URI(iri).toString());
    } catch (URISyntaxException | IllegalArgumentException e) {
        throw new Csv2RdfException("Invalid resource URI <" + iri + "> generated when mapping to RDF.", e);
    }
}
/**
*
* Return a literal value including an XML schema data type for all type in
* {@link DataType} except {@link DataType#STRING}:
* <ul>
* <li>Values of {@link DataType#STRING} are not appended with an XML schema
* data type.</li>
* </ul>
*
* @param value lexical form of the literal
* @param datatype property graph data type to map
* @return literal with XML schema data type except for strings
* @throws IllegalArgumentException if the data type is not recognized
*/
// visible for testing
Literal value(@NonNull String value, @NonNull DataType datatype) {
switch (datatype) {
case BYTE:
return vf.createLiteral(value, XMLSchema.BYTE);
case BOOL:
return vf.createLiteral(value, XMLSchema.BOOLEAN);
case SHORT:
return vf.createLiteral(value, XMLSchema.SHORT);
// NOTE(review): INT is mapped to xsd:integer rather than xsd:int —
// presumably intentional, but confirm against the intended RDF mapping.
case INT:
return vf.createLiteral(value, XMLSchema.INTEGER);
case LONG:
return vf.createLiteral(value, XMLSchema.LONG);
case FLOAT:
return vf.createLiteral(value, XMLSchema.FLOAT);
case DOUBLE:
return vf.createLiteral(value, XMLSchema.DOUBLE);
case STRING:
return vf.createLiteral(value);
// NOTE(review): DATETIME is mapped to xsd:date, which has no time
// component — confirm that this is intended.
case DATETIME:
return vf.createLiteral(value, XMLSchema.DATE);
default:
throw new IllegalArgumentException("Data type not recognized: " + datatype + " for value " + value);
}
}
/**
*
* Create a RDF statement in the given named graph.
*
* @param subject statement subject
* @param predicate statement predicate
* @param object statement object, an IRI or a literal
* @param graph named graph the statement belongs to
* @return a new RDF statement
*/
private Statement statement(@NonNull IRI subject, @NonNull IRI predicate, @NonNull Value object,
@NonNull IRI graph) {
return vf.createStatement(subject, predicate, object, graph);
}
/**
*
* {@link PropertyGraphVertex2RdfMapping} contains methods that are necessary to
* create RDF statements from vertices.
*
*/
public static class PropertyGraphVertex2RdfMapping {
/**
*
* This mapping is used for creating RDF statements.
*/
private final PropertyGraph2RdfMapping mapping;
private PropertyGraphVertex2RdfMapping(PropertyGraph2RdfMapping mapping) {
this.mapping = mapping;
}
/**
*
* Check if values of the given property can be mapped to an RDF resource.
*
* @param property vertex property
* @return {@code true} if there is pattern to build a resource, else
* {@code false}
*/
public boolean containsRdfResourcePatternForProperty(String property) {
return mapping.pgProperty2RdfResourcePattern.containsKey(property);
}
/**
*
* Create a resource for the value of the given property using the configured
* resource pattern for the property. The configuration needs to be done in
* {@link PropertyGraph2RdfMapping#pgProperty2RdfResourcePattern}.
*
* @param property vertex property
* @param value value of the property
* @return a resource IRI, or {@code null} if no pattern is configured for
* the property
*/
public IRI mapPropertyValue2RdfResource(String property, String value) {
String resourcePattern = mapping.pgProperty2RdfResourcePattern.get(property);
if (resourcePattern == null) {
return null;
}
String resource = resourcePattern.replace(PropertyGraph2RdfConverter.REPLACEMENT_VARIABLE,
mapping.encode(value));
return mapping.toValidatedIri(resource);
}
/**
*
* Get the property whose values should be used as RDFS labels for the given
* vertex type. The mapping from vertex type to property needs to be configured
* in {@link PropertyGraph2RdfMapping#pgVertexType2PropertyForRdfsLabel}.
*
* @param vertexType type of the vertex (property ~label)
* @return property for creating RDFS labels, or {@code null} if none is
* configured for the vertex type
*/
public String getPropertyForRdfsLabel(String vertexType) {
return mapping.pgVertexType2PropertyForRdfsLabel.get(vertexType);
}
/**
*
* @param subject local name of the subject, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param type local name of the type, will be prefixed with
* {@link PropertyGraph2RdfMapping#typeIri}
* @return a type statement in
* {@link PropertyGraph2RdfMapping#defaultNamedGraph}
*/
public Statement createTypeStatement(@NonNull String subject, @NonNull String type) {
return mapping.statement(mapping.vertexIri(subject), RDF.TYPE, mapping.typeIri(type),
mapping.getDefaultNamedGraph());
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @return a type statement in
* {@link PropertyGraph2RdfMapping#defaultNamedGraph} using
* {@link PropertyGraph2RdfMapping#defaultType} as type
*/
public Statement createTypeStatement(@NonNull String subject) {
return mapping.statement(mapping.vertexIri(subject), RDF.TYPE, mapping.getDefaultType(),
mapping.getDefaultNamedGraph());
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param label RDFS label value
* @return a statement in {@link PropertyGraph2RdfMapping#defaultNamedGraph}
*/
public Statement createRdfsLabelStatement(@NonNull String subject, @NonNull String label) {
return mapping.statement(mapping.vertexIri(subject), RDFS.LABEL, mapping.value(label, DataType.STRING),
mapping.getDefaultNamedGraph());
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param predicate local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexPropertyIri}
* @param literal literal value
* @param dataType data type of the value
* @return a statement in {@link PropertyGraph2RdfMapping#defaultNamedGraph}
*/
public Statement createLiteralStatement(@NonNull String subject, @NonNull String predicate,
@NonNull String literal, @NonNull DataType dataType) {
return mapping.statement(mapping.vertexIri(subject), mapping.vertexPropertyIri(predicate),
mapping.value(literal, dataType), mapping.getDefaultNamedGraph());
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param predicate local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgeIri}
* @param value value will be mapped to an RDF resource by
* {@link #mapPropertyValue2RdfResource}
* @return a statement in {@link PropertyGraph2RdfMapping#defaultNamedGraph}
*/
public Statement createRelationStatement(@NonNull String subject, @NonNull String predicate,
@NonNull String value) {
// NOTE(review): mapPropertyValue2RdfResource returns null when no pattern
// is configured for the predicate; callers should check
// containsRdfResourcePatternForProperty first, since statement(...)
// rejects a null object.
return mapping.statement(mapping.vertexIri(subject), mapping.edgeIri(predicate),
mapPropertyValue2RdfResource(predicate, value), mapping.getDefaultNamedGraph());
}
}
/**
*
* {@link PropertyGraphEdge2RdfMapping} contains methods that are necessary to
* create RDF statements from edges.
*/
public static class PropertyGraphEdge2RdfMapping {
/**
*
* This mapping is used for creating RDF statements.
*/
private final PropertyGraph2RdfMapping mapping;
private PropertyGraphEdge2RdfMapping(PropertyGraph2RdfMapping mapping) {
this.mapping = mapping;
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param predicate local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgeIri}
* @param object local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param context local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgeContextIri}
* @return a statement in {@link PropertyGraph2RdfMapping#edgeContextIri}(context)
*/
public Statement createRelationStatement(@NonNull String subject, @NonNull String predicate,
@NonNull String object, @NonNull String context) {
return mapping.statement(mapping.vertexIri(subject), mapping.edgeIri(predicate), mapping.vertexIri(object),
mapping.edgeContextIri(context));
}
/**
*
* Variant without an explicit predicate.
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param object local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#vertexIri}
* @param context local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgeContextIri}
* @return a statement in {@link PropertyGraph2RdfMapping#edgeContextIri}(context)
* using {@link PropertyGraph2RdfMapping#defaultPredicate} as predicate
*/
public Statement createRelationStatement(@NonNull String subject, @NonNull String object,
@NonNull String context) {
return mapping.statement(mapping.vertexIri(subject), mapping.getDefaultPredicate(),
mapping.vertexIri(object), mapping.edgeContextIri(context));
}
/**
*
* @param subject local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgeContextIri}
* @param predicate local name, will be prefixed with
* {@link PropertyGraph2RdfMapping#edgePropertyIri}
* @param literal literal value
* @param dataType data type of the value
* @return a statement in {@link PropertyGraph2RdfMapping#defaultNamedGraph}
*/
public Statement createLiteralStatement(@NonNull String subject, @NonNull String predicate,
@NonNull String literal, @NonNull DataType dataType) {
return mapping.statement(mapping.edgeContextIri(subject), mapping.edgePropertyIri(predicate),
mapping.value(literal, dataType), mapping.getDefaultNamedGraph());
}
}
}
| 7,447 |
0 | Create_ds/neptune-gremlin-client/neptune-endpoints-info-lambda/src/main/java/software/amazon | Create_ds/neptune-gremlin-client/neptune-endpoints-info-lambda/src/main/java/software/amazon/lambda/NeptuneEndpointsInfoLambda.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.lambda;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.LambdaLogger;
import com.amazonaws.services.lambda.runtime.RequestStreamHandler;
import org.apache.tinkerpop.gremlin.driver.Endpoint;
import org.apache.tinkerpop.gremlin.driver.EndpointCollection;
import software.amazon.neptune.cluster.*;
import software.amazon.utils.EnvironmentVariableUtils;
import software.amazon.utils.RegionUtils;
import java.io.*;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Scanner;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import static java.nio.charset.StandardCharsets.UTF_8;
public class NeptuneEndpointsInfoLambda implements RequestStreamHandler {
// Polls the Neptune management API in the background for cluster topology.
private final ClusterEndpointsRefreshAgent refreshAgent;
// Latest snapshot of cluster metadata, swapped in by the polling callback.
private final AtomicReference<NeptuneClusterMetadata> neptuneClusterMetadata = new AtomicReference<>();
// Which endpoints to annotate as suspended, lower-cased: "all", "writer",
// "reader", or any other value (default "none") for no suspension.
private final String suspendedEndpoints;
public NeptuneEndpointsInfoLambda() {
this(
EnvironmentVariableUtils.getMandatoryEnv("clusterId"),
Integer.parseInt(EnvironmentVariableUtils.getOptionalEnv("pollingIntervalSeconds", "15")),
EnvironmentVariableUtils.getOptionalEnv("suspended", "none")
);
}
/**
* @param clusterId Neptune cluster to poll
* @param pollingIntervalSeconds interval between management API polls
* @param suspendedEndpoints endpoints to mark as suspended; lower-cased before comparison
*/
public NeptuneEndpointsInfoLambda(String clusterId, int pollingIntervalSeconds, String suspendedEndpoints) {
this.refreshAgent = ClusterEndpointsRefreshAgent.managementApi(clusterId,
RegionUtils.getCurrentRegionName(),
new DefaultAWSCredentialsProviderChain());
this.neptuneClusterMetadata.set(refreshAgent.getClusterMetadata());
this.suspendedEndpoints = suspendedEndpoints.toLowerCase();
System.out.println(String.format("clusterId: %s", clusterId));
System.out.println(String.format("pollingIntervalSeconds: %s", pollingIntervalSeconds));
System.out.println(String.format("suspendedEndpoints: %s", this.suspendedEndpoints));
// Keep the cached metadata fresh on a fixed schedule.
refreshAgent.startPollingNeptuneAPI(
(OnNewClusterMetadata) metadata -> {
neptuneClusterMetadata.set(metadata);
System.out.println("Refreshed cluster metadata");
},
pollingIntervalSeconds,
TimeUnit.SECONDS);
}
/**
 * Lambda entry point. Reads an optional {@link EndpointsType} token from the
 * request payload; if present, returns a comma-separated endpoint list for
 * legacy clients, otherwise returns the full cluster metadata as JSON.
 *
 * @param input request payload stream
 * @param output response stream, written as UTF-8
 * @param context Lambda execution context
 * @throws IOException if writing the response fails
 */
@Override
public void handleRequest(InputStream input, OutputStream output, Context context) throws IOException {
    LambdaLogger logger = context.getLogger();
    EndpointsType endpointsType = null;
    // Pin the charset to UTF-8 so request parsing does not depend on the
    // platform default (the response is written as UTF-8 as well). The
    // scanner is deliberately not closed: that would close the
    // Lambda-provided input stream.
    Scanner scanner = new Scanner(input, UTF_8.name());
    if (scanner.hasNext()) {
        String param = scanner.next().replace("\"", "");
        if (!param.isEmpty()) {
            endpointsType = EndpointsType.valueOf(param);
        }
    }
    // Force a metadata refresh so the response reflects the latest cluster state.
    try {
        refreshAgent.awake();
    } catch (InterruptedException e) {
        // Restore the interrupt flag before surfacing the failure.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Failed to awake refresh agent", e);
    } catch (ExecutionException e) {
        throw new RuntimeException("Failed to awake refresh agent", e);
    }
    if (endpointsType != null) {
        returnEndpointListForLegacyClient(endpointsType, logger, output);
    } else {
        returnClusterMetadata(logger, output);
    }
}
/**
 * Write the annotated cluster metadata as a JSON document to the response.
 *
 * @param logger Lambda logger
 * @param output response stream
 * @throws IOException if writing the response fails
 */
private void returnClusterMetadata(LambdaLogger logger, OutputStream output) throws IOException {
    logger.log("Returning cluster metadata");
    NeptuneClusterMetadata clusterMetadata = addAnnotations(neptuneClusterMetadata.get());
    writeResults(clusterMetadata.toJsonString(), logger, output);
}

/**
 * Write a comma-separated list of endpoint addresses for the given
 * {@link EndpointsType} to the response (legacy client format).
 *
 * @param endpointsType endpoint category requested by the client
 * @param logger Lambda logger
 * @param output response stream
 * @throws IOException if writing the response fails
 */
private void returnEndpointListForLegacyClient(EndpointsType endpointsType,
                                               LambdaLogger logger,
                                               OutputStream output) throws IOException {
    logger.log("Returning list of endpoints for EndpointsType: " + endpointsType);
    NeptuneClusterMetadata clusterMetadata = addAnnotations(neptuneClusterMetadata.get());
    EndpointCollection endpoints = endpointsType.getEndpoints(clusterMetadata);
    Collection<String> addresses = new ArrayList<>();
    for (Endpoint endpoint : endpoints) {
        addresses.add(endpoint.getAddress());
    }
    writeResults(String.join(",", addresses), logger, output);
}

/**
 * Log and write a result string to the response as UTF-8. Shared by both
 * response paths above (previously duplicated).
 *
 * @param results response body
 * @param logger Lambda logger
 * @param output response stream
 * @throws IOException if writing the response fails
 */
private void writeResults(String results, LambdaLogger logger, OutputStream output) throws IOException {
    logger.log("Results: " + results);
    try (Writer writer = new BufferedWriter(new OutputStreamWriter(output, UTF_8))) {
        writer.write(results);
        writer.flush();
    }
}
private NeptuneClusterMetadata addAnnotations(NeptuneClusterMetadata clusterMetadata) {
for (NeptuneInstanceMetadata instance : clusterMetadata.getInstances()) {
if (suspendedEndpoints.equals("all")) {
setSuspended(instance);
} else if (suspendedEndpoints.equals("writer") && instance.isPrimary()) {
setSuspended(instance);
} else if (suspendedEndpoints.equals("reader") && instance.isReader()) {
setSuspended(instance);
}
}
if (suspendedEndpoints.equals("all")){
setSuspended(clusterMetadata.getClusterEndpoint());
setSuspended(clusterMetadata.getReaderEndpoint());
} else if (suspendedEndpoints.equals("writer")){
setSuspended(clusterMetadata.getClusterEndpoint());
} else if (suspendedEndpoints.equals("reader")){
setSuspended(clusterMetadata.getReaderEndpoint());
}
return clusterMetadata;
}
// Marks an endpoint as suspended by setting the suspension-state annotation
// that the SuspendedEndpoints filter inspects when approving endpoints.
private static void setSuspended(Endpoint endpoint) {
    endpoint.setAnnotation(SuspendedEndpoints.STATE_ANNOTATION, SuspendedEndpoints.SUSPENDED);
}
}
| 7,448 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/EndpointCollectionTest.java | package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
public class EndpointCollectionTest {
@Test
public void shouldEnrichEndpointsUsingFilter() {
    // Two plain endpoints that start out with no annotations.
    Endpoint first = new DatabaseEndpoint().withAddress("address1");
    Endpoint second = new DatabaseEndpoint().withAddress("address2");
    // Filter that stamps each endpoint with a "test" annotation holding its own address.
    EndpointFilter annotatingFilter = new EndpointFilter() {
        @Override
        public Endpoint enrichEndpoint(Endpoint endpoint) {
            endpoint.setAnnotation("test", endpoint.getAddress());
            return endpoint;
        }
    };
    EndpointCollection collection = new EndpointCollection(Arrays.asList(first, second));
    // Before enrichment neither endpoint carries the annotation.
    assertFalse(collection.get("address1").getAnnotations().containsKey("test"));
    assertFalse(collection.get("address2").getAnnotations().containsKey("test"));
    EndpointCollection enriched = collection.getEnrichedEndpoints(annotatingFilter);
    // After enrichment both endpoints carry the annotation with the expected value.
    assertTrue(enriched.get("address1").getAnnotations().containsKey("test"));
    assertTrue(enriched.get("address2").getAnnotations().containsKey("test"));
    assertEquals(first.getAddress(), enriched.get("address1").getAnnotations().get("test"));
    assertEquals(second.getAddress(), enriched.get("address2").getAnnotations().get("test"));
}
@Test
public void shouldFilterForAcceptedEndpoints(){
    Endpoint approvable = new DatabaseEndpoint().withAddress("address1");
    Endpoint unapprovable = new DatabaseEndpoint().withAddress("address2");
    // Filter that approves only the endpoint whose address is "address1".
    EndpointFilter onlyAddress1 = new EndpointFilter() {
        @Override
        public ApprovalResult approveEndpoint(Endpoint endpoint) {
            return new ApprovalResult(endpoint.getAddress().equals("address1"), null);
        }
    };
    EndpointCollection allEndpoints = new EndpointCollection(Arrays.asList(approvable, unapprovable));
    EndpointCollection accepted = allEndpoints.getAcceptedEndpoints(onlyAddress1);
    // Only the approved endpoint survives.
    assertEquals(1, accepted.size());
    assertEquals(approvable, accepted.get("address1"));
}
@Test
public void shouldFilterForRejectedEndpoints(){
    Endpoint approvable = new DatabaseEndpoint().withAddress("address1");
    Endpoint unapprovable = new DatabaseEndpoint().withAddress("address2");
    // Filter that approves only the endpoint whose address is "address1".
    EndpointFilter onlyAddress1 = new EndpointFilter() {
        @Override
        public ApprovalResult approveEndpoint(Endpoint endpoint) {
            return new ApprovalResult(endpoint.getAddress().equals("address1"), null);
        }
    };
    EndpointCollection allEndpoints = new EndpointCollection(Arrays.asList(approvable, unapprovable));
    EndpointCollection rejected = allEndpoints.getRejectedEndpoints(onlyAddress1);
    // Only the unapproved endpoint is reported as rejected.
    assertEquals(1, rejected.size());
    assertEquals(unapprovable, rejected.get("address2"));
}
@Test
public void shouldIdentifyEndpointsForWhichThereIsNoCluster(){
    Endpoint backed = new DatabaseEndpoint().withAddress("address1");
    Endpoint orphan1 = new DatabaseEndpoint().withAddress("address2");
    Endpoint orphan2 = new DatabaseEndpoint().withAddress("address3");
    EndpointCollection endpoints = new EndpointCollection(Arrays.asList(backed, orphan1, orphan2));
    Cluster cluster = Cluster.build().create();
    // Factory that always hands back the same shared cluster.
    ClusterFactory clusterFactory = e -> cluster;
    ClientClusterCollection clientClusterCollection = new ClientClusterCollection(clusterFactory, null);
    // Only "address1" is given a backing cluster.
    clientClusterCollection.createClusterForEndpoint(backed);
    EndpointCollection missing = endpoints.getEndpointsWithNoCluster(clientClusterCollection);
    // The two endpoints without clusters are reported.
    assertEquals(2, missing.size());
    assertEquals(orphan1, missing.get("address2"));
    assertEquals(orphan2, missing.get("address3"));
    cluster.close();
}
} | 7,449 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/ConnectionAttemptManagerTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import software.amazon.utils.Clock;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.*;
public class ConnectionAttemptManagerTest {
@Test
public void determinesIfMaxWaitTimeExceeded() throws Exception {
    // Pin "now" at 5000 ms so the elapsed-time arithmetic is deterministic.
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null,
            1000,
            -1,
            null,
            -1,
            fixedClock)) {
        // Start times 500 ms and 1000 ms ago: still within the wait budget.
        assertFalse(manager.maxWaitTimeExceeded(4500L));
        assertFalse(manager.maxWaitTimeExceeded(4000L));
        // Start time 1500 ms ago: over budget.
        assertTrue(manager.maxWaitTimeExceeded(3500L));
    }
}
/**
 * Verifies the eager-refresh wait-time threshold: with "now" fixed at 5000 ms and a
 * 1000 ms eager-refresh wait time, only a start time more than 1000 ms in the past
 * trips the check.
 *
 * <p>Fix: removed the stray empty statement ({@code ;}) that followed the try block.
 */
@Test
public void determinesIfEagerRefreshWaitTimeExceeded() throws Exception {
    Clock clock = mock(Clock.class);
    when(clock.currentTimeMillis()).thenReturn(5000L);
    try (ConnectionAttemptManager connectionAttemptManager = new ConnectionAttemptManager(
            null,
            -1,
            1000,
            null,
            -1,
            clock)) {
        assertFalse(connectionAttemptManager.eagerRefreshWaitTimeExceeded(4500L));
        assertFalse(connectionAttemptManager.eagerRefreshWaitTimeExceeded(4000L));
        assertTrue(connectionAttemptManager.eagerRefreshWaitTimeExceeded(3500L));
    }
}
@Test
public void alwaysReturnsFalseIfEagerRefreshWaitTimeNotSet() throws Exception {
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    // Eager-refresh wait time is disabled (-1), so the check can never trip.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null,
            -1,
            -1,
            null,
            -1,
            fixedClock)) {
        assertFalse(manager.eagerRefreshWaitTimeExceeded(4500L));
        assertFalse(manager.eagerRefreshWaitTimeExceeded(4000L));
        assertFalse(manager.eagerRefreshWaitTimeExceeded(3500L));
    }
}
@Test
public void shouldDoNothingIfOnEagerRefreshIsNull() throws Exception {
    ExecutorService executor = mock(ExecutorService.class);
    // No OnEagerRefresh handler is supplied, so triggering a refresh must be a no-op.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null, -1, -1, null, -1, null, executor, 0, false)) {
        manager.triggerEagerRefresh(new EagerRefreshContext());
        verify(executor, never()).submit(any(Callable.class));
    }
}
@Test
public void shouldSubmitRefreshEventTask() throws Exception {
    ExecutorService executor = mock(ExecutorService.class);
    // With an OnEagerRefresh handler present, triggering a refresh schedules a task.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null, -1, -1, context -> null, -1, null, executor, 0, false)) {
        manager.triggerEagerRefresh(new EagerRefreshContext());
        verify(executor, times(1)).submit(any(ConnectionAttemptManager.RefreshEventTask.class));
    }
}
@Test
public void shouldNotSubmitRefreshEventTaskIfWithinBackoffPeriod() throws Exception {
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    ExecutorService executor = mock(ExecutorService.class);
    // Last refresh (4500) is within the 1000 ms backoff window of "now" (5000),
    // so no new task may be scheduled.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null, -1, -1, context -> null, 1000, fixedClock, executor, 4500, false)) {
        manager.triggerEagerRefresh(new EagerRefreshContext());
        verify(executor, never()).submit(any(Callable.class));
    }
}
@Test
public void shouldSubmitRefreshEventTaskIfOutsideBackoffPeriod() throws Exception {
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    ExecutorService executor = mock(ExecutorService.class);
    // Last refresh (3000) is outside the 1000 ms backoff window of "now" (5000),
    // so a refresh task must be scheduled.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null, -1, -1, context -> null, 1000, fixedClock, executor, 3000, false)) {
        manager.triggerEagerRefresh(new EagerRefreshContext());
        verify(executor, times(1)).submit(any(ConnectionAttemptManager.RefreshEventTask.class));
    }
}
@Test
public void shouldDoNothingIfAlreadyRefreshing() throws Exception {
    ExecutorService executor = mock(ExecutorService.class);
    // The manager is constructed in the already-refreshing state (final flag true),
    // so triggering another refresh must not schedule anything.
    try (ConnectionAttemptManager manager = new ConnectionAttemptManager(
            null, -1, -1, context -> null, -1, null, executor, 0, true)) {
        manager.triggerEagerRefresh(new EagerRefreshContext());
        verify(executor, never()).submit(any(Callable.class));
    }
}
} | 7,450 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/EndpointClientCollectionTest.java | package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
public class EndpointClientCollectionTest {
@Test
public void shouldIdentifySurvivingEndpointClients(){
    DatabaseEndpoint endpoint1 = new DatabaseEndpoint().withAddress("address1");
    DatabaseEndpoint endpoint2 = new DatabaseEndpoint().withAddress("address2");
    DatabaseEndpoint endpoint3 = new DatabaseEndpoint().withAddress("address3");
    EndpointClient client1 = new EndpointClient(endpoint1, mock(Client.class));
    EndpointClient client2 = new EndpointClient(endpoint2, mock(Client.class));
    EndpointClient client3 = new EndpointClient(endpoint3, mock(Client.class));
    EndpointClientCollection clientCollection = new EndpointClientCollection(
            EndpointClientCollection.builder()
                    .withEndpointClients(Arrays.asList(client1, client2, client3)));
    // Only endpoints 1 and 3 remain in the refreshed endpoint set.
    EndpointCollection remainingEndpoints = new EndpointCollection(Arrays.asList(endpoint1, endpoint3));
    List<EndpointClient> survivors = clientCollection.getSurvivingEndpointClients(remainingEndpoints);
    // The clients for the remaining endpoints survive; the other is dropped.
    assertEquals(2, survivors.size());
    assertTrue(survivors.contains(client1));
    assertTrue(survivors.contains(client3));
}
} | 7,451 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/EndpointClientTest.java | package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
public class EndpointClientTest {
@Test
public void shouldCreateListOfEndpointClientsForEndpointClusters(){
    Cluster sharedCluster = Cluster.build().create();
    Client sharedClient = mock(Client.class);
    Endpoint endpoint1 = new DatabaseEndpoint().withAddress("address1");
    Endpoint endpoint2 = new DatabaseEndpoint().withAddress("address2");
    Endpoint endpoint3 = new DatabaseEndpoint().withAddress("address3");
    // Map all three endpoints onto the same shared cluster.
    Map<Endpoint, Cluster> endpointClusters = new HashMap<>();
    endpointClusters.put(endpoint1, sharedCluster);
    endpointClusters.put(endpoint2, sharedCluster);
    endpointClusters.put(endpoint3, sharedCluster);
    // One EndpointClient should be produced per endpoint/cluster entry.
    List<EndpointClient> endpointClients = EndpointClient.create(endpointClusters, c -> sharedClient);
    assertEquals(3, endpointClients.size());
    assertTrue(containsEndpointClientWithEndpoint(endpointClients, endpoint1));
    assertTrue(containsEndpointClientWithEndpoint(endpointClients, endpoint2));
    assertTrue(containsEndpointClientWithEndpoint(endpointClients, endpoint3));
    sharedCluster.close();
}
// Returns true if any client in the list wraps an endpoint with the same address
// as the given endpoint.
private boolean containsEndpointClientWithEndpoint(List<EndpointClient> endpointClients, Endpoint endpoint){
    return endpointClients.stream()
            .anyMatch(candidate -> candidate.endpoint().getAddress().equals(endpoint.getAddress()));
}
} | 7,452 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/EmptyEndpointFilterTest.java | package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.*;
public class EmptyEndpointFilterTest {
@Test
public void shouldEnsureThatEndpointsWithNullAddressAreNotAccepted(){
    Endpoint addressed1 = new DatabaseEndpoint().withAddress("address1");
    Endpoint unaddressed1 = new DatabaseEndpoint().withAddress(null);
    Endpoint addressed2 = new DatabaseEndpoint().withAddress("address3");
    Endpoint unaddressed2 = new DatabaseEndpoint().withAddress(null);
    EndpointFilter filter = new EmptyEndpointFilter(EndpointFilter.NULL_ENDPOINT_FILTER);
    EndpointCollection endpoints = new EndpointCollection(
            Arrays.asList(addressed1, unaddressed1, addressed2, unaddressed2));
    // Endpoints with addresses pass through the filter...
    EndpointCollection accepted = endpoints.getAcceptedEndpoints(filter);
    assertEquals(2, accepted.size());
    assertEquals(addressed1, accepted.get("address1"));
    assertEquals(addressed2, accepted.get("address3"));
    // ...while null-address endpoints are rejected with the reason "empty".
    EndpointCollection rejected = endpoints.getRejectedEndpoints(filter);
    assertEquals(2, rejected.size());
    for (Endpoint rejectedEndpoint : rejected) {
        assertEquals("empty", rejectedEndpoint.getAnnotations().get(ApprovalResult.REJECTED_REASON_ANNOTATION));
    }
}
} | 7,453 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/ClientClusterCollectionTest.java | package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class ClientClusterCollectionTest {
@Test
public void shouldCreateClustersForEndpoints() {
    Cluster sharedCluster = Cluster.build().create();
    // Factory that hands back the same shared cluster for any endpoint set.
    ClusterFactory clusterFactory = endpoints -> sharedCluster;
    Endpoint endpoint1 = new DatabaseEndpoint().withAddress("address1");
    Endpoint endpoint2 = new DatabaseEndpoint().withAddress("address2");
    Endpoint endpoint3 = new DatabaseEndpoint().withAddress("address3");
    EndpointCollection endpointCollection = new EndpointCollection(Arrays.asList(endpoint1, endpoint2, endpoint3));
    ClientClusterCollection clientClusterCollection = new ClientClusterCollection(clusterFactory, null);
    // One cluster entry must be created per endpoint.
    Map<Endpoint, Cluster> endpointClusters = clientClusterCollection.createClustersForEndpoints(endpointCollection);
    assertEquals(3, endpointClusters.size());
    assertTrue(endpointClusters.containsKey(endpoint1));
    assertTrue(endpointClusters.containsKey(endpoint2));
    assertTrue(endpointClusters.containsKey(endpoint3));
    sharedCluster.close();
}
@Test
public void shouldRemoveClustersWithNoMatchingEndpoint() {
    Cluster sharedCluster = Cluster.build().create();
    ClusterFactory clusterFactory = endpoints -> sharedCluster;
    // Mocked close method so we can verify how many clusters get closed.
    Function<Cluster, Void> closeMethod = mock(Function.class);
    Endpoint endpoint1 = new DatabaseEndpoint().withAddress("address1");
    Endpoint endpoint2 = new DatabaseEndpoint().withAddress("address2");
    Endpoint endpoint3 = new DatabaseEndpoint().withAddress("address3");
    Endpoint endpoint4 = new DatabaseEndpoint().withAddress("address4");
    EndpointCollection allEndpoints = new EndpointCollection(Arrays.asList(endpoint1, endpoint2, endpoint3, endpoint4));
    ClientClusterCollection clientClusterCollection = new ClientClusterCollection(clusterFactory, null);
    clientClusterCollection.createClustersForEndpoints(allEndpoints);
    // Only endpoints 1 and 3 survive the refresh.
    EndpointCollection survivingEndpoints = new EndpointCollection(Arrays.asList(endpoint1, endpoint3));
    clientClusterCollection.removeClustersWithNoMatchingEndpoint(survivingEndpoints, closeMethod);
    // Clusters for surviving endpoints remain; the other two are removed and closed.
    assertTrue(clientClusterCollection.containsClusterForEndpoint(endpoint1));
    assertTrue(clientClusterCollection.containsClusterForEndpoint(endpoint3));
    assertFalse(clientClusterCollection.containsClusterForEndpoint(endpoint2));
    assertFalse(clientClusterCollection.containsClusterForEndpoint(endpoint4));
    verify(closeMethod, times(2)).apply(sharedCluster);
    sharedCluster.close();
}
} | 7,454 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/org/apache/tinkerpop/gremlin/driver/RefreshEventTaskTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.junit.Test;
import software.amazon.utils.Clock;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
public class RefreshEventTaskTest {
@Test
public void shouldRefreshClient(){
    EndpointCollection refreshedEndpoints = new EndpointCollection();
    EagerRefreshContext refreshContext = new EagerRefreshContext();
    AtomicLong lastRefreshTime = new AtomicLong(0);
    AtomicBoolean refreshInProgress = new AtomicBoolean(false);
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    Refreshable refreshableClient = mock(Refreshable.class);
    OnEagerRefresh eagerRefreshHandler = mock(OnEagerRefresh.class);
    when(eagerRefreshHandler.getEndpoints(refreshContext)).thenReturn(refreshedEndpoints);
    ConnectionAttemptManager.RefreshEventTask task =
            new ConnectionAttemptManager.RefreshEventTask(
                    refreshContext,
                    refreshableClient,
                    refreshInProgress,
                    lastRefreshTime,
                    eagerRefreshHandler,
                    fixedClock);
    task.run();
    // The client is refreshed with the handler's endpoints, the refresh time is
    // recorded from the clock, and the in-progress flag ends up cleared.
    verify(refreshableClient, times(1)).refreshEndpoints(refreshedEndpoints);
    assertEquals(5000, lastRefreshTime.get());
    assertFalse(refreshInProgress.get());
}
@Test
public void shouldNotRefreshClientIfAlreadyRefreshing(){
    EndpointCollection refreshedEndpoints = new EndpointCollection();
    EagerRefreshContext refreshContext = new EagerRefreshContext();
    AtomicLong lastRefreshTime = new AtomicLong(0);
    // The refreshing flag starts out set, simulating a refresh already in flight.
    AtomicBoolean refreshInProgress = new AtomicBoolean(true);
    Clock fixedClock = mock(Clock.class);
    when(fixedClock.currentTimeMillis()).thenReturn(5000L);
    Refreshable refreshableClient = mock(Refreshable.class);
    OnEagerRefresh eagerRefreshHandler = mock(OnEagerRefresh.class);
    when(eagerRefreshHandler.getEndpoints(refreshContext)).thenReturn(refreshedEndpoints);
    ConnectionAttemptManager.RefreshEventTask task =
            new ConnectionAttemptManager.RefreshEventTask(
                    refreshContext,
                    refreshableClient,
                    refreshInProgress,
                    lastRefreshTime,
                    eagerRefreshHandler,
                    fixedClock);
    task.run();
    // Nothing happens: no refresh call, refresh time untouched, flag still set.
    verify(refreshableClient, never()).refreshEndpoints(refreshedEndpoints);
    assertEquals(0, lastRefreshTime.get());
    assertTrue(refreshInProgress.get());
}
}
| 7,455 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune/cluster/NeptuneClusterMetadataTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package software.amazon.neptune.cluster;
import org.junit.Test;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
public class NeptuneClusterMetadataTest {
@Test
public void serializeAndDeserializeClusterMetadata() throws IOException {
    HashMap<String, String> tags = new HashMap<>();
    tags.put("name", "my-writer");
    tags.put("app", "analytics");
    // One writer and one reader instance, each carrying tags and a custom annotation.
    NeptuneInstanceMetadata writerInstance = new NeptuneInstanceMetadata()
            .withInstanceId("instance-1")
            .withInstanceType("r5.large")
            .withAvailabilityZone("eu-west-1b")
            .withAddress("endpoint-1")
            .withStatus("available")
            .withRole("writer")
            .withTags(tags)
            .withAnnotation("annotation-1-key", "annotation-1-value");
    NeptuneInstanceMetadata readerInstance = new NeptuneInstanceMetadata()
            .withInstanceId("instance-2")
            .withInstanceType("r5.medium")
            .withAvailabilityZone("eu-west-1a")
            .withAddress("endpoint-2")
            .withStatus("rebooting")
            .withRole("reader")
            .withTags(tags)
            .withAnnotation("annotation-2-key", "annotation-2-value");
    NeptuneClusterMetadata metadata = new NeptuneClusterMetadata()
            .withClusterEndpoint("cluster-endpoint")
            .withReaderEndpoint("reader-endpoint")
            .withInstances(Arrays.asList(writerInstance, readerInstance));
    // Round-trip: serialize, deserialize, serialize again — the JSON must be stable.
    String originalJson = metadata.toJsonString();
    NeptuneClusterMetadata roundTripped = NeptuneClusterMetadata.fromByteArray(originalJson.getBytes());
    String roundTrippedJson = roundTripped.toJsonString();
    assertEquals(roundTrippedJson, originalJson);
}
// Regression test: the deserializer must accept the legacy "endpoint" JSON field
// name as the source of an instance's address (rather than an "address" field).
@Test
public void shouldAcceptEndpointFieldForAddressValue() throws IOException {
// Raw cluster-metadata JSON in the legacy shape ("endpoint" fields throughout).
String json = "{\n" +
" \"instances\": [\n" +
" {\n" +
" \"instanceId\": \"neptune-db-1-123456b0\",\n" +
" \"role\": \"writer\",\n" +
" \"endpoint\": \"neptune-db-1-123456b0.abcdefghijklm.eu-west-2.neptune.amazonaws.com\",\n" +
" \"status\": \"available\",\n" +
" \"availabilityZone\": \"eu-west-2a\",\n" +
" \"instanceType\": \"db.r5.large\",\n" +
" \"annotations\": {},\n" +
" \"tags\": {\n" +
" \"Name\": \"neptune-db-1-123456b0\",\n" +
" \"Stack\": \"eu-west-2-social-NeptuneBaseStack-ABCDEFGHIJKL\",\n" +
" \"StackId\": \"arn:aws:cloudformation:eu-west-2:123456789123:stack/social-NeptuneBaseStack-ABCDEFGHIJKL/828f4fe0-e4e4-11ed-9c0c-02f21623886a\"\n" +
" }\n" +
" },\n" +
" {\n" +
" \"instanceId\": \"neptune-db-2-123456b0\",\n" +
" \"role\": \"reader\",\n" +
" \"endpoint\": \"neptune-db-2-123456b0.abcdefghijklm.eu-west-2.neptune.amazonaws.com\",\n" +
" \"status\": \"available\",\n" +
" \"availabilityZone\": \"eu-west-2c\",\n" +
" \"instanceType\": \"db.r5.large\",\n" +
" \"annotations\": {},\n" +
" \"tags\": {\n" +
" \"Name\": \"neptune-db-2-123456b0\",\n" +
" \"Stack\": \"eu-west-2-social-NeptuneBaseStack-ABCDEFGHIJKL\",\n" +
" \"StackId\": \"arn:aws:cloudformation:eu-west-2:123456789123:stack/social-NeptuneBaseStack-ABCDEFGHIJKL/828f4fe0-e4e4-11ed-9c0c-02f21623886a\"\n" +
" }\n" +
" }\n" +
" ],\n" +
" \"clusterEndpoint\": {\n" +
" \"endpoint\": \"neptune-cluster-123456b0.cluster-abcdefghijklm.eu-west-2.neptune.amazonaws.com\",\n" +
" \"annotations\": {}\n" +
" },\n" +
" \"readerEndpoint\": {\n" +
" \"endpoint\": \"neptune-cluster-123456b0.cluster-ro-abcdefghijklm.eu-west-2.neptune.amazonaws.com\",\n" +
" \"annotations\": {}\n" +
" }\n" +
"}\n";
NeptuneClusterMetadata cluster = NeptuneClusterMetadata.fromByteArray(json.getBytes());
// The primary (writer) instance's address must have been taken from its "endpoint" field.
String address = cluster.getInstances().stream().filter(i -> i.isPrimary()).map(i -> i.getAddress()).findFirst().get();
assertEquals("neptune-db-1-123456b0.abcdefghijklm.eu-west-2.neptune.amazonaws.com", address);
}
}
| 7,456 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune/cluster/IamAuthConfigTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package software.amazon.neptune.cluster;
import org.junit.Test;
import software.amazon.neptune.cluster.IamAuthConfig;
import java.util.HashSet;
import static org.junit.Assert.assertEquals;
public class IamAuthConfigTest {
@Test
public void shouldCreateJson() {
    // Build a fully-populated IAM auth config via the builder.
    IamAuthConfig iamAuthConfig = new IamAuthConfig.IamAuthConfigBuilder()
            .addNeptuneEndpoints("endpoint1", "endpoint2")
            .setNeptunePort(8182)
            .connectViaLoadBalancer()
            .removeHostHeaderAfterSigning()
            .setIamProfile("neptune")
            .setServiceRegion("us-east-1")
            .build();
    String actualJson = iamAuthConfig.asJsonString();
    // The serialized form must match the expected field order and values exactly.
    assertEquals("{\"endpoints\":[\"endpoint1\",\"endpoint2\"],\"port\":8182,\"enableIamAuth\":false,\"connectViaLoadBalancer\":true,\"removeHostHeaderAfterSigning\":true,\"serviceRegion\":\"us-east-1\",\"iamProfile\":\"neptune\"}", actualJson);
}
} | 7,457 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune/cluster/NeptuneInstanceMetadataTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.neptune.cluster;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.util.HashMap;
import static org.junit.Assert.*;
public class NeptuneInstanceMetadataTest {
@Test
public void shouldSerializeAndDeserialize() throws IOException {
    HashMap<String, String> tags = new HashMap<>();
    tags.put("name", "my-writer");
    tags.put("app", "analytics");
    // A reader instance carrying tags and a custom annotation.
    NeptuneInstanceMetadata metadata = new NeptuneInstanceMetadata()
            .withInstanceId("instance-2")
            .withInstanceType("r5.medium")
            .withAvailabilityZone("eu-west-1a")
            .withAddress("endpoint-2")
            .withStatus("rebooting")
            .withRole("reader")
            .withTags(tags)
            .withAnnotation("annotation-2-key", "annotation-2-value");
    // Round-trip: serialize, deserialize, serialize again — the JSON must be stable.
    String originalJson = metadata.toJsonString();
    NeptuneInstanceMetadata roundTripped = NeptuneInstanceMetadata.fromByteArray(originalJson.getBytes());
    String roundTrippedJson = roundTripped.toJsonString();
    Assert.assertEquals(roundTrippedJson, originalJson);
}
// Regression test: the instance deserializer must read the legacy "endpoint"
// JSON field as the address value.
@Test
public void shouldAcceptEndpointFieldForAddressValue() throws IOException {
    String json = "{\"instanceId\":\"instance-2\",\"role\":\"reader\",\"endpoint\":\"endpoint-2\",\"status\":\"rebooting\",\"availabilityZone\":\"eu-west-1a\",\"instanceType\":\"r5.medium\",\"annotations\":{\"annotation-2-key\":\"annotation-2-value\"},\"tags\":{\"app\":\"analytics\",\"name\":\"my-writer\"}}\n";
    NeptuneInstanceMetadata metadata = NeptuneInstanceMetadata.fromByteArray(json.getBytes());
    assertEquals("endpoint-2", metadata.getAddress());
}
} | 7,458 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/neptune/cluster/SuspendedEndpointsTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.neptune.cluster;
import org.apache.tinkerpop.gremlin.driver.*;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.*;
public class SuspendedEndpointsTest {
// Verifies that the SuspendedEndpoints filter rejects endpoints annotated as
// suspended and annotates them with the rejection reason, while unannotated
// endpoints pass through.
@Test
public void shouldEnsureThatSuspendedEndpointsAreNotAccepted(){
Endpoint endpoint1 = new DatabaseEndpoint().withAddress("address1");
// NOTE(review): endpoints 2 and 4 have null addresses — presumably copy-pasted
// from EmptyEndpointFilterTest; the suspension annotation below is what actually
// drives their rejection here. TODO confirm and consider giving them addresses.
Endpoint endpoint2 = new DatabaseEndpoint().withAddress(null);
Endpoint endpoint3 = new DatabaseEndpoint().withAddress("address3");
Endpoint endpoint4 = new DatabaseEndpoint().withAddress(null);
EndpointFilter filter = new SuspendedEndpoints();
// Mark endpoints 2 and 4 as suspended.
endpoint2.setAnnotation(SuspendedEndpoints.STATE_ANNOTATION, SuspendedEndpoints.SUSPENDED);
endpoint4.setAnnotation(SuspendedEndpoints.STATE_ANNOTATION, SuspendedEndpoints.SUSPENDED);
EndpointCollection endpoints = new EndpointCollection(
Arrays.asList(endpoint1, endpoint2, endpoint3, endpoint4));
// Unsuspended endpoints are accepted.
EndpointCollection acceptedEndpoints = endpoints.getAcceptedEndpoints(filter);
assertEquals(2, acceptedEndpoints.size());
assertEquals(endpoint1, acceptedEndpoints.get("address1"));
assertEquals(endpoint3, acceptedEndpoints.get("address3"));
// Suspended endpoints are rejected and annotated with the rejection reason.
EndpointCollection rejectedEndpoints = endpoints.getRejectedEndpoints(filter);
assertEquals(2, rejectedEndpoints.size());
for (Endpoint rejectedEndpoint : rejectedEndpoints) {
assertEquals(SuspendedEndpoints.SUSPENDED, rejectedEndpoint.getAnnotations().get(ApprovalResult.REJECTED_REASON_ANNOTATION));
}
}
} | 7,459 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon | Create_ds/neptune-gremlin-client/gremlin-client/src/test/java/software/amazon/utils/CollectionUtilsTest.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.utils;
import org.junit.Test;
import java.util.Arrays;
import java.util.List;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
public class CollectionUtilsTest {
@Test
public void shouldJoinTwoLists() {
List<String> l1 = Arrays.asList("one", "two", "three");
List<String> l2 = Arrays.asList("a", "b", "c");
List<String> result = CollectionUtils.join(l1, l2);
assertEquals(6, result.size());
assertTrue(result.contains("one"));
assertTrue(result.contains("two"));
assertTrue(result.contains("three"));
assertTrue(result.contains("a"));
assertTrue(result.contains("b"));
assertTrue(result.contains("c"));
}
} | 7,460 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ClusterContext.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
/**
 * Holds together a {@link GremlinCluster}, a {@link GremlinClient}, and a
 * {@link GraphTraversalSource}, so they can be passed around and closed as a unit.
 */
public class ClusterContext implements AutoCloseable {

    private final GremlinCluster cluster;
    private final GremlinClient client;
    private final GraphTraversalSource graphTraversalSource;

    public ClusterContext(GremlinCluster cluster,
                          GremlinClient client,
                          GraphTraversalSource graphTraversalSource) {
        this.cluster = cluster;
        this.client = client;
        this.graphTraversalSource = graphTraversalSource;
    }

    /** Returns the traversal source held by this context. */
    public GraphTraversalSource graphTraversalSource() {
        return this.graphTraversalSource;
    }

    /** Returns the cluster held by this context. */
    public GremlinCluster cluster() {
        return this.cluster;
    }

    /** Returns the client held by this context. */
    public GremlinClient client() {
        return this.client;
    }

    /** Closes the client first, then the cluster. */
    @Override
    public void close() throws Exception {
        this.client.close();
        this.cluster.close();
    }
}
| 7,461 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointsUnavailableException.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
/**
 * Thrown when no endpoints are available to serve a request. The message is the single
 * reason when there is exactly one, otherwise the string form of the whole collection.
 */
public class EndpointsUnavailableException extends RuntimeException {

    public EndpointsUnavailableException(Collection<String> reasons) {
        super(formatReasons(reasons));
    }

    // A lone reason is reported as-is; multiple reasons use the collection's toString().
    private static String formatReasons(Collection<String> reasons) {
        if (reasons.size() == 1) {
            return String.valueOf(reasons.iterator().next());
        }
        return String.valueOf(reasons);
    }
}
| 7,462 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EagerRefreshContext.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Context object passed to an {@link OnEagerRefresh} handler when an eager endpoint
 * refresh is triggered. Currently carries no state.
 */
public class EagerRefreshContext {
}
| 7,463 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/MetricsHandler.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Callback for receiving connection and request metrics when they are published.
 */
public interface MetricsHandler {
    void onMetricsPublished(ConnectionMetrics connectionMetrics, RequestMetrics requestMetrics);
}
| 7,464 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ClusterFactory.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Factory for creating a {@link Cluster} for a given set of endpoints.
 * <p>
 * Implementations may be invoked with {@code null} endpoints (see
 * {@link GremlinCluster#connectToEndpoints}) to create a parent cluster with no
 * endpoint-specific contact points.
 */
public interface ClusterFactory {
    Cluster createCluster(EndpointCollection endpoints);
}
| 7,465 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/MetricsConfig.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import com.amazonaws.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.utils.EnvironmentVariableUtils;
/**
 * Resolves whether client metrics are enabled by combining the builder setting with the
 * {@value #PROPERTY_NAME} environment variable and system property, and carries the
 * registered metrics handlers.
 */
class MetricsConfig {

    private static final String PROPERTY_NAME = "org.apache.tinkerpop.gremlin.driver.MetricsConfig.enableMetrics";

    private static final Logger logger = LoggerFactory.getLogger(MetricsConfig.class);

    private final boolean enableMetrics;
    private final MetricsHandlerCollection metricsHandlers;

    MetricsConfig(boolean enableMetrics, MetricsHandlerCollection metricsHandlers) {
        this.enableMetrics = calculateEnableMetricsValue(enableMetrics);
        this.metricsHandlers = metricsHandlers;
    }

    public boolean enableMetrics() {
        return enableMetrics;
    }

    public MetricsHandlerCollection metricsHandlers() {
        return metricsHandlers;
    }

    /**
     * An explicit 'false' in either the environment variable or the system property always
     * wins; otherwise metrics are enabled if the builder requested them, or if either
     * override is present (which, at that point, can only be 'true').
     */
    private boolean calculateEnableMetricsValue(boolean enableMetricsBuilder) {
        Boolean envSetting = parseFlag(EnvironmentVariableUtils.getOptionalEnv(PROPERTY_NAME, null));
        Boolean sysSetting = parseFlag(System.getProperty(PROPERTY_NAME, null));

        boolean result;
        if (Boolean.FALSE.equals(envSetting) || Boolean.FALSE.equals(sysSetting)) {
            result = false;
        } else {
            result = enableMetricsBuilder || envSetting != null || sysSetting != null;
        }

        logger.debug("Enable metrics: {} [builder: {}, env: {}, sys: {}]", result, enableMetricsBuilder, envSetting, sysSetting);
        return result;
    }

    // Returns null when the setting is absent or empty, otherwise the parsed boolean.
    private static Boolean parseFlag(String value) {
        return StringUtils.isNullOrEmpty(value) ? null : Boolean.parseBoolean(value);
    }
}
| 7,466 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ConnectionMetrics.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
/**
 * Immutable snapshot of connection-acquisition metrics: an overall duration, the total
 * number of connection attempts, and per-endpoint breakdowns.
 */
public class ConnectionMetrics {

    private final long durationMillis;
    private final long totalConnectionAttempts;
    private final Collection<EndpointConnectionMetrics> metrics;

    ConnectionMetrics(long durationMillis, long totalConnectionAttempts, Collection<EndpointConnectionMetrics> metrics) {
        this.durationMillis = durationMillis;
        this.totalConnectionAttempts = totalConnectionAttempts;
        this.metrics = metrics;
    }

    /** Duration in milliseconds associated with this snapshot. */
    public long getDurationMillis() {
        return this.durationMillis;
    }

    /** Total number of connection attempts across all endpoints. */
    public long getTotalConnectionAttempts() {
        return this.totalConnectionAttempts;
    }

    /** Per-endpoint connection metrics. */
    public Collection<EndpointConnectionMetrics> getMetrics() {
        return this.metrics;
    }
}
| 7,467 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/GremlinCluster.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.utils.GitProperties;
import software.amazon.utils.SoftwareVersion;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
/**
 * Entry point for creating {@link GremlinClient}s. Each {@code connect*()} call creates one
 * cluster per endpoint (plus a parent cluster) via the supplied {@link ClusterFactory}, and
 * every resulting {@link ClientClusterCollection} is tracked so that all underlying clusters
 * can be closed together through {@link #close()}.
 */
public class GremlinCluster implements AutoCloseable {

    private static final Logger logger = LoggerFactory.getLogger(GremlinCluster.class);

    // Endpoints used by the connect() overloads that take no explicit endpoint list.
    private final Collection<Endpoint> defaultEndpoints;
    private final ClusterFactory clusterFactory;
    // One entry per connect*() call; iterated when closing.
    private final Collection<ClientClusterCollection> clientClusterCollections = new CopyOnWriteArrayList<>();
    // Non-null once closing has begun; later closeAsync() calls return the same future.
    private final AtomicReference<CompletableFuture<Void>> closing = new AtomicReference<>(null);
    private final EndpointStrategies endpointStrategies;
    private final AcquireConnectionConfig acquireConnectionConfig;
    private final MetricsConfig metricsConfig;

    public GremlinCluster(Collection<Endpoint> defaultEndpoints,
                          ClusterFactory clusterFactory,
                          EndpointStrategies endpointStrategies,
                          AcquireConnectionConfig acquireConnectionConfig,
                          MetricsConfig metricsConfig) {
        logger.info("Version: {} {}", SoftwareVersion.FromResource, GitProperties.FromResource);
        logger.info("Created GremlinCluster [defaultEndpoints: {}, enableMetrics: {}]",
                defaultEndpoints.stream()
                        .map(Endpoint::getAddress)
                        .collect(Collectors.toList()),
                metricsConfig.enableMetrics());
        this.defaultEndpoints = defaultEndpoints;
        this.clusterFactory = clusterFactory;
        this.endpointStrategies = endpointStrategies;
        this.acquireConnectionConfig = acquireConnectionConfig;
        this.metricsConfig = metricsConfig;
    }

    /**
     * Connects to the given addresses, wrapping each address in a {@link DatabaseEndpoint}.
     */
    public GremlinClient connect(List<String> addresses, Client.Settings settings) {
        return connectToEndpoints(
                addresses.stream()
                        .map(a -> new DatabaseEndpoint().withAddress(a))
                        .collect(Collectors.toList()),
                settings);
    }

    /**
     * Creates a client backed by one cluster per endpoint, plus a parent cluster (created
     * with {@code null} endpoints) that the client is handed as its parent.
     *
     * @throws IllegalStateException if {@code endpoints} is empty
     */
    public GremlinClient connectToEndpoints(Collection<Endpoint> endpoints, Client.Settings settings) {
        logger.info("Connecting with: {}", endpoints.stream()
                .map(Endpoint::getAddress)
                .collect(Collectors.toList()));
        if (endpoints.isEmpty()) {
            throw new IllegalStateException("You must supply at least one endpoint");
        }
        Cluster parentCluster = clusterFactory.createCluster(null);
        ClientClusterCollection clientClusterCollection = new ClientClusterCollection(clusterFactory, parentCluster);
        Map<Endpoint, Cluster> clustersForEndpoints = clientClusterCollection.createClustersForEndpoints(new EndpointCollection(endpoints));
        List<EndpointClient> newEndpointClients = EndpointClient.create(clustersForEndpoints);
        EndpointClientCollection endpointClientCollection = new EndpointClientCollection(
                EndpointClientCollection.builder()
                        .withEndpointClients(newEndpointClients)
                        .setCollectMetrics(metricsConfig.enableMetrics()));
        clientClusterCollections.add(clientClusterCollection);
        return new GremlinClient(
                clientClusterCollection.getParentCluster(),
                settings,
                endpointClientCollection,
                clientClusterCollection,
                endpointStrategies,
                acquireConnectionConfig,
                metricsConfig
        );
    }

    public GremlinClient connect(List<String> addresses) {
        return connect(addresses, Client.Settings.build().create());
    }

    public GremlinClient connectToEndpoints(List<Endpoint> endpoints) {
        return connectToEndpoints(endpoints, Client.Settings.build().create());
    }

    public GremlinClient connect() {
        return connectToEndpoints(defaultEndpoints, Client.Settings.build().create());
    }

    public GremlinClient connect(Client.Settings settings) {
        return connectToEndpoints(defaultEndpoints, settings);
    }

    /**
     * Initiates an asynchronous close of all clusters created by this instance. Idempotent and
     * thread-safe: the first caller triggers the close and every caller receives the same
     * future. (The previous check-then-act implementation could start the close twice if two
     * threads raced past the null check, each receiving a different future.)
     */
    public CompletableFuture<Void> closeAsync() {
        synchronized (closing) {
            CompletableFuture<Void> inFlight = closing.get();
            if (inFlight != null) {
                return inFlight;
            }
            List<CompletableFuture<Void>> futures = new ArrayList<>();
            for (ClientClusterCollection clientClusterCollection : clientClusterCollections) {
                futures.add(clientClusterCollection.closeAsync());
            }
            CompletableFuture<Void> allClosed = CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
            closing.set(allClosed);
            return allClosed;
        }
    }

    /** Blocks until the asynchronous close completes. */
    @Override
    public void close() throws Exception {
        closeAsync().join();
    }
}
| 7,468 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ConnectionAttemptManager.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.utils.Clock;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Tracks how long connection-acquisition attempts have been waiting, and when the wait
 * exceeds the configured eager-refresh threshold, asks the {@link OnEagerRefresh} handler
 * for a fresh set of endpoints — at most one refresh in flight at a time, and no more often
 * than the configured backoff interval.
 */
class ConnectionAttemptManager implements AutoCloseable {

    private final Refreshable client;
    // True while a refresh task is in flight; guards against concurrent refreshes.
    private final AtomicBoolean refreshing;
    // Epoch millis of the most recent refresh attempt; used to enforce the backoff interval.
    private final AtomicLong latestRefreshTime;
    private final int maxWaitForConnection;
    private final int eagerRefreshWaitTimeMillis;
    private final OnEagerRefresh onEagerRefresh;
    private final ExecutorService executorService;
    private final int eagerRefreshBackoffMillis;
    private final Clock clock;

    private static final Logger logger = LoggerFactory.getLogger(ConnectionAttemptManager.class);

    ConnectionAttemptManager(Refreshable client,
                             int maxWaitForConnection,
                             int eagerRefreshWaitTimeMillis,
                             OnEagerRefresh onEagerRefresh,
                             int eagerRefreshBackoffMillis,
                             Clock clock) {
        this(client,
                maxWaitForConnection,
                eagerRefreshWaitTimeMillis,
                onEagerRefresh,
                eagerRefreshBackoffMillis,
                clock,
                Executors.newSingleThreadExecutor(),
                0,
                false
        );
    }

    // Package-private constructor that also allows the executor and initial state to be injected.
    ConnectionAttemptManager(Refreshable client,
                             int maxWaitForConnection,
                             int eagerRefreshWaitTimeMillis,
                             OnEagerRefresh onEagerRefresh,
                             int eagerRefreshBackoffMillis,
                             Clock clock,
                             ExecutorService executorService,
                             long latestRefreshTime,
                             boolean isRefreshing) {
        this.client = client;
        this.maxWaitForConnection = maxWaitForConnection;
        this.eagerRefreshWaitTimeMillis = eagerRefreshWaitTimeMillis;
        this.onEagerRefresh = onEagerRefresh;
        this.eagerRefreshBackoffMillis = eagerRefreshBackoffMillis;
        this.clock = clock;
        this.executorService = executorService;
        this.latestRefreshTime = new AtomicLong(latestRefreshTime);
        this.refreshing = new AtomicBoolean(isRefreshing);

        logger.info("maxWaitForConnection: {}, eagerRefreshWaitTimeMillis: {}, eagerRefreshBackoffMillis: {}",
                this.maxWaitForConnection,
                this.eagerRefreshWaitTimeMillis,
                this.eagerRefreshBackoffMillis);
    }

    /** Returns true if the wait since {@code start} exceeds the maximum wait for a connection. */
    public boolean maxWaitTimeExceeded(long start) {
        return waitTime(start) > maxWaitForConnection;
    }

    /**
     * Returns true if eager refresh is enabled (threshold &gt; 0) and the wait since
     * {@code start} exceeds that threshold.
     */
    public boolean eagerRefreshWaitTimeExceeded(long start) {
        return eagerRefreshWaitTimeMillis > 0 && waitTime(start) > eagerRefreshWaitTimeMillis;
    }

    /**
     * Schedules a background endpoint refresh unless no handler is configured, the last
     * refresh happened within the backoff interval, or a refresh is already in flight.
     * The {@code refreshing} check here is advisory; the task itself claims the flag
     * atomically before refreshing.
     */
    public void triggerEagerRefresh(EagerRefreshContext context) {

        String message = String.format("Wait time to get connection has exceeded threshold [%s millis]", eagerRefreshWaitTimeMillis);

        if (onEagerRefresh == null) {
            return;
        }

        long lastRefreshTime = latestRefreshTime.get();

        if (lastRefreshTime > 0 && waitTime(lastRefreshTime) < eagerRefreshBackoffMillis) {
            logger.warn("{} but last refresh occurred within backoff interval, so not getting new endpoints", message);
            return;
        }

        boolean isRefreshing = refreshing.get();

        if (!isRefreshing) {
            logger.warn("{} so getting new endpoints", message);
            executorService.submit(
                    new RefreshEventTask(context, client, refreshing, latestRefreshTime, onEagerRefresh, clock));
        } else {
            logger.warn("{} but already refreshing, so not getting new endpoints", message);
        }
    }

    private long waitTime(long start) {
        return clock.currentTimeMillis() - start;
    }

    public void shutdownNow() {
        executorService.shutdownNow();
    }

    @Override
    public void close() throws Exception {
        shutdownNow();
    }

    /**
     * Fetches new endpoints from the {@link OnEagerRefresh} handler and applies them to the
     * client. Uses compare-and-set on the shared {@code refreshing} flag so at most one task
     * refreshes at a time.
     */
    static class RefreshEventTask implements Runnable {

        private final EagerRefreshContext context;
        private final Refreshable client;
        private final AtomicBoolean refreshing;
        private final AtomicLong latestRefreshTime;
        private final OnEagerRefresh onEagerRefresh;
        private final Clock clock;

        RefreshEventTask(EagerRefreshContext context,
                         Refreshable client,
                         AtomicBoolean refreshing,
                         AtomicLong latestRefreshTime,
                         OnEagerRefresh onEagerRefresh,
                         Clock clock) {
            this.context = context;
            this.client = client;
            this.refreshing = refreshing;
            this.latestRefreshTime = latestRefreshTime;
            this.onEagerRefresh = onEagerRefresh;
            this.clock = clock;
        }

        @Override
        public void run() {
            boolean allowRefresh = refreshing.compareAndSet(false, true);
            if (allowRefresh) {
                // The finally block guarantees the flag is cleared even if the refresh throws;
                // previously an exception left 'refreshing' stuck at true, permanently
                // disabling eager refresh. The refresh time is recorded first (and even on
                // failure) so the backoff interval is honored before the next attempt.
                try {
                    client.refreshEndpoints(onEagerRefresh.getEndpoints(context));
                } finally {
                    latestRefreshTime.getAndUpdate(currentValue -> Math.max(clock.currentTimeMillis(), currentValue));
                    refreshing.set(false);
                }
            } else {
                logger.warn("Already refreshing, so taking no action");
            }
        }
    }
}
| 7,469 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointConnectionMetrics.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Per-endpoint counters for connection-acquisition outcomes (succeeded, unavailable, closing,
 * dead, NullPointerException, no-hosts-available), plus min/max/average time taken to acquire
 * a connection.
 * <p>
 * NOTE(review): fields are updated without synchronization — confirm instances are confined
 * to a single thread.
 */
public class EndpointConnectionMetrics {

    private final String address;
    private long succeeded = 0;
    private long unavailable = 0;
    private long closing = 0;
    private long dead = 0;
    private long npe = 0;
    private long nha = 0;
    // Long.MAX_VALUE is a 'no samples yet' sentinel. (The previous initial value of 0 meant
    // the reported minimum was always 0, since no non-negative duration could be smaller.)
    private long minMillis = Long.MAX_VALUE;
    private long maxMillis = 0;
    private long totalMillis;

    EndpointConnectionMetrics(String address) {
        this.address = address;
    }

    // Each recorder below takes the epoch-millis timestamp at which the attempt started.

    void succeeded(long startMillis){
        succeeded++;
        updateTimings(startMillis);
    }

    void unavailable(long startMillis){
        unavailable++;
        updateTimings(startMillis);
    }

    void closing(long startMillis){
        closing++;
        updateTimings(startMillis);
    }

    void dead(long startMillis){
        dead++;
        updateTimings(startMillis);
    }

    void npe(long startMillis){
        npe++;
        updateTimings(startMillis);
    }

    void nha(long startMillis){
        nha++;
        updateTimings(startMillis);
    }

    public String getAddress() {
        return address;
    }

    public long getSucceededCount() {
        return succeeded;
    }

    public long getUnavailableCount() {
        return unavailable;
    }

    public long getClosingCount() {
        return closing;
    }

    public long getDeadCount() {
        return dead;
    }

    public long getNullPointerExceptionCount() {
        return npe;
    }

    public long getNoHostsAvailableCount() {
        return nha;
    }

    /**
     * Minimum observed time to acquire a connection, or 0 if no attempts have been recorded.
     */
    public long getMinTimeToAcquireMillis() {
        return minMillis == Long.MAX_VALUE ? 0 : minMillis;
    }

    public long getMaxTimeToAcquireMillis() {
        return maxMillis;
    }

    public long getTotalAttempts(){
        return succeeded + unavailable + closing + dead + npe + nha;
    }

    /**
     * Average time to acquire a connection, or 0.0 if no attempts have been recorded.
     * (Previously this returned NaN before the first attempt because of a 0/0 division.)
     */
    public double getAverageTimeToAcquireMillis(){
        long attempts = getTotalAttempts();
        return attempts == 0 ? 0.0 : (double) totalMillis / (double) attempts;
    }

    private void updateTimings(long startMillis){
        long endMillis = System.currentTimeMillis();
        long duration = endMillis - startMillis;
        totalMillis += duration;
        if (duration > maxMillis){
            maxMillis = duration;
        }
        if (duration < minMillis){
            minMillis = duration;
        }
    }

    @Override
    public String toString() {
        return String.format("%s [total: %s, succeeded: %s, unavailable: %s, closing: %s, dead: %s, npe: %s, nha: %s, minMillis: %s, maxMillis: %s, avgMillis: %.2f]",
                getAddress(),
                getTotalAttempts(),
                getSucceededCount(),
                getUnavailableCount(),
                getClosingCount(),
                getDeadCount(),
                getNullPointerExceptionCount(),
                getNoHostsAvailableCount(),
                getMinTimeToAcquireMillis(),
                getMaxTimeToAcquireMillis(),
                getAverageTimeToAcquireMillis());
    }
}
| 7,470 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/GremlinClusterBuilder.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import io.netty.handler.ssl.SslContext;
import org.apache.tinkerpop.gremlin.driver.ser.Serializers;
import javax.net.ssl.TrustManager;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Collectors;
public class GremlinClusterBuilder {
/**
 * Creates a builder initialized with default settings.
 */
public static GremlinClusterBuilder build() {
    return new GremlinClusterBuilder();
}

// Endpoints used when connecting without an explicit endpoint list.
private List<Endpoint> endpoints = new ArrayList<>();
private int port = 8182;
private String path = "/gremlin";
private MessageSerializer serializer = Serializers.GRAPHBINARY_V1D0.simpleInstance();
// Thread-pool sizing, defaulted from the number of available processors.
private int nioPoolSize = Runtime.getRuntime().availableProcessors();
private int workerPoolSize = Runtime.getRuntime().availableProcessors() * 2;
// Connection-pool sizing, defaulted from the driver's ConnectionPool/Connection constants.
private int minConnectionPoolSize = ConnectionPool.MIN_POOL_SIZE;
private int maxConnectionPoolSize = ConnectionPool.MAX_POOL_SIZE;
private int minSimultaneousUsagePerConnection = ConnectionPool.MIN_SIMULTANEOUS_USAGE_PER_CONNECTION;
private int maxSimultaneousUsagePerConnection = ConnectionPool.MAX_SIMULTANEOUS_USAGE_PER_CONNECTION;
private int maxInProcessPerConnection = Connection.MAX_IN_PROCESS;
private int minInProcessPerConnection = Connection.MIN_IN_PROCESS;
private int maxWaitForConnection = Connection.MAX_WAIT_FOR_CONNECTION;
private int maxWaitForClose = Connection.MAX_WAIT_FOR_CLOSE;
private int maxContentLength = Connection.MAX_CONTENT_LENGTH;
private int reconnectInterval = Connection.RECONNECT_INTERVAL;
private int resultIterationBatchSize = Connection.RESULT_ITERATION_BATCH_SIZE;
private long keepAliveInterval = Connection.KEEP_ALIVE_INTERVAL;
private String channelizer = Channelizer.WebSocketChannelizer.class.getName();
// SSL configuration; the @Deprecated file-based settings are retained for backwards compatibility.
private boolean enableSsl = false;
private String trustCertChainFile = null;
private String keyCertChainFile = null;
private String keyFile = null;
private String keyPassword = null;
private String keyStore = null;
private String keyStorePassword = null;
private String trustStore = null;
private String trustStorePassword = null;
private String keyStoreType = null;
private String validationRequest = "''";
private List<String> sslEnabledProtocols = new ArrayList<>();
private List<String> sslCipherSuites = new ArrayList<>();
private boolean sslSkipCertValidation = false;
private SslContext sslContext = null;
private Supplier<LoadBalancingStrategy> loadBalancingStrategy = LoadBalancingStrategy.RoundRobin::new;
private AuthProperties authProps = new AuthProperties();
// Eager-refresh tuning; a non-positive wait time disables the eager-refresh threshold.
private int eagerRefreshWaitTimeMillis = -1;
private int eagerRefreshBackoffMillis = 5000;
private int acquireConnectionBackoffMillis = 5;
private OnEagerRefresh onEagerRefresh = null;
private EndpointFilter endpointFilter;
private HandshakeInterceptor interceptor = HandshakeInterceptor.NO_OP;
// Default configurator: applies the handshake interceptor and registers each endpoint
// address as a contact point on the underlying Cluster.Builder.
private TopologyAwareBuilderConfigurator configurator = new TopologyAwareBuilderConfigurator() {
    @Override
    public void apply(Cluster.Builder builder, EndpointCollection endpoints) {
        builder.handshakeInterceptor(interceptor);
        if (endpoints != null && !endpoints.isEmpty()) {
            for (Endpoint endpoint : endpoints) {
                builder.addContactPoint(endpoint.getAddress());
            }
        }
    }
};
private final MetricsHandlerCollection metricsHandlers = new MetricsHandlerCollection();
private boolean enableMetrics = false;

// Instances are created via build().
private GremlinClusterBuilder() {
}
/**
 * Hook for customizing the underlying {@link Cluster.Builder} with the current endpoints
 * whenever a cluster is created. The default implementation applies the handshake
 * interceptor and adds each endpoint address as a contact point.
 */
public GremlinClusterBuilder topologyAwareBuilderConfigurator(TopologyAwareBuilderConfigurator configurator){
    this.configurator = configurator;
    return this;
}

/**
 * Registers a handler to be notified when connection and request metrics are published.
 */
public GremlinClusterBuilder addMetricsHandler(MetricsHandler handler){
    this.metricsHandlers.addHandler(handler);
    return this;
}

/**
 * Enables or disables collection of client metrics. An explicit 'false' in the
 * 'org.apache.tinkerpop.gremlin.driver.MetricsConfig.enableMetrics' environment variable
 * or system property overrides this setting.
 */
public GremlinClusterBuilder enableMetrics(boolean enableMetrics){
    this.enableMetrics = enableMetrics;
    return this;
}
/**
 * Number of millis to wait between each attempt to acquire a connection.
 */
public GremlinClusterBuilder acquireConnectionBackoffMillis(final int acquireConnectionBackoffMillis) {
    this.acquireConnectionBackoffMillis = acquireConnectionBackoffMillis;
    return this;
}

/**
 * Minimum number of millis to wait between invocations of the handler supplied in
 * {@link #onEagerRefresh}.
 */
public GremlinClusterBuilder eagerRefreshBackoffMillis(final int eagerRefreshBackoffMillis) {
    this.eagerRefreshBackoffMillis = eagerRefreshBackoffMillis;
    return this;
}

/**
 * Number of millis to wait while trying to acquire a connection before invoking the handler
 * supplied in {@link #onEagerRefresh}. A non-positive value (the default) disables the
 * threshold.
 */
public GremlinClusterBuilder eagerRefreshWaitTimeMillis(final int eagerRefreshWaitTimeMillis) {
    this.eagerRefreshWaitTimeMillis = eagerRefreshWaitTimeMillis;
    return this;
}

/**
 * Handler to be invoked after {@link #eagerRefreshWaitTimeMillis} has elapsed while waiting
 * for a connection. The handler should return an {@link EndpointCollection} of endpoints to
 * refresh the client with.
 */
public GremlinClusterBuilder onEagerRefresh(final OnEagerRefresh eventHandler) {
    this.onEagerRefresh = eventHandler;
    return this;
}

/**
 * Strategy for filtering and enriching available endpoints before creating clients.
 */
public GremlinClusterBuilder endpointFilter(EndpointFilter endpointFilter) {
    this.endpointFilter = endpointFilter;
    return this;
}
/**
* Size of the pool for handling request/response operations. Defaults to the number of available processors.
*/
public GremlinClusterBuilder nioPoolSize(final int nioPoolSize) {
this.nioPoolSize = nioPoolSize;
return this;
}
/**
* Size of the pool for handling background work. Defaults to the number of available processors multiplied
* by 2
*/
public GremlinClusterBuilder workerPoolSize(final int workerPoolSize) {
this.workerPoolSize = workerPoolSize;
return this;
}
/**
* The path to the Gremlin service on the host which is "/gremlin" by default.
*/
public GremlinClusterBuilder path(final String path) {
this.path = path;
return this;
}
/**
* Set the {@link MessageSerializer} to use given the exact name of a {@link Serializers} enum. Note that
* setting this value this way will not allow specific configuration of the serializer itself. If specific
* configuration is required * please use {@link #serializer(MessageSerializer)}.
*/
public GremlinClusterBuilder serializer(final String mimeType) {
serializer = Serializers.valueOf(mimeType).simpleInstance();
return this;
}
/**
* Set the {@link MessageSerializer} to use via the {@link Serializers} enum. If specific configuration is
* required please use {@link #serializer(MessageSerializer)}.
*/
public GremlinClusterBuilder serializer(final Serializers mimeType) {
serializer = mimeType.simpleInstance();
return this;
}
/**
* Sets the {@link MessageSerializer} to use.
*/
public GremlinClusterBuilder serializer(final MessageSerializer serializer) {
this.serializer = serializer;
return this;
}
/**
* Enables connectivity over SSL - note that the server should be configured with SSL turned on for this
* setting to work properly.
*/
public GremlinClusterBuilder enableSsl(final boolean enable) {
this.enableSsl = enable;
return this;
}
/**
* Explicitly set the {@code SslContext} for when more flexibility is required in the configuration than is
* allowed by the {@link GremlinClusterBuilder}. If this value is set to something other than {@code null} then all other
* related SSL settings are ignored. The {@link #enableSsl} setting should still be set to {@code true} for
* this setting to take effect.
*/
public GremlinClusterBuilder sslContext(final SslContext sslContext) {
this.sslContext = sslContext;
return this;
}
/**
* File location for a SSL Certificate Chain to use when SSL is enabled. If this value is not provided and
* SSL is enabled, the default {@link TrustManager} will be used.
*
* @deprecated As of release 3.2.10, replaced by {@link #trustStore}
*/
@Deprecated
public GremlinClusterBuilder trustCertificateChainFile(final String certificateChainFile) {
this.trustCertChainFile = certificateChainFile;
return this;
}
/**
 * Length of time in milliseconds to wait on an idle connection before sending a keep-alive request. This
 * setting is only relevant to {@link Channelizer} implementations that return {@code true} for
 * {@link Channelizer#supportsKeepAlive()}. Set to zero to disable this feature.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder keepAliveInterval(final long keepAliveInterval) {
    this.keepAliveInterval = keepAliveInterval;
    return this;
}

/**
 * The X.509 certificate chain file in PEM format.
 *
 * @return this builder, for method chaining
 * @deprecated As of release 3.2.10, replaced by {@link #keyStore}
 */
@Deprecated
public GremlinClusterBuilder keyCertChainFile(final String keyCertChainFile) {
    this.keyCertChainFile = keyCertChainFile;
    return this;
}

/**
 * The PKCS#8 private key file in PEM format.
 *
 * @return this builder, for method chaining
 * @deprecated As of release 3.2.10, replaced by {@link #keyStore}
 */
@Deprecated
public GremlinClusterBuilder keyFile(final String keyFile) {
    this.keyFile = keyFile;
    return this;
}

/**
 * The password of the {@link #keyFile}, or {@code null} if it's not password-protected.
 *
 * @return this builder, for method chaining
 * @deprecated As of release 3.2.10, replaced by {@link #keyStorePassword}
 */
@Deprecated
public GremlinClusterBuilder keyPassword(final String keyPassword) {
    this.keyPassword = keyPassword;
    return this;
}

/**
 * The file location of the private key in JKS or PKCS#12 format.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder keyStore(final String keyStore) {
    this.keyStore = keyStore;
    return this;
}

/**
 * The password of the {@link #keyStore}, or {@code null} if it's not password-protected.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder keyStorePassword(final String keyStorePassword) {
    this.keyStorePassword = keyStorePassword;
    return this;
}
/**
 * The file location for a SSL Certificate Chain to use when SSL is enabled. If
 * this value is not provided and SSL is enabled, the default {@link TrustManager} will be used.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder trustStore(final String trustStore) {
    this.trustStore = trustStore;
    return this;
}

/**
 * The password of the {@link #trustStore}, or {@code null} if it's not password-protected.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder trustStorePassword(final String trustStorePassword) {
    this.trustStorePassword = trustStorePassword;
    return this;
}

/**
 * The format of the {@link #keyStore}, either {@code JKS} or {@code PKCS12}
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder keyStoreType(final String keyStoreType) {
    this.keyStoreType = keyStoreType;
    return this;
}

/**
 * A list of SSL protocols to enable. @see <a href=
 * "https://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html#SunJSSE_Protocols">JSSE
 * Protocols</a>
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder sslEnabledProtocols(final List<String> sslEnabledProtocols) {
    this.sslEnabledProtocols = sslEnabledProtocols;
    return this;
}

/**
 * A list of cipher suites to enable. @see <a href=
 * "https://docs.oracle.com/javase/8/docs/technotes/guides/security/SunProviders.html#SupportedCipherSuites">Cipher
 * Suites</a>
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder sslCipherSuites(final List<String> sslCipherSuites) {
    this.sslCipherSuites = sslCipherSuites;
    return this;
}

/**
 * If true, trust all certificates and do not perform any validation.
 * WARNING: skipping certificate validation makes the connection vulnerable to
 * man-in-the-middle attacks; intended for testing, not production.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder sslSkipCertValidation(final boolean sslSkipCertValidation) {
    this.sslSkipCertValidation = sslSkipCertValidation;
    return this;
}
/**
 * The minimum number of in-flight requests that can occur on a {@link Connection} before it is considered
 * for closing on return to the {@link ConnectionPool}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder minInProcessPerConnection(final int minInProcessPerConnection) {
    this.minInProcessPerConnection = minInProcessPerConnection;
    return this;
}

/**
 * The maximum number of in-flight requests that can occur on a {@link Connection}. This represents an
 * indication of how busy a {@link Connection} is allowed to be. This number is linked to the
 * {@link #maxSimultaneousUsagePerConnection} setting, but is slightly different in that it refers to
 * the total number of requests on a {@link Connection}. In other words, a {@link Connection} might
 * be borrowed once to have multiple requests executed against it. This number controls the maximum
 * number of requests whereas {@link #maxSimultaneousUsagePerConnection} controls the times borrowed.
 * (Doc fix: the last link previously pointed to this method itself.)
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxInProcessPerConnection(final int maxInProcessPerConnection) {
    this.maxInProcessPerConnection = maxInProcessPerConnection;
    return this;
}

/**
 * The maximum number of times that a {@link Connection} can be borrowed from the pool simultaneously.
 * This represents an indication of how busy a {@link Connection} is allowed to be. Set too large and the
 * {@link Connection} may queue requests too quickly, rather than wait for an available {@link Connection}
 * or create a fresh one. If set too small, the {@link Connection} will show as busy very quickly thus
 * forcing waits for available {@link Connection} instances in the pool when there is more capacity available.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxSimultaneousUsagePerConnection(final int maxSimultaneousUsagePerConnection) {
    this.maxSimultaneousUsagePerConnection = maxSimultaneousUsagePerConnection;
    return this;
}

/**
 * The minimum number of times that a {@link Connection} should be borrowed from the pool before it falls
 * under consideration for closing. If a {@link Connection} is not busy and the
 * {@link #minConnectionPoolSize} is exceeded, then there is no reason to keep that connection open. Set
 * too large and {@link Connection} that isn't busy will continue to consume resources when it is not being
 * used. Set too small and {@link Connection} instances will be destroyed when the driver might still be
 * busy.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder minSimultaneousUsagePerConnection(final int minSimultaneousUsagePerConnection) {
    this.minSimultaneousUsagePerConnection = minSimultaneousUsagePerConnection;
    return this;
}

/**
 * The maximum size that the {@link ConnectionPool} can grow.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxConnectionPoolSize(final int maxSize) {
    this.maxConnectionPoolSize = maxSize;
    return this;
}

/**
 * The minimum size of the {@link ConnectionPool}. When the {@link Client} is started, {@link Connection}
 * objects will be initially constructed to this size.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder minConnectionPoolSize(final int minSize) {
    this.minConnectionPoolSize = minSize;
    return this;
}
/**
 * Override the server setting that determines how many results are returned per batch.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder resultIterationBatchSize(final int size) {
    this.resultIterationBatchSize = size;
    return this;
}

/**
 * The maximum amount of time to wait for a connection to be borrowed from the connection pool.
 * (Presumably milliseconds, consistent with the other wait settings here — confirm against
 * the underlying driver's {@code maxWaitForConnection}.)
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxWaitForConnection(final int maxWait) {
    this.maxWaitForConnection = maxWait;
    return this;
}

/**
 * If the connection is using a "session" this setting represents the amount of time in milliseconds to wait
 * for that session to close before timing out where the default value is 3000. Note that the server will
 * eventually clean up dead sessions itself on expiration of the session or during shutdown.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxWaitForClose(final int maxWait) {
    this.maxWaitForClose = maxWait;
    return this;
}

/**
 * The maximum size in bytes of any request sent to the server. This number should not exceed the same
 * setting defined on the server.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder maxContentLength(final int maxContentLength) {
    this.maxContentLength = maxContentLength;
    return this;
}

/**
 * Specify the {@link Channelizer} implementation to use on the client when creating a {@link Connection},
 * by fully-qualified class name.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder channelizer(final String channelizerClass) {
    this.channelizer = channelizerClass;
    return this;
}
/**
 * Specify the {@link Channelizer} implementation to use on the client when creating a {@link Connection}.
 * Delegates to {@link #channelizer(String)} using the class's canonical name.
 *
 * @param channelizerClass the channelizer class; wildcard-parameterized (rather than a raw
 *                         {@code Class}) so callers are not forced into raw-type warnings —
 *                         erasure-compatible with the previous signature
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder channelizer(final Class<?> channelizerClass) {
    return channelizer(channelizerClass.getCanonicalName());
}
/**
 * Specify a valid Gremlin script that can be used to test remote operations. This script should be designed
 * to return quickly with the least amount of overhead possible. By default, the script sends an empty string.
 * If the graph does not support that sort of script because it requires all scripts to include a reference
 * to a graph then a good option might be {@code g.inject()}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder validationRequest(final String script) {
    validationRequest = script;
    return this;
}

/**
 * Time in milliseconds to wait between retries when attempting to reconnect to a dead host.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder reconnectInterval(final int interval) {
    this.reconnectInterval = interval;
    return this;
}

/**
 * Specifies the load balancing strategy to use on the client side.
 * A {@link Supplier} is taken (rather than an instance) so a fresh strategy can be created
 * when the underlying cluster is (re)built.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder loadBalancingStrategy(final Supplier<LoadBalancingStrategy> loadBalancingStrategy) {
    this.loadBalancingStrategy = loadBalancingStrategy;
    return this;
}

/**
 * Specifies parameters for authentication to Gremlin Server.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder authProperties(final AuthProperties authProps) {
    this.authProps = authProps;
    return this;
}

/**
 * Sets the {@link AuthProperties.Property#USERNAME} and {@link AuthProperties.Property#PASSWORD} properties
 * for authentication to Gremlin Server. Note this augments (rather than replaces) any
 * previously supplied {@link #authProperties(AuthProperties)}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder credentials(final String username, final String password) {
    authProps = authProps.with(AuthProperties.Property.USERNAME, username).with(AuthProperties.Property.PASSWORD, password);
    return this;
}

/**
 * Sets the {@link AuthProperties.Property#PROTOCOL} properties for authentication to Gremlin Server.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder protocol(final String protocol) {
    this.authProps = authProps.with(AuthProperties.Property.PROTOCOL, protocol);
    return this;
}

/**
 * Sets the {@link AuthProperties.Property#JAAS_ENTRY} properties for authentication to Gremlin Server.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder jaasEntry(final String jaasEntry) {
    this.authProps = authProps.with(AuthProperties.Property.JAAS_ENTRY, jaasEntry);
    return this;
}
/**
 * Adds the address of a Gremlin Server to the list of servers a {@link Client} will try to contact to send
 * requests to. The address should be parseable by {@link InetAddress#getByName(String)}. That's the only
 * validation performed at this point. No connection to the host is attempted.
 *
 * @return this builder, for method chaining
 * @throws IllegalArgumentException if the address cannot be resolved
 */
public GremlinClusterBuilder addContactPoint(final String address) {
    try {
        // Name resolution is used purely as validation; the result is discarded.
        InetAddress.getByName(address);
        this.endpoints.add(new DatabaseEndpoint().withAddress(address));
        return this;
    } catch (UnknownHostException e) {
        // Fixed: propagate the original exception as the cause instead of discarding it,
        // so callers see the full stack trace of the resolution failure.
        throw new IllegalArgumentException(e.getMessage(), e);
    }
}
/**
 * Add one or more the addresses of a Gremlin Servers to the list of servers a {@link Client} will try to
 * contact to send requests to. The address should be parseable by {@link InetAddress#getByName(String)}.
 * That's the only validation performed at this point. No connection to the host is attempted.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder addContactPoints(final String... addresses) {
    for (String address : addresses)
        addContactPoint(address);
    return this;
}

/**
 * Collection variant of {@link #addContactPoints(String...)}; each address is validated by
 * {@link #addContactPoint(String)}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder addContactPoints(final Collection<String> addresses) {
    for (String address : addresses)
        addContactPoint(address);
    return this;
}
/**
 * Adds an {@link Endpoint} to the list of servers a {@link Client} will try to contact. The
 * endpoint's address must be resolvable by {@link InetAddress#getByName(String)}; that is the
 * only validation performed — no connection to the host is attempted.
 *
 * @return this builder, for method chaining
 * @throws IllegalArgumentException if the endpoint's address cannot be resolved
 */
public GremlinClusterBuilder addContactPoint(final Endpoint endpoint) {
    try {
        // Name resolution is used purely as validation; the result is discarded.
        InetAddress.getByName(endpoint.getAddress());
        this.endpoints.add(endpoint);
        return this;
    } catch (UnknownHostException e) {
        // Fixed: propagate the original exception as the cause instead of discarding it.
        throw new IllegalArgumentException(e.getMessage(), e);
    }
}
/**
 * Adds every endpoint in the collection via {@link #addContactPoint(Endpoint)}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder addContactPoints(final EndpointCollection endpointCollection) {
    for (Endpoint endpoint : endpointCollection)
        addContactPoint(endpoint);
    return this;
}

/**
 * Varargs variant of {@link #addContactPoints(EndpointCollection)}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder addContactPointsMetadata(final Endpoint... endpointCollection) {
    for (Endpoint endpoint : endpointCollection)
        addContactPoint(endpoint);
    return this;
}

/**
 * Collection variant of {@link #addContactPointsMetadata(Endpoint...)}.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder addContactPointsMetadata(final Collection<Endpoint> endpointCollection) {
    for (Endpoint endpoint : endpointCollection)
        addContactPoint(endpoint);
    return this;
}
/**
 * Sets the port that the Gremlin Servers will be listening on.
 * Note a single port applies to all contact points (see {@link #getContactPoints()}).
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder port(final int port) {
    this.port = port;
    return this;
}

/**
 * Specifies an {@link HandshakeInterceptor} that will allow manipulation of the
 * {@code FullHttpRequest} prior to its being sent over the websocket.
 *
 * @return this builder, for method chaining
 */
public GremlinClusterBuilder handshakeInterceptor(final HandshakeInterceptor interceptor) {
    this.interceptor = interceptor;
    return this;
}
/**
 * Resolves the configured endpoints to socket addresses, pairing each endpoint's address
 * with the single configured {@link #port}.
 */
List<InetSocketAddress> getContactPoints() {
    final List<InetSocketAddress> contactPoints = new ArrayList<>();
    for (final Endpoint endpoint : endpoints) {
        contactPoints.add(new InetSocketAddress(endpoint.getAddress(), port));
    }
    return contactPoints;
}
/**
 * Builds the {@link GremlinCluster}: filters the configured endpoints through the
 * {@link EndpointFilter} (if any), then hands the surviving endpoints to a cluster factory
 * lambda that maps all of this builder's settings onto the underlying TinkerPop
 * {@link Cluster.Builder}.
 */
public GremlinCluster create() {
    Collection<Endpoint> filteredEndpoints = new ArrayList<>();
    // Fall back to the pass-through filter when none was configured.
    EndpointFilter endpointFilter = this.endpointFilter != null ?
            this.endpointFilter :
            EndpointFilter.NULL_ENDPOINT_FILTER;
    for (Endpoint endpoint : endpoints) {
        if (endpointFilter.approveEndpoint(endpoint).isApproved()) {
            filteredEndpoints.add(endpoint);
        }
    }
    EndpointStrategies endpointStrategies = new EndpointStrategies(
            endpointFilter
    );
    AcquireConnectionConfig acquireConnectionConfig = new AcquireConnectionConfig(
            maxWaitForConnection,
            eagerRefreshWaitTimeMillis,
            onEagerRefresh,
            eagerRefreshBackoffMillis,
            acquireConnectionBackoffMillis);
    MetricsConfig metricsConfig = new MetricsConfig(enableMetrics, metricsHandlers);
    // The lambda is invoked per endpoint set; it captures this builder's settings and
    // translates them one-for-one onto the TinkerPop Cluster.Builder.
    return new GremlinCluster(filteredEndpoints, endpoints -> {
        Cluster.Builder builder = Cluster.build()
                .reconnectInterval(reconnectInterval)
                .maxWaitForConnection(maxWaitForConnection)
                .enableSsl(enableSsl)
                .maxInProcessPerConnection(maxInProcessPerConnection)
                .minSimultaneousUsagePerConnection(minSimultaneousUsagePerConnection)
                .port(port)
                .authProperties(authProps)
                .loadBalancingStrategy(loadBalancingStrategy.get())
                .validationRequest(validationRequest)
                .channelizer(channelizer)
                .maxContentLength(maxContentLength)
                .maxWaitForClose(maxWaitForClose)
                .resultIterationBatchSize(resultIterationBatchSize)
                .minConnectionPoolSize(minConnectionPoolSize)
                .maxConnectionPoolSize(maxConnectionPoolSize)
                .maxSimultaneousUsagePerConnection(maxSimultaneousUsagePerConnection)
                .minInProcessPerConnection(minInProcessPerConnection)
                .sslSkipCertValidation(sslSkipCertValidation)
                .sslCipherSuites(sslCipherSuites)
                .sslEnabledProtocols(sslEnabledProtocols)
                .keyStoreType(keyStoreType)
                .trustStorePassword(trustStorePassword)
                .trustStore(trustStore)
                .keyStorePassword(keyStorePassword)
                .keyStore(keyStore)
                .keepAliveInterval(keepAliveInterval)
                .sslContext(sslContext)
                .serializer(serializer)
                .path(path)
                .workerPoolSize(workerPoolSize)
                .nioPoolSize(nioPoolSize);
        // Last-chance hook for callers to adjust the builder before the cluster is created.
        configurator.apply(builder, endpoints);
        return builder.create();
    }, endpointStrategies, acquireConnectionConfig, metricsConfig);
}
}
| 7,471 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EmptyEndpointFilter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * An {@link EndpointFilter} decorator that rejects endpoints with a {@code null} address
 * (reason {@code "empty"}) and delegates every other endpoint to an inner filter.
 * A {@code null} inner filter is replaced with {@link EndpointFilter#NULL_ENDPOINT_FILTER}.
 */
public class EmptyEndpointFilter implements EndpointFilter {

    private final EndpointFilter innerFilter;

    public EmptyEndpointFilter(EndpointFilter innerFilter) {
        this.innerFilter = innerFilter == null ? EndpointFilter.NULL_ENDPOINT_FILTER : innerFilter;
    }

    @Override
    public ApprovalResult approveEndpoint(Endpoint endpoint) {
        return hasAddress(endpoint)
                ? innerFilter.approveEndpoint(endpoint)
                : new ApprovalResult(false, "empty");
    }

    @Override
    public Endpoint enrichEndpoint(Endpoint endpoint) {
        return hasAddress(endpoint) ? innerFilter.enrichEndpoint(endpoint) : endpoint;
    }

    private static boolean hasAddress(Endpoint endpoint) {
        return endpoint.getAddress() != null;
    }
}
| 7,472 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/DatabaseEndpoint.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import com.fasterxml.jackson.annotation.JsonIgnore;
import java.util.HashMap;
import java.util.Map;
/**
 * A mutable {@link Endpoint} describing a database address plus a free-form map of string
 * annotations. Bean-style setters exist alongside fluent {@code with*} variants; instances
 * always report themselves as available.
 */
public class DatabaseEndpoint implements Endpoint {

    private String address;
    private final Map<String, String> annotations = new HashMap<>();

    public void setAddress(String address) {
        this.address = address;
    }

    /** @deprecated retained for backward compatibility; use {@link #setAddress(String)}. */
    @Deprecated
    public void setEndpoint(String endpoint) {
        setAddress(endpoint);
    }

    /** Replaces the current annotations with a copy of the supplied map. */
    public void setAnnotations(Map<String, String> annotations) {
        this.annotations.clear();
        this.annotations.putAll(annotations);
    }

    /** Fluent variant of {@link #setAddress(String)}. */
    public DatabaseEndpoint withAddress(String endpoint) {
        setAddress(endpoint);
        return this;
    }

    /** Fluent variant of {@link #setAnnotations(Map)}. */
    public DatabaseEndpoint withAnnotations(Map<String, String> annotations) {
        setAnnotations(annotations);
        return this;
    }

    @Override
    public String getAddress() {
        return address;
    }

    /** Database endpoints are unconditionally reported as available. */
    @Override
    @JsonIgnore
    public boolean isAvailable() {
        return true;
    }

    @Override
    public Map<String, String> getAnnotations() {
        return annotations;
    }

    @Override
    public void setAnnotation(String key, String value) {
        annotations.put(key, value);
    }

    @Override
    public String toString() {
        StringBuilder text = new StringBuilder("DatabaseEndpoint{");
        text.append("address='").append(address).append('\'');
        text.append(", annotations=").append(annotations);
        text.append('}');
        return text.toString();
    }
}
| 7,473 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/MetricsHandlerCollection.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
/**
 * A composite {@link MetricsHandler} that fans published metrics out to a list of handlers.
 * A {@link MetricsLogger} is always registered first; a handler that throws does not prevent
 * the remaining handlers from running.
 */
class MetricsHandlerCollection implements MetricsHandler {

    // Fixed: the logger was previously created for MetricsLogger.class, which attributed
    // this class's error log entries to the wrong logger name.
    private static final Logger logger = LoggerFactory.getLogger(MetricsHandlerCollection.class);

    private final Collection<MetricsHandler> handlers = new ArrayList<>();

    MetricsHandlerCollection() {
        addHandler(new MetricsLogger());
    }

    void addHandler(MetricsHandler handler) {
        handlers.add(handler);
    }

    @Override
    public void onMetricsPublished(ConnectionMetrics connectionMetrics, RequestMetrics requestMetrics) {
        for (MetricsHandler handler : handlers) {
            try {
                handler.onMetricsPublished(connectionMetrics, requestMetrics);
            } catch (Exception e) {
                // Isolate handler failures so one misbehaving handler cannot break the rest.
                logger.error("Error while handling metrics", e);
            }
        }
    }
}
| 7,474 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointStrategies.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Groups the pluggable strategies applied to endpoints. Currently this holds only the
 * {@link EndpointFilter}; kept as a wrapper so further strategies can be added without
 * changing call sites.
 */
class EndpointStrategies {

    private final EndpointFilter endpointFilter;

    EndpointStrategies(final EndpointFilter endpointFilter) {
        this.endpointFilter = endpointFilter;
    }

    /** The filter used to approve/enrich endpoints. */
    public EndpointFilter endpointFilter() {
        return endpointFilter;
    }
}
| 7,475 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointFilter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Strategy for vetting and decorating {@link Endpoint}s before they are used.
 * Both methods have pass-through defaults, so implementors may override either one.
 */
public interface EndpointFilter {
    // A no-op filter relying entirely on the interface defaults: approves every endpoint
    // and returns endpoints unmodified.
    EndpointFilter NULL_ENDPOINT_FILTER = new EndpointFilter() {
    };
    /** Decides whether an endpoint may be used. Default: approve, with a null reason. */
    default ApprovalResult approveEndpoint(Endpoint endpoint) {
        return new ApprovalResult(true, null);
    }
    /** Hook for augmenting an endpoint before use. Default: return it unchanged. */
    default Endpoint enrichEndpoint(Endpoint endpoint) {
        return endpoint;
    }
}
| 7,476 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ClientClusterCollection.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Maintains one single-endpoint {@link Cluster} per endpoint address (created via the
 * supplied {@link ClusterFactory}), alongside the parent {@link Cluster} they derive from.
 * The cluster map is a {@link ConcurrentHashMap}; {@link #closeAsync()} is idempotent and
 * returns the same future to every caller.
 */
class ClientClusterCollection {

    private static final Logger logger = LoggerFactory.getLogger(ClientClusterCollection.class);

    private final ClusterFactory clusterFactory;
    private final Cluster parentCluster;
    private final Map<String, Cluster> clusters = new ConcurrentHashMap<>();
    private final AtomicReference<CompletableFuture<Void>> closing = new AtomicReference<>(null);

    ClientClusterCollection(ClusterFactory clusterFactory, Cluster parentCluster) {
        this.clusterFactory = clusterFactory;
        this.parentCluster = parentCluster;
    }

    /** Creates and registers a single-endpoint cluster for the given endpoint, keyed by address. */
    public Cluster createClusterForEndpoint(Endpoint endpoint) {
        Cluster cluster = clusterFactory.createCluster(new EndpointCollection(Collections.singletonList(endpoint)));
        clusters.put(endpoint.getAddress(), cluster);
        return cluster;
    }

    /** Creates a cluster per endpoint and returns the endpoint-to-cluster mapping. */
    public Map<Endpoint, Cluster> createClustersForEndpoints(EndpointCollection endpoints) {
        Map<Endpoint, Cluster> results = new HashMap<>();
        for (Endpoint endpoint : endpoints) {
            results.put(endpoint, createClusterForEndpoint(endpoint));
        }
        return results;
    }

    public boolean containsClusterForEndpoint(Endpoint endpoint) {
        return clusters.containsKey(endpoint.getAddress());
    }

    /** Removes and synchronously closes clusters whose address no longer appears in {@code endpoints}. */
    public void removeClustersWithNoMatchingEndpoint(EndpointCollection endpoints) {
        removeClustersWithNoMatchingEndpoint(endpoints, cluster -> {
            if (cluster != null) {
                cluster.close();
            }
            return null;
        });
    }

    // Package-visible so the close behavior can be substituted.
    void removeClustersWithNoMatchingEndpoint(EndpointCollection endpoints, Function<Cluster, Void> clusterCloseMethod) {
        List<String> removalList = new ArrayList<>();
        for (String address : clusters.keySet()) {
            if (!endpoints.containsEndpoint(new DatabaseEndpoint().withAddress(address))) {
                removalList.add(address);
            }
        }
        for (String address : removalList) {
            logger.info("Removing client for {}", address);
            Cluster cluster = clusters.remove(address);
            clusterCloseMethod.apply(cluster);
        }
    }

    public Cluster getParentCluster() {
        return parentCluster;
    }

    /**
     * Closes all managed clusters plus the parent cluster. Every caller observes the same
     * completion future.
     */
    public CompletableFuture<Void> closeAsync() {
        CompletableFuture<Void> inFlight = closing.get();
        if (inFlight != null) {
            return inFlight;
        }
        List<CompletableFuture<Void>> futures = new ArrayList<>();
        for (Cluster cluster : clusters.values()) {
            futures.add(cluster.closeAsync());
        }
        futures.add(parentCluster.closeAsync());
        CompletableFuture<Void> allClosed = CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{}));
        // Fixed: publish via compareAndSet instead of set() so that two threads racing past
        // the null check agree on a single canonical future. NOTE(review): racing threads may
        // each invoke closeAsync() on the clusters; this presumes Cluster.closeAsync() is
        // idempotent — confirm against the driver.
        return closing.compareAndSet(null, allClosed) ? allClosed : closing.get();
    }

    /** The first registered cluster in map iteration order, or {@code null} when empty. */
    public Cluster getFirstOrNull() {
        Optional<Map.Entry<String, Cluster>> first = clusters.entrySet().stream().findFirst();
        return first.map(Map.Entry::getValue).orElse(null);
    }

    @Override
    public String toString() {
        return clusters.entrySet().stream()
                .map(e -> String.format(" {%s, %s, isClosed: %s}",
                        e.getKey(),
                        e.getValue().allHosts().stream().map(h -> h.getHostUri().toString()).collect(Collectors.joining(",")),
                        e.getValue().isClosed()))
                .collect(Collectors.joining(System.lineSeparator()));
    }
}
| 7,477 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/RequestMetricsCollector.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.UUID;
/**
 * Correlates request trace IDs with endpoint addresses and folds request durations/outcomes
 * into per-endpoint {@link EndpointRequestMetrics}. Not thread-safe by itself — presumably
 * confined to a single thread or externally synchronized by the caller (confirm at call sites).
 */
class RequestMetricsCollector {
    // Cap on tracked in-flight trace IDs; beyond this the least-recently-accessed entry is dropped.
    private static final int MAX_NUMBER_TRACE_IDS = 9000;
    // Per-endpoint-address metrics. Only addresses present here at registration time are updated.
    public final Map<String, EndpointRequestMetrics> metrics;
    // Access-ordered LinkedHashMap (third ctor arg true) so iterator().next() yields the
    // least-recently-accessed entry, giving LRU eviction in registerAddressForTraceId.
    public final Map<UUID, String> traceIds = new LinkedHashMap<>(16, 0.75f, true);
    // Count of trace IDs evicted because the cap was exceeded.
    private int dropped = 0;
    // Count of responses that could not be attributed to a known trace ID / address.
    private int skipped = 0;
    public RequestMetricsCollector(Map<String, EndpointRequestMetrics> metrics) {
        this.metrics = metrics;
    }
    /** Records which endpoint address a request (trace ID) was sent to, evicting the LRU entry at capacity. */
    public void registerAddressForTraceId(UUID traceId, String address) {
        if (traceIds.size() > MAX_NUMBER_TRACE_IDS){
            UUID toRemove = traceIds.keySet().iterator().next();
            traceIds.remove(toRemove);
            dropped++;
        }
        traceIds.put(traceId, address);
    }
    /**
     * Records the duration (and optional error) of a completed request, removing its trace ID.
     * Responses with an unknown trace ID or an untracked address are counted as skipped.
     */
    public void registerDurationForTraceId(UUID traceId, long durationMillis, Throwable e) {
        String address = traceIds.remove(traceId);
        if (address != null) {
            if (metrics.containsKey(address)) {
                metrics.get(address).update(durationMillis, e);
            } else {
                skipped++;
            }
        } else {
            skipped++;
        }
    }
    /** Number of in-flight requests evicted before their response arrived. */
    public int droppedRequests(){
        return dropped;
    }
    /** Number of responses that could not be matched to a tracked request. */
    public int skippedResponses(){
        return skipped;
    }
    /** Total requests (successes plus errors) across all endpoints. */
    public long totalRequests(){
        long totalRequests = 0;
        for (EndpointRequestMetrics rm : metrics.values()) {
            totalRequests += (rm.getSuccessCount() + rm.getErrorCount());
        }
        return totalRequests;
    }
    /** Total failed requests across all endpoints. */
    public long failedRequests(){
        long failedRequests = 0;
        for (EndpointRequestMetrics rm : metrics.values()) {
            failedRequests += rm.getErrorCount();
        }
        return failedRequests;
    }
    public Collection<EndpointRequestMetrics> metrics(){
        return metrics.values();
    }
}
| 7,478 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/AcquireConnectionConfig.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Immutable bundle of the timing/backoff settings used when acquiring a connection, plus a
 * factory for the {@link ConnectionAttemptManager} that applies them. All durations are in
 * milliseconds, per the field names.
 */
class AcquireConnectionConfig {

    private final int maxWaitForConnection;
    private final int eagerRefreshWaitTimeMillis;
    private final OnEagerRefresh onEagerRefresh;
    private final int eagerRefreshBackoffMillis;
    private final int acquireConnectionBackoffMillis;

    AcquireConnectionConfig(int maxWaitForConnection,
                            int eagerRefreshWaitTimeMillis,
                            OnEagerRefresh onEagerRefresh,
                            int eagerRefreshBackoffMillis,
                            int acquireConnectionBackoffMillis) {
        this.maxWaitForConnection = maxWaitForConnection;
        this.eagerRefreshWaitTimeMillis = eagerRefreshWaitTimeMillis;
        this.onEagerRefresh = onEagerRefresh;
        this.eagerRefreshBackoffMillis = eagerRefreshBackoffMillis;
        this.acquireConnectionBackoffMillis = acquireConnectionBackoffMillis;
    }

    /** Builds a {@link ConnectionAttemptManager} for the given client using these settings. */
    public ConnectionAttemptManager createConnectionAttemptManager(GremlinClient gremlinClient) {
        return new ConnectionAttemptManager(
                gremlinClient,
                maxWaitForConnection,
                eagerRefreshWaitTimeMillis,
                onEagerRefresh,
                eagerRefreshBackoffMillis,
                // Idiom: method reference instead of the equivalent () -> System.currentTimeMillis()
                // lambda (the final parameter is the manager's clock source).
                System::currentTimeMillis);
    }

    public int acquireConnectionBackoffMillis() {
        return acquireConnectionBackoffMillis;
    }
}
| 7,479 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ChooseEndpointStrategy.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Strategy for selecting which {@link EndpointClient} should service a request
 * from the currently available collection.
 */
public interface ChooseEndpointStrategy {
    /** Picks one client from the supplied collection. */
    EndpointClient choose(EndpointClientCollection clientHolders);
}
| 7,480 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/RefreshTask.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import software.amazon.neptune.cluster.EndpointsSelector;
/**
 * Immutable pairing of a {@link GremlinClient} with the {@link EndpointsSelector} used to
 * refresh its endpoints.
 */
public class RefreshTask {

    private final GremlinClient client;
    private final EndpointsSelector selector;

    /** Static factory; equivalent to invoking the constructor directly. */
    public static <T extends EndpointsSelector> RefreshTask refresh(GremlinClient client, T selector) {
        return new RefreshTask(client, selector);
    }

    public <T extends EndpointsSelector> RefreshTask(GremlinClient client, T selector) {
        this.client = client;
        this.selector = selector;
    }

    /** The client whose endpoints are to be refreshed. */
    public GremlinClient client() {
        return client;
    }

    /** The selector that determines the refreshed endpoint set. */
    public EndpointsSelector selector() {
        return selector;
    }
}
| 7,481 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointClient.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.function.Function;
/**
 * Binds an {@link Endpoint} to the {@link Client} (and its underlying {@link Cluster})
 * that communicates with it.
 */
class EndpointClient {

    private final Endpoint endpoint;
    private final Client client;

    EndpointClient(Endpoint endpoint, Client client) {
        this.endpoint = endpoint;
        this.client = client;
    }

    /**
     * Creates an initialized endpoint client for every endpoint/cluster pair.
     */
    public static List<EndpointClient> create(Map<Endpoint, Cluster> endpointClusters) {
        return create(endpointClusters, cluster -> cluster.connect().init());
    }

    /**
     * Creates an endpoint client for every endpoint/cluster pair, building each
     * client from its cluster via the supplied factory.
     */
    static List<EndpointClient> create(Map<Endpoint, Cluster> endpointClusters, Function<Cluster, Client> clientFactory) {
        List<EndpointClient> results = new ArrayList<>(endpointClusters.size());
        endpointClusters.forEach((endpoint, cluster) ->
                results.add(new EndpointClient(endpoint, clientFactory.apply(cluster))));
        return results;
    }

    /** @return true if the underlying cluster currently has at least one available host */
    public boolean isAvailable() {
        return !client.getCluster().availableHosts().isEmpty();
    }

    public Endpoint endpoint() {
        return endpoint;
    }

    public Client client() {
        return client;
    }

    /** Initializes the underlying client. */
    public void initClient() {
        client.init();
    }

    /** Closes the underlying client asynchronously. */
    public CompletableFuture<Void> closeClientAsync() {
        return client.closeAsync();
    }
}
| 7,482 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointRequestMetrics.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
/**
 * Accumulates request latency and success/error counts for a single endpoint address.
 * Instances are updated via the single-threaded metrics executor owned by
 * EndpointClientCollection, so the plain {@code long} counters rely on that
 * serialization rather than their own synchronization.
 */
public class EndpointRequestMetrics {
    private final long start = System.currentTimeMillis();
    private final String address;
    // NOTE(review): includes durations of failed requests as well, so
    // getAverageLatencyMillis() (which divides by successCount) can overstate
    // per-success latency — confirm intent.
    private long totalDurationMillis;
    // Sentinel until the first sample arrives. (Previously initialized to 0, which
    // meant the recorded minimum could never rise above zero.)
    private long minMillis = Long.MAX_VALUE;
    private long maxMillis = 0L;
    private long successCount;
    private long errorCount;
    private final ConcurrentHashMap<Class<? extends Throwable>, ErrorMetric> errors = new ConcurrentHashMap<>();

    public EndpointRequestMetrics(String address) {
        this.address = address;
    }

    /**
     * Records a request that took {@code duration} milliseconds.
     *
     * @param duration elapsed time of the request in milliseconds
     * @param e        null for a successful request; otherwise the failure, whose
     *                 type is tallied in the per-error-class counters
     */
    public void update(long duration, Throwable e) {
        totalDurationMillis += duration;
        if (duration > maxMillis) {
            maxMillis = duration;
        }
        if (duration < minMillis) {
            minMillis = duration;
        }
        if (e == null) {
            successCount++;
        } else {
            // compute() is atomic per key on ConcurrentHashMap.
            errors.compute(e.getClass(), (errorClass, errorMetric) ->
                    errorMetric == null ? new ErrorMetric(errorClass).increment() : errorMetric.increment());
            errorCount++;
        }
    }

    public String getAddress() {
        return address;
    }

    public long getSuccessCount() {
        return successCount;
    }

    public long getErrorCount() {
        return errorCount;
    }

    /** @return successful requests per second since this metric was created */
    public double getRatePerSecond() {
        long duration = System.currentTimeMillis() - start;
        return (double) successCount / ((double) (duration) / 1000.00);
    }

    /** @return mean recorded duration per successful request, or 0.0 when none succeeded */
    public double getAverageLatencyMillis() {
        // Guard against division by zero (previously produced NaN/Infinity).
        return successCount == 0 ? 0.0 : (double) totalDurationMillis / (double) successCount;
    }

    /** @return the smallest recorded duration, or 0 when nothing has been recorded yet */
    public long getMinLatencyMillis() {
        return minMillis == Long.MAX_VALUE ? 0L : minMillis;
    }

    /** @return the largest recorded duration (0 when nothing has been recorded yet) */
    public long getMaxLatencyMillis() {
        return maxMillis;
    }

    /** @return per-error-class occurrence counters for failed requests */
    public Collection<ErrorMetric> getErrors(){
        return errors.values();
    }

    @Override
    public String toString() {
        String errorString = getErrors().isEmpty() ?
                "" :
                String.format(", errors: [%s]", getErrors().stream().map(ErrorMetric::toString).collect(Collectors.joining(", ")));
        return String.format("%s [succeeded: %s, failed: %s, ratePerSec: %.3f, minMillis: %s, maxMillis: %s, avgMillis: %.2f%s]",
                getAddress(),
                getSuccessCount(),
                getErrorCount(),
                getRatePerSecond(),
                getMinLatencyMillis(),
                getMaxLatencyMillis(),
                getAverageLatencyMillis(),
                errorString
        );
    }
}
| 7,483 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/OnEagerRefresh.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Callback invoked when the client triggers an eager (out-of-schedule) endpoint
 * refresh — e.g. when GremlinClient has waited too long to acquire a connection.
 */
public interface OnEagerRefresh {
    /**
     * @param context context describing the eager refresh request
     * @return the endpoints the client should use following the refresh
     */
    EndpointCollection getEndpoints(EagerRefreshContext context);
}
| 7,484 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ConnectionMetricsCollector.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
import java.util.Map;
/**
 * Aggregates per-endpoint connection-attempt metrics, keyed by endpoint address.
 * Updates for addresses that are not registered in the backing map are silently ignored.
 */
public class ConnectionMetricsCollector {

    private final Map<String, EndpointConnectionMetrics> connectionMetrics;

    public ConnectionMetricsCollector(Map<String, EndpointConnectionMetrics> connectionMetrics) {
        this.connectionMetrics = connectionMetrics;
    }

    /** @return the per-endpoint metrics for all registered addresses */
    public Collection<EndpointConnectionMetrics> metrics(){
        return connectionMetrics.values();
    }

    /** @return the total number of connection attempts across all endpoints */
    public long totalConnectionAttempts(){
        long totalConnectionAttempts = 0;
        for (EndpointConnectionMetrics cm : connectionMetrics.values()) {
            totalConnectionAttempts += cm.getTotalAttempts();
        }
        return totalConnectionAttempts;
    }

    // Each recorder below uses a single get() + null check instead of the previous
    // containsKey()/get() double lookup.

    /** Records a successful connection acquisition for the address. */
    void succeeded(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null){
            metrics.succeeded(startMillis);
        }
    }

    /** Records that no connection was available for the address. */
    void unavailable(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null) {
            metrics.unavailable(startMillis);
        }
    }

    /** Records that the chosen connection was closing. */
    void closing(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null) {
            metrics.closing(startMillis);
        }
    }

    /** Records that the chosen connection was dead. */
    void dead(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null) {
            metrics.dead(startMillis);
        }
    }

    /** Records a NullPointerException raised while choosing a connection. */
    void npe(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null) {
            metrics.npe(startMillis);
        }
    }

    /** Records a NoHostAvailableException raised while choosing a connection. */
    void nha(String address, long startMillis){
        EndpointConnectionMetrics metrics = connectionMetrics.get(address);
        if (metrics != null) {
            metrics.nha(startMillis);
        }
    }
}
| 7,485 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ApprovalResult.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Result of an endpoint-filter approval check: whether an endpoint may be used,
 * and, if not, why it was rejected.
 */
public class ApprovalResult {
    // Both constants were previously non-final public statics and could be
    // reassigned by any caller; they are now final.
    /** Shared instance representing approval (empty reason). */
    public static final ApprovalResult APPROVED = new ApprovalResult(true, "");
    /** Endpoint annotation key under which a rejection reason is recorded. */
    public static final String REJECTED_REASON_ANNOTATION = "AWS:rejected_reason";

    private final boolean isApproved;
    private final String reason;

    /**
     * @param isApproved whether the endpoint is approved for use
     * @param reason     rejection reason (ignored when approved)
     */
    public ApprovalResult(boolean isApproved, String reason) {
        this.isApproved = isApproved;
        this.reason = reason;
    }

    public boolean isApproved() {
        return isApproved;
    }

    public String reason() {
        return reason;
    }

    /**
     * If this result is a rejection, annotates the endpoint with the rejection
     * reason. Returns the same (possibly mutated) endpoint for chaining.
     */
    public Endpoint enrich(Endpoint endpoint){
        if (!isApproved){
            endpoint.setAnnotation(REJECTED_REASON_ANNOTATION, reason);
        }
        return endpoint;
    }
}
| 7,486 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/Endpoint.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Map;
/**
 * A network endpoint the Gremlin client can connect to, carrying free-form
 * annotations (used, for example, to record rejection reasons — see ApprovalResult).
 */
public interface Endpoint {
    /** @return the endpoint's address; may be null (EndpointCollection keys such endpoints by hash code) */
    String getAddress();
    /** @return true if the endpoint is currently considered available */
    boolean isAvailable();
    /** @return the annotations attached to this endpoint */
    Map<String, String> getAnnotations();
    /** Attaches or replaces an annotation on this endpoint. */
    void setAnnotation(String key, String value);
}
| 7,487 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/ErrorMetric.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Occurrence counter for a single throwable type, used when reporting
 * per-endpoint request metrics.
 */
public class ErrorMetric {

    private final Class<? extends Throwable> errorClass;
    private long count;

    public ErrorMetric(Class<? extends Throwable> errorClass) {
        this.errorClass = errorClass;
    }

    /** @return the throwable type this metric counts */
    public Class<? extends Throwable> getErrorClass(){
        return errorClass;
    }

    /** @return the number of occurrences recorded so far */
    public long getCount() {
        return count;
    }

    /**
     * Increments the occurrence count and returns this instance for chaining.
     */
    public ErrorMetric increment(){
        count = count + 1;
        return this;
    }

    @Override
    public String toString() {
        return errorClass.getSimpleName() + ": " + count;
    }
}
| 7,488 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/MetricsLogger.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.stream.Collectors;
/**
 * Default {@link MetricsHandler} that writes connection and request metrics
 * to the SLF4J log at INFO level.
 */
class MetricsLogger implements MetricsHandler {

    private static final Logger logger = LoggerFactory.getLogger(MetricsLogger.class);

    @Override
    public void onMetricsPublished(ConnectionMetrics connectionMetrics, RequestMetrics requestMetrics) {
        // One line per metrics category; endpoint details are comma-joined.
        String connectionEndpoints = connectionMetrics.getMetrics().stream()
                .map(EndpointConnectionMetrics::toString)
                .collect(Collectors.joining(", "));
        logger.info("Connection metrics: [duration: {}ms, totalConnectionAttempts:{}, endpoints: [{}]]",
                connectionMetrics.getDurationMillis(),
                connectionMetrics.getTotalConnectionAttempts(),
                connectionEndpoints);
        String requestEndpoints = requestMetrics.getMetrics().stream()
                .map(EndpointRequestMetrics::toString)
                .collect(Collectors.joining(", "));
        logger.info("Request metrics: [duration: {}ms, totalRequests:{}, failed: {}, endpoints: [{}] (dropped: {}, skipped: {})]",
                requestMetrics.getDurationMillis(),
                requestMetrics.getTotalRequests(),
                requestMetrics.getFailedRequestsCount(),
                requestEndpoints,
                requestMetrics.getDroppedRequestsCount(),
                requestMetrics.getSkippedResponsesCount());
    }
}
| 7,489 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/TopologyAwareBuilderConfigurator.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * Applies additional configuration to a {@link Cluster.Builder} based on the
 * collection of endpoints the cluster will connect to.
 */
public interface TopologyAwareBuilderConfigurator {
    /**
     * @param builder   cluster builder to configure
     * @param endpoints endpoints the resulting cluster will serve
     */
    void apply(Cluster.Builder builder, EndpointCollection endpoints);
}
| 7,490 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointClientCollection.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.apache.tinkerpop.gremlin.driver.exception.NoHostAvailableException;
import org.apache.tinkerpop.gremlin.driver.message.RequestMessage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.tinkerpop.gremlin.driver.ApprovalResult.REJECTED_REASON_ANNOTATION;
/**
 * Snapshot of the endpoint clients a GremlinClient is currently using, together
 * with the endpoints rejected by the endpoint filter and (optionally) connection
 * and request metrics collected over the snapshot's lifetime. Metric updates are
 * funnelled through a single-threaded executor so the metric objects themselves
 * need no extra synchronization.
 */
class EndpointClientCollection implements Iterable<EndpointClient> {

    public static Builder builder(){
        return new Builder();
    }

    private final List<EndpointClient> endpointClients;
    private final EndpointCollection rejectedEndpoints;
    private final boolean collectMetrics;
    // Both collectors are null when metrics collection is disabled.
    private final ConnectionMetricsCollector connectionMetrics;
    private final RequestMetricsCollector requestMetrics;
    private final long startMillis = System.currentTimeMillis();
    // Single-threaded executor that serializes metric updates; null when metrics are disabled.
    private final ExecutorService executorService;
    // Flipped to false by close() so no further metric tasks are submitted.
    private volatile boolean allowSubmitMetrics = true;
    private static final Logger logger = LoggerFactory.getLogger(EndpointClientCollection.class);

    EndpointClientCollection(Builder builder) {
        this.rejectedEndpoints = builder.getRejectedEndpoints();
        this.endpointClients = builder.getEndpointClients();
        this.collectMetrics = builder.collectMetrics();
        this.executorService = collectMetrics ? Executors.newSingleThreadExecutor() : null;
        this.connectionMetrics = collectMetrics ? initConnectionMetrics(endpointClients) : null;
        this.requestMetrics = collectMetrics ? initRequestMetrics(endpointClients) : null;
    }

    // Empty collection with metrics disabled (Builder defaults).
    EndpointClientCollection() {
        this(new Builder());
    }

    // Creates one request-metrics entry per endpoint address.
    private RequestMetricsCollector initRequestMetrics(List<EndpointClient> endpointClients) {
        Map<String, EndpointRequestMetrics> requestMetrics = new ConcurrentHashMap<>();
        for (EndpointClient endpointClient : endpointClients) {
            String address = endpointClient.endpoint().getAddress();
            requestMetrics.put(address, new EndpointRequestMetrics(address));
        }
        return new RequestMetricsCollector(requestMetrics);
    }

    // Creates one connection-metrics entry per endpoint address.
    private ConnectionMetricsCollector initConnectionMetrics(List<EndpointClient> endpointClients) {
        Map<String, EndpointConnectionMetrics> endpointClientMetrics = new ConcurrentHashMap<>();
        for (EndpointClient endpointClient : endpointClients) {
            String address = endpointClient.endpoint().getAddress();
            endpointClientMetrics.put(address, new EndpointConnectionMetrics(address));
        }
        return new ConnectionMetricsCollector(endpointClientMetrics);
    }

    /**
     * Returns the clients from this snapshot whose endpoints are still present in
     * {@code acceptedEndpoints}, so they can be carried over to a new snapshot
     * instead of being rebuilt.
     */
    List<EndpointClient> getSurvivingEndpointClients(EndpointCollection acceptedEndpoints) {
        List<EndpointClient> results = new ArrayList<>();
        for (EndpointClient endpointClient : endpointClients) {
            Endpoint endpoint = endpointClient.endpoint();
            if (acceptedEndpoints.containsEndpoint(endpoint)) {
                logger.info("Retaining client for {}", endpoint.getAddress());
                results.add(endpointClient);
            }
        }
        return results;
    }

    /**
     * Chooses a connection for the message using the supplied strategy.
     * Returns null when the chosen endpoint cannot currently supply a usable
     * connection (unavailable, closing, dead, or a driver-internal failure),
     * signalling the caller to back off and retry; each outcome is recorded
     * in the connection metrics when metrics are enabled.
     */
    Connection chooseConnection(RequestMessage msg, ChooseEndpointStrategy strategy) throws TimeoutException {
        UUID traceId = msg.getRequestId();
        // NOTE(review): this local shadows the startMillis field; it times this call only.
        long startMillis = System.currentTimeMillis();
        EndpointClient endpointClient = strategy.choose(this);
        String address = endpointClient.endpoint().getAddress();
        if (!endpointClient.isAvailable()) {
            logger.debug("No connections available for {}", address);
            submitMetrics(() -> connectionMetrics.unavailable(address, startMillis));
            return null;
        }
        try {
            Connection connection = endpointClient.client().chooseConnection(msg);
            if (connection.isClosing()) {
                logger.debug("Connection is closing: {}", address);
                submitMetrics(() -> connectionMetrics.closing(address, startMillis));
                return null;
            }
            if (connection.isDead()) {
                logger.debug("Connection is dead: {}", address);
                submitMetrics(() -> connectionMetrics.dead(address, startMillis));
                return null;
            }
            // Success: record the attempt and remember which address served this
            // request id so the response duration can be attributed later.
            submitMetrics(() -> {
                try {
                    connectionMetrics.succeeded(address, startMillis);
                    requestMetrics.registerAddressForTraceId(traceId, address);
                } catch (Exception e) {
                    logger.error("Error while submitting metrics", e);
                }
            });
            return connection;
        } catch (NullPointerException e) {
            // Defensive: the underlying driver can NPE while picking a connection.
            logger.debug("NullPointerException: {}", address, e);
            submitMetrics(() -> connectionMetrics.npe(address, startMillis));
            return null;
        } catch (NoHostAvailableException e) {
            logger.debug("No connection available: {}", address, e);
            submitMetrics(() -> connectionMetrics.nha(address, startMillis));
            return null;
        }
    }

    EndpointClient get(int index) {
        return endpointClients.get(index);
    }

    int size() {
        return endpointClients.size();
    }

    boolean isEmpty() {
        return endpointClients.isEmpty();
    }

    @Override
    public Iterator<EndpointClient> iterator() {
        return endpointClients.iterator();
    }

    Stream<EndpointClient> stream() {
        return endpointClients.stream();
    }

    /** @return the endpoints backing the clients in this snapshot */
    EndpointCollection endpoints() {
        List<Endpoint> endpoints = endpointClients.stream()
                .map(EndpointClient::endpoint)
                .collect(Collectors.toList());
        return new EndpointCollection(endpoints);
    }

    boolean hasRejectedEndpoints() {
        return !rejectedEndpoints.isEmpty();
    }

    /** @return the distinct rejection reasons recorded on the rejected endpoints */
    Collection<String> rejectionReasons() {
        return rejectedEndpoints.stream()
                .map(e -> e.getAnnotations().getOrDefault(REJECTED_REASON_ANNOTATION, "unknown"))
                .collect(Collectors.toSet());
    }

    // Runs the task on the metrics executor; no-op when metrics are disabled or
    // the collection is closing. RejectedExecutionException can still occur in a
    // race with shutdownNow() and is deliberately only traced.
    private void submitMetrics(Runnable runnable){
        if (collectMetrics && allowSubmitMetrics) {
            try {
                executorService.submit(runnable);
            } catch (RejectedExecutionException e) {
                logger.trace("Error submitting metrics", e);
            }
        }
    }

    /**
     * Stops accepting metric updates, publishes the accumulated metrics to the
     * handler (waiting up to 5 seconds), then shuts the metrics executor down.
     * A null handler skips publication.
     */
    void close(MetricsHandler handler) {
        if (!collectMetrics) {
            return;
        }
        allowSubmitMetrics = false;
        if (handler != null && executorService != null){
            // Publish on the metrics executor so it runs after all pending updates.
            Future<?> future = executorService.submit(() -> {
                long duration = System.currentTimeMillis() - startMillis;
                ConnectionMetrics conMetrics = new ConnectionMetrics(
                        duration,
                        connectionMetrics.totalConnectionAttempts(),
                        connectionMetrics.metrics());
                RequestMetrics reqMetrics = new RequestMetrics(
                        duration,
                        requestMetrics.totalRequests(),
                        requestMetrics.failedRequests(),
                        requestMetrics.droppedRequests(),
                        requestMetrics.skippedResponses(),
                        requestMetrics.metrics());
                handler.onMetricsPublished(conMetrics, reqMetrics);
            });
            try {
                future.get(5, TimeUnit.SECONDS);
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                logger.error("Error while publishing metrics", e);
            }
        }
        if (executorService != null) {
            executorService.shutdownNow();
        }
    }

    /** Attributes a completed request's duration (and optional failure) to the address registered for its trace id. */
    void registerDurationForTraceId(UUID traceId, long durationMillis, Throwable e) {
        submitMetrics(() -> requestMetrics.registerDurationForTraceId(traceId, durationMillis, e));
    }

    /**
     * Builder for EndpointClientCollection; defaults to no clients, no rejected
     * endpoints, and metrics disabled.
     */
    static class Builder {
        private List<EndpointClient> endpointClients = new ArrayList<>();
        private EndpointCollection rejectedEndpoints = new EndpointCollection();
        private boolean collectMetrics = false;

        private Builder(){
        }

        public Builder withEndpointClients(List<EndpointClient> endpointClients) {
            this.endpointClients = endpointClients;
            return this;
        }

        public Builder withRejectedEndpoints(EndpointCollection rejectedEndpoints) {
            this.rejectedEndpoints = rejectedEndpoints;
            return this;
        }

        public Builder setCollectMetrics(boolean collectMetrics) {
            this.collectMetrics = collectMetrics;
            return this;
        }

        List<EndpointClient> getEndpointClients() {
            return endpointClients;
        }

        EndpointCollection getRejectedEndpoints() {
            return rejectedEndpoints;
        }

        boolean collectMetrics() {
            return collectMetrics;
        }
    }
}
| 7,491 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/Refreshable.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
/**
 * A client whose set of endpoint addresses can be replaced at runtime.
 */
public interface Refreshable {
    /** Replaces the client's current endpoints with the supplied collection. */
    void refreshEndpoints(EndpointCollection endpoints);
}
| 7,492 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/EndpointCollection.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.*;
import java.util.stream.Stream;
/**
 * A collection of {@link Endpoint}s keyed by address (falling back to the
 * endpoint's hash code when the address is null), with helpers for partitioning
 * endpoints via an {@link EndpointFilter}.
 */
public class EndpointCollection implements Iterable<Endpoint> {

    private final Map<String, Endpoint> endpoints = new HashMap<>();

    public EndpointCollection() {
    }

    public EndpointCollection(Collection<? extends Endpoint> endpoints) {
        for (Endpoint endpoint : endpoints) {
            addOrReplace(endpoint);
        }
    }

    /** @return the endpoints in this collection for which the cluster collection has no cluster yet */
    public EndpointCollection getEndpointsWithNoCluster(ClientClusterCollection clientClusterCollection) {
        EndpointCollection results = new EndpointCollection();
        for (Endpoint endpoint : endpoints.values()) {
            if (!clientClusterCollection.containsClusterForEndpoint(endpoint)) {
                results.addOrReplace(endpoint);
            }
        }
        return results;
    }

    /** @return a new collection containing each endpoint after enrichment by the filter */
    public EndpointCollection getEnrichedEndpoints(EndpointFilter endpointFilter) {
        EndpointCollection results = new EndpointCollection();
        for (Endpoint endpoint : endpoints.values()) {
            results.addOrReplace(endpointFilter.enrichEndpoint(endpoint));
        }
        return results;
    }

    /** @return the endpoints the filter approves */
    public EndpointCollection getAcceptedEndpoints(EndpointFilter endpointFilter) {
        EndpointCollection results = new EndpointCollection();
        for (Endpoint endpoint : endpoints.values()) {
            ApprovalResult approvalResult = endpointFilter.approveEndpoint(endpoint);
            if (approvalResult.isApproved()) {
                results.addOrReplace(endpoint);
            }
        }
        return results;
    }

    /** @return the endpoints the filter rejects, each annotated with its rejection reason */
    public EndpointCollection getRejectedEndpoints(EndpointFilter endpointFilter) {
        EndpointCollection results = new EndpointCollection();
        for (Endpoint endpoint : endpoints.values()) {
            ApprovalResult approvalResult = endpointFilter.approveEndpoint(endpoint);
            if (!approvalResult.isApproved()) {
                results.addOrReplace(approvalResult.enrich(endpoint));
            }
        }
        return results;
    }

    private void addOrReplace(Endpoint endpoint) {
        endpoints.put(computeKey(endpoint), endpoint);
    }

    /**
     * @return true if the collection contains an entry for this endpoint.
     * Uses {@link #computeKey(Endpoint)} so that endpoints with a null address
     * (stored under their hash-code key) are found too; previously this looked
     * up the raw address and always missed such endpoints.
     */
    public boolean containsEndpoint(Endpoint endpoint) {
        return endpoints.containsKey(computeKey(endpoint));
    }

    /** @return the endpoint stored under the given address, or null if absent */
    public Endpoint get(String address) {
        return endpoints.get(address);
    }

    public boolean isEmpty() {
        return endpoints.isEmpty();
    }

    @Override
    public Iterator<Endpoint> iterator() {
        return endpoints.values().iterator();
    }

    public Stream<Endpoint> stream() {
        return endpoints.values().stream();
    }

    @Override
    public String toString() {
        return "EndpointCollection{" +
                "endpoints=" + endpoints +
                '}';
    }

    // Address is the natural key; fall back to the hash code for address-less endpoints.
    private String computeKey(Endpoint endpoint) {
        return endpoint.getAddress() != null ? endpoint.getAddress() : String.valueOf(endpoint.hashCode());
    }

    public int size() {
        return endpoints.size();
    }
}
| 7,493 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/GremlinClient.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import org.apache.tinkerpop.gremlin.driver.exception.ConnectionException;
import org.apache.tinkerpop.gremlin.driver.message.RequestMessage;
import org.apache.tinkerpop.gremlin.process.traversal.Bytecode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.utils.CollectionUtils;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
public class GremlinClient extends Client implements Refreshable, AutoCloseable {
private static final Logger logger = LoggerFactory.getLogger(GremlinClient.class);
private final AtomicReference<EndpointClientCollection> endpointClientCollection = new AtomicReference<>(new EndpointClientCollection());
private final AtomicLong index = new AtomicLong(0);
private final AtomicReference<CompletableFuture<Void>> closing = new AtomicReference<>(null);
private final ConnectionAttemptManager connectionAttemptManager;
private final ClientClusterCollection clientClusterCollection;
private final EndpointStrategies endpointStrategies;
private final AcquireConnectionConfig acquireConnectionConfig;
private final MetricsConfig metricsConfig;
    /**
     * Creates a client over an initial snapshot of endpoint clients.
     *
     * @param cluster                  parent cluster passed to the base Client
     * @param settings                 client settings passed to the base Client
     * @param endpointClientCollection initial snapshot of endpoint clients
     * @param clientClusterCollection  per-endpoint cluster registry
     * @param endpointStrategies       endpoint filter/selection strategies
     * @param acquireConnectionConfig  backoff/wait configuration; also builds the
     *                                 connection attempt manager for this client
     * @param metricsConfig            whether to collect metrics and where to publish them
     */
    GremlinClient(Cluster cluster,
                  Settings settings,
                  EndpointClientCollection endpointClientCollection,
                  ClientClusterCollection clientClusterCollection,
                  EndpointStrategies endpointStrategies,
                  AcquireConnectionConfig acquireConnectionConfig,
                  MetricsConfig metricsConfig) {
        super(cluster, settings);
        this.endpointClientCollection.set(endpointClientCollection);
        this.clientClusterCollection = clientClusterCollection;
        this.endpointStrategies = endpointStrategies;
        this.acquireConnectionConfig = acquireConnectionConfig;
        this.connectionAttemptManager = acquireConnectionConfig.createConnectionAttemptManager(this);
        this.metricsConfig = metricsConfig;
        logger.info("availableEndpointFilter: {}", endpointStrategies.endpointFilter());
    }
    /**
     * Refreshes the client with its current set of endpoints.
     * (Useful for triggering metrics for static cluster topologies: the old
     * snapshot is closed and its metrics published.)
     */
    public synchronized void refreshEndpoints(){
        refreshEndpoints(currentEndpoints());
    }
    /**
     * Refreshes the list of endpoint addresses to which the client connects.
     * Builds a new snapshot of endpoint clients, swaps it in atomically, prunes
     * clusters that no longer back any endpoint, and closes the previous
     * snapshot (publishing its metrics, if enabled).
     */
    @Override
    public synchronized void refreshEndpoints(EndpointCollection endpoints) {
        // No-op once a close has been initiated.
        if (closing.get() != null) {
            return;
        }
        EndpointFilter endpointFilter =
                new EmptyEndpointFilter(endpointStrategies.endpointFilter());
        EndpointClientCollection currentEndpointClientCollection = endpointClientCollection.get();
        // Let the filter annotate the endpoints, then split them into accepted and rejected sets.
        EndpointCollection enrichedEndpoints = endpoints.getEnrichedEndpoints(endpointFilter);
        EndpointCollection acceptedEndpoints = enrichedEndpoints.getAcceptedEndpoints(endpointFilter);
        EndpointCollection rejectedEndpoints = enrichedEndpoints.getRejectedEndpoints(endpointFilter);
        // Carry over clients whose endpoints are still accepted...
        List<EndpointClient> survivingEndpointClients =
                currentEndpointClientCollection.getSurvivingEndpointClients(acceptedEndpoints);
        // ...and create clusters/clients for the newly accepted endpoints.
        EndpointCollection newEndpoints = acceptedEndpoints.getEndpointsWithNoCluster(clientClusterCollection);
        Map<Endpoint, Cluster> newEndpointClusters = clientClusterCollection.createClustersForEndpoints(newEndpoints);
        List<EndpointClient> newEndpointClients = EndpointClient.create(newEndpointClusters);
        EndpointClientCollection newEndpointClientCollection = new EndpointClientCollection(
                EndpointClientCollection.builder()
                        .withEndpointClients(CollectionUtils.join(survivingEndpointClients, newEndpointClients))
                        .withRejectedEndpoints(rejectedEndpoints)
                        .setCollectMetrics(metricsConfig.enableMetrics())
        );
        // Swap in the new snapshot, drop orphaned clusters, then retire the old
        // snapshot (publishing its metrics to the configured handlers).
        endpointClientCollection.set(newEndpointClientCollection);
        clientClusterCollection.removeClustersWithNoMatchingEndpoint(newEndpointClientCollection.endpoints());
        currentEndpointClientCollection.close(metricsConfig.metricsHandlers());
    }
/** Returns the endpoints backing the currently-published endpoint-client collection. */
public EndpointCollection currentEndpoints(){
    EndpointClientCollection current = endpointClientCollection.get();
    return current.endpoints();
}
@Override
protected void initializeImplementation() {
    // Do nothing — per-endpoint clients are initialized individually in init().
}
/**
 * Chooses a connection for the given request, spinning with a configurable
 * backoff while no endpoint clients (or no free connections) are available.
 * Throws {@link TimeoutException} (or {@link EndpointsUnavailableException}
 * when all endpoints were rejected) once the max wait time is exceeded, and
 * may trigger an eager endpoint refresh after a shorter wait threshold.
 */
@Override
protected Connection chooseConnection(RequestMessage msg) throws TimeoutException, ConnectionException {
    long start = System.currentTimeMillis();
    logger.debug("Choosing connection");
    Connection connection = null;
    while (connection == null) {
        EndpointClientCollection currentEndpointClientCollection = endpointClientCollection.get();
        // Inner loop: wait for at least one endpoint client to appear.
        while (currentEndpointClientCollection.isEmpty()) {
            if (connectionAttemptManager.maxWaitTimeExceeded(start)) {
                if (currentEndpointClientCollection.hasRejectedEndpoints()) {
                    // Surface the filter's rejection reasons rather than a bare timeout.
                    throw new EndpointsUnavailableException(currentEndpointClientCollection.rejectionReasons());
                } else {
                    throw new TimeoutException("Timed-out waiting for connection");
                }
            }
            if (connectionAttemptManager.eagerRefreshWaitTimeExceeded(start)) {
                connectionAttemptManager.triggerEagerRefresh(new EagerRefreshContext());
            }
            try {
                Thread.sleep(acquireConnectionConfig.acquireConnectionBackoffMillis());
                // Re-read: a concurrent refreshEndpoints() may have published clients.
                currentEndpointClientCollection = endpointClientCollection.get();
            } catch (InterruptedException e) {
                // Preserve interrupt status; the timeout check above bounds the loop.
                Thread.currentThread().interrupt();
            }
        }
        // Round-robin across endpoint clients via the shared atomic index.
        connection = currentEndpointClientCollection.chooseConnection(
                msg,
                ec -> ec.get((int) (index.getAndIncrement() % ec.size())));
        if (connection == null) {
            if (connectionAttemptManager.maxWaitTimeExceeded(start)) {
                throw new TimeoutException("Timed-out waiting for connection");
            }
            if (connectionAttemptManager.eagerRefreshWaitTimeExceeded(start)) {
                connectionAttemptManager.triggerEagerRefresh(new EagerRefreshContext());
            }
            try {
                Thread.sleep(acquireConnectionConfig.acquireConnectionBackoffMillis());
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }
    logger.debug("Connection: {} [{} ms]", connection.getConnectionInfo(), System.currentTimeMillis() - start);
    return connection;
}
@Override
public Client alias(String graphOrTraversalSource) {
    // Build the single-entry alias map and delegate to the map-based overload.
    Map<String, String> aliases = makeDefaultAliasMap(graphOrTraversalSource);
    return alias(aliases);
}
@Override
public Client alias(final Map<String, String> aliases) {
    // Wrap this client in an alias-aware child that shares our cluster and
    // endpoint-client state.
    GremlinAliasClusterClient aliasClient = new GremlinAliasClusterClient(
            this, aliases, settings, clientClusterCollection, endpointClientCollection);
    return aliasClient;
}
@Override
public boolean isClosing() {
    // A non-null close future means closeAsync() has been invoked.
    CompletableFuture<Void> closeFuture = closing.get();
    return closeFuture != null;
}
/**
 * Initiates an asynchronous close of all endpoint clients and returns a
 * future that completes when they have all closed. Subsequent calls return
 * the same future.
 */
@Override
public CompletableFuture<Void> closeAsync() {
    // NOTE(review): check-then-act on `closing` is not atomic — two concurrent
    // first calls could each start a close. Harmless if closeClientAsync() is
    // idempotent, but worth confirming.
    if (closing.get() != null)
        return closing.get();
    connectionAttemptManager.shutdownNow();
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    for (EndpointClient endpointClient : endpointClientCollection.get()) {
        futures.add(endpointClient.closeClientAsync());
    }
    closing.set(CompletableFuture.allOf(futures.toArray(new CompletableFuture[]{})));
    return closing.get();
}
/**
 * Initializes all current endpoint clients exactly once; subsequent calls
 * are no-ops. Returns this client for chaining.
 */
@Override
public synchronized Client init() {
    if (initialized)
        return this;
    logger.debug("Initializing internal clients");
    for (EndpointClient endpointClient : endpointClientCollection.get()) {
        endpointClient.initClient();
    }
    initializeImplementation();
    initialized = true;
    return this;
}
/** Renders the current endpoint clients and cluster collection for diagnostics. */
@Override
public String toString() {
    String newline = System.lineSeparator();
    String holders = endpointClientCollection.get().stream()
            .map(c -> String.format(" {address: %s, isAvailable: %s}",
                    c.endpoint().getAddress(),
                    !c.client().getCluster().availableHosts().isEmpty()))
            .collect(Collectors.joining(newline));
    return "Client holder queue: " + newline
            + holders + newline
            + "Cluster collection: " + newline
            + clientClusterCollection.toString();
}
/**
 * Alias-aware client that forwards bytecode submissions to the parent cluster
 * client, stamping each request with a trace id so that per-endpoint request
 * durations can be recorded against the endpoint that served it.
 */
public static class GremlinAliasClusterClient extends AliasClusteredClient {

    private static final Logger logger = LoggerFactory.getLogger(GremlinAliasClusterClient.class);

    private final ClientClusterCollection clientClusterCollection;
    private final AtomicReference<EndpointClientCollection> endpointClientCollection;

    GremlinAliasClusterClient(Client client,
                              Map<String, String> aliases,
                              Settings settings,
                              ClientClusterCollection clientClusterCollection,
                              AtomicReference<EndpointClientCollection> endpointClientCollection) {
        super(client, aliases, settings);
        this.clientClusterCollection = clientClusterCollection;
        this.endpointClientCollection = endpointClientCollection;
    }

    /**
     * Submits bytecode with a (possibly generated) trace id and, on completion,
     * registers the request duration against that trace id for metrics.
     * All caller-supplied {@link RequestOptions} are copied onto the outgoing
     * request.
     */
    @Override
    public CompletableFuture<ResultSet> submitAsync(Bytecode bytecode, RequestOptions options) {
        long start = System.currentTimeMillis();
        // Reuse the caller's override request id if present so tracing stays consistent.
        UUID traceId = options.getOverrideRequestId().isPresent() ? options.getOverrideRequestId().get() : UUID.randomUUID();
        logger.trace("_traceId: {}", traceId);
        // Rebuild the options so the trace id is always set as the request id.
        RequestOptions.Builder newOptions = RequestOptions.build();
        newOptions.overrideRequestId(traceId);
        if (options.getAliases().isPresent()) {
            Map<String, String> aliases = options.getAliases().get();
            for (Map.Entry<String, String> alias : aliases.entrySet()) {
                newOptions.addAlias(alias.getKey(), alias.getValue());
            }
        }
        if (options.getBatchSize().isPresent()) {
            newOptions.batchSize(options.getBatchSize().get());
        }
        if (options.getTimeout().isPresent()) {
            newOptions.timeout(options.getTimeout().get());
        }
        if (options.getLanguage().isPresent()) {
            newOptions.language(options.getLanguage().get());
        }
        if (options.getUserAgent().isPresent()) {
            newOptions.userAgent(options.getUserAgent().get());
        }
        if (options.getParameters().isPresent()) {
            Map<String, Object> params = options.getParameters().get();
            for (Map.Entry<String, Object> param : params.entrySet()) {
                newOptions.addParameter(param.getKey(), param.getValue());
            }
        }
        CompletableFuture<ResultSet> future = super.submitAsync(bytecode, newOptions.create());
        EndpointClientCollection endpointClients = endpointClientCollection.get();
        if (endpointClients != null){
            return future.whenComplete((results, throwable) -> {
                long durationMillis = System.currentTimeMillis() - start;
                endpointClients.registerDurationForTraceId(traceId, durationMillis, throwable);
            });
        } else {
            return future;
        }
    }

    /**
     * Returns the first cluster in the collection if one exists, otherwise
     * falls back to the (host-less) parent cluster.
     */
    @Override
    public Cluster getCluster() {
        Cluster cluster = clientClusterCollection.getFirstOrNull();
        if (cluster != null) {
            // Fixed: log format previously read "[{}}" (unbalanced bracket).
            logger.trace("Returning: Cluster: {}, Hosts: [{}]",
                    cluster,
                    cluster.availableHosts().stream().map(URI::toString).collect(Collectors.joining(", ")));
            return cluster;
        } else {
            logger.warn("Unable to find cluster with available hosts in cluster collection, so returning parent cluster, which has no hosts.");
            return super.getCluster();
        }
    }
}
}
| 7,494 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/org/apache/tinkerpop/gremlin/driver/RequestMetrics.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package org.apache.tinkerpop.gremlin.driver;
import java.util.Collection;
/**
 * Immutable snapshot of aggregate request metrics for a reporting interval,
 * together with the per-endpoint metrics that make it up.
 */
public class RequestMetrics {

    private final long durationMillis;
    private final long totalRequests;
    private final long failedRequests;
    private final int droppedRequests;
    private final int skippedResponses;
    // Made final: nothing ever reassigns this, and every other field is final.
    private final Collection<EndpointRequestMetrics> metrics;

    RequestMetrics(long durationMillis,
                   long totalRequests,
                   long failedRequests,
                   int droppedRequests,
                   int skippedResponses,
                   Collection<EndpointRequestMetrics> metrics) {
        this.durationMillis = durationMillis;
        this.totalRequests = totalRequests;
        this.failedRequests = failedRequests;
        this.droppedRequests = droppedRequests;
        this.skippedResponses = skippedResponses;
        this.metrics = metrics;
    }

    /** Duration of the reporting interval, in milliseconds. */
    public long getDurationMillis() {
        return durationMillis;
    }

    /** Total number of requests observed in the interval. */
    public long getTotalRequests() {
        return totalRequests;
    }

    /** Number of requests that failed. */
    public long getFailedRequestsCount() {
        return failedRequests;
    }

    /** Number of requests that were dropped. */
    public int getDroppedRequestsCount() {
        return droppedRequests;
    }

    /** Number of responses that were skipped. */
    public int getSkippedResponsesCount() {
        return skippedResponses;
    }

    /** Per-endpoint metrics backing this snapshot (as supplied to the constructor). */
    public Collection<EndpointRequestMetrics> getMetrics() {
        return metrics;
    }
}
| 7,495 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune/cluster/ClusterEndpointsFetchStrategy.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.neptune.cluster;
import org.apache.tinkerpop.gremlin.driver.EndpointCollection;
import org.apache.tinkerpop.gremlin.driver.GremlinClient;
import java.util.Collection;
import java.util.Map;
/**
 * Strategy for fetching collections of endpoints from Neptune cluster metadata
 * for a set of {@link EndpointsSelector}s.
 */
public interface ClusterEndpointsFetchStrategy {
    /** Supplies the (possibly cached) cluster metadata used to resolve endpoints. */
    ClusterMetadataSupplier clusterMetadataSupplier();
    /**
     * Returns the endpoints selected by each selector, optionally forcing a
     * metadata refresh first.
     */
    Map<? extends EndpointsSelector, EndpointCollection> getEndpoints(Collection<? extends EndpointsSelector> selectors, boolean refresh);
    // Convenience overload: resolves endpoints for the selectors keying a map of
    // clients, ignoring the client values.
    default Map<? extends EndpointsSelector, EndpointCollection> getEndpoints(Map<? extends EndpointsSelector, GremlinClient> clientSelectors, boolean refresh){
        return getEndpoints(clientSelectors.keySet(), refresh);
    }
}
| 7,496 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune/cluster/OnNewClusterMetadata.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.neptune.cluster;
/**
 * Callback invoked when new Neptune cluster metadata becomes available.
 * Annotated {@code @FunctionalInterface} so the compiler enforces the
 * single-abstract-method contract and lambdas may be used.
 */
@FunctionalInterface
public interface OnNewClusterMetadata {
    void apply(NeptuneClusterMetadata neptuneClusterMetadata);
}
| 7,497 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune/cluster/NeptuneGremlinClusterBuilder.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package software.amazon.neptune.cluster;
import com.amazonaws.auth.AWSCredentialsProvider;
import io.netty.handler.ssl.SslContext;
import org.apache.tinkerpop.gremlin.driver.*;
import org.apache.tinkerpop.gremlin.driver.ser.Serializers;
import java.util.*;
import java.util.function.Supplier;
/**
 * Builder for a {@link GremlinCluster} configured for Amazon Neptune.
 * Wraps a {@link GremlinClusterBuilder}, delegating generic driver settings to
 * it while adding Neptune-specific concerns: SSL (on by default), IAM auth,
 * proxy/load-balancer support, and endpoint filtering.
 */
public class NeptuneGremlinClusterBuilder {

    public static NeptuneGremlinClusterBuilder build() {
        return new NeptuneGremlinClusterBuilder();
    }

    private final GremlinClusterBuilder innerBuilder = GremlinClusterBuilder.build();
    // Made final: only ever mutated via add*, never reassigned.
    private final List<Endpoint> endpoints = new ArrayList<>();
    private String proxyAddress;
    private boolean removeHostHeader = false;
    private boolean enableSsl = true;
    private boolean enableIamAuth = false;
    private int port = 8182;
    private int proxyPort = 80;
    private String iamProfile = IamAuthConfig.DEFAULT_PROFILE;
    private String serviceRegion = "";
    private HandshakeInterceptor interceptor = null;
    private AWSCredentialsProvider credentials = null;
    private EndpointFilter endpointFilter = new SuspendedEndpoints();

    private NeptuneGremlinClusterBuilder() {
    }

    public NeptuneGremlinClusterBuilder addMetricsHandler(MetricsHandler handler){
        innerBuilder.addMetricsHandler(handler);
        return this;
    }

    public NeptuneGremlinClusterBuilder enableMetrics(boolean enableMetrics){
        innerBuilder.enableMetrics(enableMetrics);
        return this;
    }

    /**
     * Number of millis to wait between each attempt to acquire a connection.
     */
    public NeptuneGremlinClusterBuilder acquireConnectionBackoffMillis(final int acquireConnectionBackoffMillis) {
        innerBuilder.acquireConnectionBackoffMillis(acquireConnectionBackoffMillis);
        return this;
    }

    /**
     * Minimum number of millis to wait between invoking handler supplied in
     * {@link #onEagerRefresh}.
     */
    public NeptuneGremlinClusterBuilder eagerRefreshBackoffMillis(final int eagerRefreshBackoffMillis) {
        innerBuilder.eagerRefreshBackoffMillis(eagerRefreshBackoffMillis);
        return this;
    }

    /**
     * Number of millis to wait while trying to acquire connection before invoking handler supplied in
     * {@link #onEagerRefresh}.
     */
    public NeptuneGremlinClusterBuilder eagerRefreshWaitTimeMillis(final int eagerRefreshWaitTimeMillis) {
        innerBuilder.eagerRefreshWaitTimeMillis(eagerRefreshWaitTimeMillis);
        return this;
    }

    /**
     * Handler to be invoked after {@link #eagerRefreshWaitTimeMillis}.
     * The handler should return a {@code Supplier<EndpointCollection>}.
     */
    public NeptuneGremlinClusterBuilder onEagerRefresh(final OnEagerRefresh eventHandler) {
        innerBuilder.onEagerRefresh(eventHandler);
        return this;
    }

    /**
     * Strategy for filtering and enriching available endpoints before creating clients.
     */
    public NeptuneGremlinClusterBuilder endpointFilter(EndpointFilter endpointFilter) {
        this.endpointFilter = endpointFilter;
        return this;
    }

    public NeptuneGremlinClusterBuilder nioPoolSize(final int nioPoolSize) {
        innerBuilder.nioPoolSize(nioPoolSize);
        return this;
    }

    public NeptuneGremlinClusterBuilder workerPoolSize(final int workerPoolSize) {
        innerBuilder.workerPoolSize(workerPoolSize);
        return this;
    }

    public NeptuneGremlinClusterBuilder path(final String path) {
        innerBuilder.path(path);
        return this;
    }

    public NeptuneGremlinClusterBuilder serializer(final String mimeType) {
        innerBuilder.serializer(mimeType);
        return this;
    }

    public NeptuneGremlinClusterBuilder serializer(final Serializers mimeType) {
        innerBuilder.serializer(mimeType);
        return this;
    }

    public NeptuneGremlinClusterBuilder serializer(final MessageSerializer serializer) {
        innerBuilder.serializer(serializer);
        return this;
    }

    /**
     * Enables connectivity over SSL - default 'true' for Amazon Neptune clusters.
     */
    public NeptuneGremlinClusterBuilder enableSsl(final boolean enable) {
        this.enableSsl = enable;
        return this;
    }

    public NeptuneGremlinClusterBuilder sslContext(final SslContext sslContext) {
        innerBuilder.sslContext(sslContext);
        return this;
    }

    public NeptuneGremlinClusterBuilder keepAliveInterval(final long keepAliveInterval) {
        innerBuilder.keepAliveInterval(keepAliveInterval);
        return this;
    }

    public NeptuneGremlinClusterBuilder keyStore(final String keyStore) {
        innerBuilder.keyStore(keyStore);
        return this;
    }

    public NeptuneGremlinClusterBuilder keyStorePassword(final String keyStorePassword) {
        innerBuilder.keyStorePassword(keyStorePassword);
        return this;
    }

    public NeptuneGremlinClusterBuilder trustStore(final String trustStore) {
        innerBuilder.trustStore(trustStore);
        return this;
    }

    public NeptuneGremlinClusterBuilder trustStorePassword(final String trustStorePassword) {
        innerBuilder.trustStorePassword(trustStorePassword);
        return this;
    }

    public NeptuneGremlinClusterBuilder keyStoreType(final String keyStoreType) {
        innerBuilder.keyStoreType(keyStoreType);
        return this;
    }

    public NeptuneGremlinClusterBuilder sslEnabledProtocols(final List<String> sslEnabledProtocols) {
        innerBuilder.sslEnabledProtocols(sslEnabledProtocols);
        return this;
    }

    public NeptuneGremlinClusterBuilder sslCipherSuites(final List<String> sslCipherSuites) {
        innerBuilder.sslCipherSuites(sslCipherSuites);
        return this;
    }

    public NeptuneGremlinClusterBuilder sslSkipCertValidation(final boolean sslSkipCertValidation) {
        innerBuilder.sslSkipCertValidation(sslSkipCertValidation);
        return this;
    }

    public NeptuneGremlinClusterBuilder minInProcessPerConnection(final int minInProcessPerConnection) {
        innerBuilder.minInProcessPerConnection(minInProcessPerConnection);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxInProcessPerConnection(final int maxInProcessPerConnection) {
        innerBuilder.maxInProcessPerConnection(maxInProcessPerConnection);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxSimultaneousUsagePerConnection(final int maxSimultaneousUsagePerConnection) {
        innerBuilder.maxSimultaneousUsagePerConnection(maxSimultaneousUsagePerConnection);
        return this;
    }

    public NeptuneGremlinClusterBuilder minSimultaneousUsagePerConnection(final int minSimultaneousUsagePerConnection) {
        innerBuilder.minSimultaneousUsagePerConnection(minSimultaneousUsagePerConnection);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxConnectionPoolSize(final int maxSize) {
        innerBuilder.maxConnectionPoolSize(maxSize);
        return this;
    }

    public NeptuneGremlinClusterBuilder minConnectionPoolSize(final int minSize) {
        innerBuilder.minConnectionPoolSize(minSize);
        return this;
    }

    public NeptuneGremlinClusterBuilder resultIterationBatchSize(final int size) {
        innerBuilder.resultIterationBatchSize(size);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxWaitForConnection(final int maxWait) {
        innerBuilder.maxWaitForConnection(maxWait);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxWaitForClose(final int maxWait) {
        innerBuilder.maxWaitForClose(maxWait);
        return this;
    }

    public NeptuneGremlinClusterBuilder maxContentLength(final int maxContentLength) {
        innerBuilder.maxContentLength(maxContentLength);
        return this;
    }

    public NeptuneGremlinClusterBuilder channelizer(final String channelizerClass) {
        innerBuilder.channelizer(channelizerClass);
        return this;
    }

    // Class<?> instead of the raw type Class: source-compatible for callers
    // passing e.g. Foo.class, and removes the raw-type warning.
    public NeptuneGremlinClusterBuilder channelizer(final Class<?> channelizerClass) {
        return channelizer(channelizerClass.getCanonicalName());
    }

    public NeptuneGremlinClusterBuilder validationRequest(final String script) {
        innerBuilder.validationRequest(script);
        return this;
    }

    public NeptuneGremlinClusterBuilder reconnectInterval(final int interval) {
        innerBuilder.reconnectInterval(interval);
        return this;
    }

    public NeptuneGremlinClusterBuilder loadBalancingStrategy(final Supplier<LoadBalancingStrategy> loadBalancingStrategy) {
        innerBuilder.loadBalancingStrategy(loadBalancingStrategy);
        return this;
    }

    public NeptuneGremlinClusterBuilder authProperties(final AuthProperties authProps) {
        innerBuilder.authProperties(authProps);
        return this;
    }

    public NeptuneGremlinClusterBuilder credentials(final String username, final String password) {
        innerBuilder.credentials(username, password);
        return this;
    }

    public NeptuneGremlinClusterBuilder protocol(final String protocol) {
        innerBuilder.protocol(protocol);
        return this;
    }

    public NeptuneGremlinClusterBuilder jaasEntry(final String jaasEntry) {
        innerBuilder.jaasEntry(jaasEntry);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPoint(final String address) {
        this.endpoints.add(new DatabaseEndpoint().withAddress(address));
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPoints(final String... addresses) {
        for (String address : addresses)
            addContactPoint(address);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPoints(final Collection<String> addresses) {
        for (String address : addresses)
            addContactPoint(address);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPoint(final Endpoint endpoint) {
        this.endpoints.add(endpoint);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPoints(final EndpointCollection endpointCollection) {
        for (Endpoint endpoint : endpointCollection)
            addContactPoint(endpoint);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPointsMetadata(final Endpoint... endpointCollection) {
        for (Endpoint endpoint : endpointCollection)
            addContactPoint(endpoint);
        return this;
    }

    public NeptuneGremlinClusterBuilder addContactPointsMetadata(final Collection<Endpoint> endpointCollection) {
        for (Endpoint endpoint : endpointCollection)
            addContactPoint(endpoint);
        return this;
    }

    public NeptuneGremlinClusterBuilder port(final int port) {
        this.port = port;
        return this;
    }

    public NeptuneGremlinClusterBuilder proxyPort(final int port) {
        this.proxyPort = port;
        return this;
    }

    public NeptuneGremlinClusterBuilder proxyAddress(final String address) {
        this.proxyAddress = address;
        return this;
    }

    public NeptuneGremlinClusterBuilder proxyRemoveHostHeader(final boolean removeHostHeader) {
        this.removeHostHeader = removeHostHeader;
        return this;
    }

    public NeptuneGremlinClusterBuilder enableIamAuth(final boolean enable) {
        this.enableIamAuth = enable;
        return this;
    }

    public NeptuneGremlinClusterBuilder serviceRegion(final String serviceRegion) {
        this.serviceRegion = serviceRegion;
        return this;
    }

    public NeptuneGremlinClusterBuilder iamProfile(final String iamProfile) {
        this.iamProfile = iamProfile;
        return this;
    }

    public NeptuneGremlinClusterBuilder handshakeInterceptor(final HandshakeInterceptor interceptor) {
        this.interceptor = interceptor;
        return this;
    }

    public NeptuneGremlinClusterBuilder credentials(final AWSCredentialsProvider credentials) {
        this.credentials = credentials;
        return this;
    }

    // A direct connection is one without an intermediate proxy/load balancer.
    private boolean isDirectConnection() {
        return proxyAddress == null;
    }

    /**
     * Creates the cluster: filters the configured endpoints through the
     * {@link EndpointFilter}, fails fast when nothing usable remains, and
     * configures the inner builder with a handshake configurator carrying the
     * Neptune-specific (IAM/proxy) settings.
     *
     * @throws EndpointsUnavailableException if every endpoint was rejected by the filter
     * @throws IllegalStateException if no endpoints were supplied and they are required
     */
    public GremlinCluster create() {
        innerBuilder.enableSsl(this.enableSsl);
        Collection<Endpoint> filteredEndpoints = new ArrayList<>();
        Set<String> rejectedReasons = new HashSet<>();
        if (endpointFilter != null) {
            innerBuilder.endpointFilter(endpointFilter);
            for (Endpoint endpoint : endpoints) {
                ApprovalResult approvalResult = endpointFilter.approveEndpoint(endpoint);
                if (approvalResult.isApproved()) {
                    filteredEndpoints.add(endpoint);
                } else {
                    rejectedReasons.add(approvalResult.reason());
                }
            }
        } else {
            filteredEndpoints.addAll(endpoints);
        }
        if (filteredEndpoints.isEmpty()) {
            if (!rejectedReasons.isEmpty()) {
                throw new EndpointsUnavailableException(rejectedReasons);
            }
            if (isDirectConnection()) {
                throw new IllegalStateException("The list of endpoint addresses is empty. You must supply one or more endpoints.");
            } else if (enableIamAuth) {
                throw new IllegalStateException("The list of endpoint addresses is empty. You must supply one or more endpoints to sign the Host header.");
            }
        }
        for (Endpoint endpoint : filteredEndpoints) {
            innerBuilder.addContactPoint(endpoint);
        }
        TopologyAwareBuilderConfigurator configurator = new HandshakeInterceptorConfigurator(
                isDirectConnection(),
                interceptor,
                enableIamAuth,
                port,
                proxyPort,
                proxyAddress,
                serviceRegion,
                iamProfile,
                credentials,
                removeHostHeader
        );
        innerBuilder.topologyAwareBuilderConfigurator(configurator);
        return innerBuilder.create();
    }
}
| 7,498 |
0 | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune | Create_ds/neptune-gremlin-client/gremlin-client/src/main/java/software/amazon/neptune/cluster/CommonClusterEndpointsFetchStrategy.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package software.amazon.neptune.cluster;
import org.apache.tinkerpop.gremlin.driver.EndpointCollection;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
 * Default {@link ClusterEndpointsFetchStrategy} that resolves endpoints for a
 * set of selectors against cached cluster metadata, refreshing the metadata
 * when asked to or when no metadata has been fetched yet.
 */
class CommonClusterEndpointsFetchStrategy implements ClusterEndpointsFetchStrategy {

    private final ClusterMetadataSupplier clusterMetadataSupplier;

    CommonClusterEndpointsFetchStrategy(ClusterMetadataSupplier clusterMetadataSupplier) {
        this.clusterMetadataSupplier = clusterMetadataSupplier;
    }

    @Override
    public ClusterMetadataSupplier clusterMetadataSupplier() {
        return clusterMetadataSupplier;
    }

    @Override
    public Map<? extends EndpointsSelector, EndpointCollection> getEndpoints(Collection<? extends EndpointsSelector> selectors, boolean refresh) {
        if (refresh) {
            return refreshEndpoints(selectors);
        }
        NeptuneClusterMetadata clusterMetadata = clusterMetadataSupplier().getClusterMetadata();
        if (clusterMetadata == null) {
            // No cached metadata yet — force an initial fetch.
            return refreshEndpoints(selectors);
        }
        return selectAll(clusterMetadata, selectors);
    }

    private Map<? extends EndpointsSelector, EndpointCollection> refreshEndpoints(Collection<? extends EndpointsSelector> selectors) {
        return selectAll(clusterMetadataSupplier.refreshClusterMetadata(), selectors);
    }

    // Applies each selector to the metadata; previously duplicated in both paths above.
    private static Map<EndpointsSelector, EndpointCollection> selectAll(NeptuneClusterMetadata clusterMetadata,
                                                                        Collection<? extends EndpointsSelector> selectors) {
        Map<EndpointsSelector, EndpointCollection> results = new HashMap<>();
        for (EndpointsSelector selector : selectors) {
            results.put(selector, clusterMetadata.select(selector));
        }
        return results;
    }
}
| 7,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.