index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/TransferManagerWrapper.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import org.apache.commons.lang.StringUtils;
/**
 * Wraps an S3 {@link TransferManager} so that it can be used in a
 * try-with-resources block: closing the wrapper shuts the manager down.
 */
public class TransferManagerWrapper implements AutoCloseable {

    private final TransferManager transferManager;

    /**
     * Creates a wrapper for the given region using the SDK's default
     * credentials lookup.
     */
    public TransferManagerWrapper(String s3Region) {
        this(s3Region, null);
    }

    /**
     * Creates a wrapper for the given region and optional credentials provider.
     * A null provider defers to the SDK's default credentials chain; a null or
     * empty region defers to the SDK's default region resolution.
     */
    public TransferManagerWrapper(String s3Region, AWSCredentialsProvider credentialsProvider) {
        AmazonS3ClientBuilder s3Builder = AmazonS3ClientBuilder.standard();
        if (credentialsProvider != null) {
            s3Builder = s3Builder.withCredentials(credentialsProvider);
        }
        if (StringUtils.isNotEmpty(s3Region)) {
            s3Builder = s3Builder.withRegion(s3Region);
        }
        transferManager = TransferManagerBuilder.standard()
                .withS3Client(s3Builder.build())
                .build();
    }

    /**
     * Returns the underlying transfer manager.
     */
    public TransferManager get() {
        return transferManager;
    }

    /**
     * Shuts the transfer manager down immediately.
     */
    @Override
    public void close() {
        transferManager.shutdownNow();
    }
}
| 4,200 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/util/SemicolonUtils.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.util;
import org.apache.commons.lang.StringUtils;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.regex.Pattern;
/**
 * Utility methods for working with semicolon-delimited strings in which a
 * literal semicolon is escaped as {@code \;}.
 */
public class SemicolonUtils {

    // Matches a semicolon that is NOT preceded by a backslash (negative lookbehind).
    private static final String SEMICOLON_SEPARATOR = "(?<!\\\\);";
    private static final Pattern regexPattern = Pattern.compile(SEMICOLON_SEPARATOR);

    private SemicolonUtils() {
        // Utility class: not instantiable.
    }

    /**
     * Splits a string on unescaped semicolons. Escaped semicolons ({@code \;})
     * are left intact inside the returned elements.
     *
     * @param s the string to split; may be null or empty
     * @return the split elements, or an empty list for null/empty input
     */
    public static Collection<String> split(String s) {
        if (s == null || s.isEmpty()) {
            return Collections.emptyList();
        }
        return Arrays.asList(regexPattern.split(s, 0));
    }

    /**
     * Replaces every escaped semicolon ({@code \;}) with a literal semicolon.
     * (String.replace is a no-op when the target sequence is absent, so no
     * pre-check is needed.)
     *
     * @param s the string to unescape; must not be null
     * @return the unescaped string
     */
    public static String unescape(String s) {
        return s.replace("\\;", ";");
    }
}
| 4,201 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/auth/HandshakeRequestConfig.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.auth;
import org.joda.time.DateTime;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Configuration describing how the Host header of a websocket handshake
 * request should be rewritten when connecting through a load balancer.
 *
 * Serialized form (see {@link #value()} and {@link #parse(String)}):
 * {@code <removeHostHeaderAfterSigning>,<port>,<endpoint1>[,<endpoint2>...]}
 */
public class HandshakeRequestConfig {

    /**
     * Parses a config from its comma-separated serialized form (the inverse of
     * {@link #value()}).
     *
     * @throws ArrayIndexOutOfBoundsException if fewer than two fields are present
     * @throws NumberFormatException          if the port field is not an int
     */
    public static HandshakeRequestConfig parse(String s) {
        String[] values = s.split(",");
        boolean removeHostHeaderAfterSigning = Boolean.parseBoolean(values[0]);
        int port = Integer.parseInt(values[1]);
        // Everything after the first two fields is an endpoint.
        Collection<String> endpoints = new ArrayList<>(Arrays.asList(values).subList(2, values.length));
        return new HandshakeRequestConfig(endpoints, port, removeHostHeaderAfterSigning);
    }

    private final List<String> endpoints;
    private final int port;
    private final boolean removeHostHeaderAfterSigning;
    // A time-based seed; System.currentTimeMillis() replaces the previous
    // joda-time call, which pulled in a whole library just for a seed value.
    private final Random random = new Random(System.currentTimeMillis());

    public HandshakeRequestConfig(Collection<String> endpoints, int port, boolean removeHostHeaderAfterSigning) {
        this.endpoints = new ArrayList<>(endpoints);
        this.port = port;
        this.removeHostHeaderAfterSigning = removeHostHeaderAfterSigning;
    }

    /**
     * Returns a "host:port" Host header value for a randomly chosen endpoint.
     */
    public String chooseHostHeader() {
        return String.format("%s:%s", endpoints.get(random.nextInt(endpoints.size())), port);
    }

    /**
     * Whether the Host header should be stripped again after SigV4 signing.
     */
    public boolean removeHostHeaderAfterSigning() {
        return removeHostHeaderAfterSigning;
    }

    /**
     * Serializes this config; inverse of {@link #parse(String)}.
     */
    public String value() {
        return String.format("%s,%s,%s", removeHostHeaderAfterSigning, port, String.join(",", endpoints));
    }

    @Override
    public String toString() {
        return value();
    }
}
| 4,202 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/auth/LBAwareAwsSigV4ClientHandshaker.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.auth;
import com.amazon.neptune.gremlin.driver.sigv4.ChainedSigV4PropertiesProvider;
import com.amazon.neptune.gremlin.driver.sigv4.SigV4Properties;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.neptune.auth.NeptuneNettyHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.websocketx.WebSocketClientHandshaker13;
import io.netty.handler.codec.http.websocketx.WebSocketVersion;
import java.net.URI;
public class LBAwareAwsSigV4ClientHandshaker extends WebSocketClientHandshaker13 {
private final ChainedSigV4PropertiesProvider sigV4PropertiesProvider;
private final HandshakeRequestConfig handshakeRequestConfig;
private final SigV4Properties sigV4Properties;
public LBAwareAwsSigV4ClientHandshaker(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, ChainedSigV4PropertiesProvider sigV4PropertiesProvider, HandshakeRequestConfig handshakeRequestConfig) {
super(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength);
this.sigV4PropertiesProvider = sigV4PropertiesProvider;
this.handshakeRequestConfig = handshakeRequestConfig;
this.sigV4Properties = this.loadProperties();
}
protected FullHttpRequest newHandshakeRequest() {
FullHttpRequest request = super.newHandshakeRequest();
request.headers().remove("Host");
request.headers().add("Host", handshakeRequestConfig.chooseHostHeader());
try {
NeptuneNettyHttpSigV4Signer sigV4Signer = new NeptuneNettyHttpSigV4Signer(this.sigV4Properties.getServiceRegion(), new DefaultAWSCredentialsProviderChain());
sigV4Signer.signRequest(request);
if (handshakeRequestConfig.removeHostHeaderAfterSigning()) {
request.headers().remove("Host");
}
return request;
} catch (NeptuneSigV4SignerException var4) {
throw new RuntimeException("Exception occurred while signing the request", var4);
}
}
private SigV4Properties loadProperties() {
return this.sigV4PropertiesProvider.getSigV4Properties();
}
}
| 4,203 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/FileToStreamOutputWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import org.apache.commons.io.input.Tailer;
import org.apache.commons.io.input.TailerListenerAdapter;
import java.io.Writer;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * An {@link OutputWriter} decorator that tails the file produced by the inner
 * writer and publishes each line that appears to a Kinesis stream. On close it
 * waits for the tailer to catch up, flushes the stream, and deletes the file.
 *
 * NOTE(review): completion tracking assumes each commit produces exactly one
 * line in the file — confirm against the inner writer's behavior.
 */
public class FileToStreamOutputWriter implements OutputWriter {

    private final OutputWriter innerOutputWriter;
    private final Path filePath;
    private final Stream stream;
    private final Tailer tailer;
    private final ExportListener listener;

    FileToStreamOutputWriter(OutputWriter innerOutputWriter, Path filePath, KinesisConfig kinesisConfig) {
        this.innerOutputWriter = innerOutputWriter;
        this.filePath = filePath;
        this.stream = kinesisConfig.stream();
        this.listener = new ExportListener(stream);
        // Tailer.create starts a background thread that invokes the listener
        // for each new line appended to the file.
        this.tailer = Tailer.create(filePath.toFile(), listener);
    }

    @Override
    public boolean isNewTarget() {
        return false;
    }

    @Override
    public String outputId() {
        return String.format("%s [for stream %s]", filePath.toString(), stream.name());
    }

    @Override
    public void startCommit() {
        innerOutputWriter.startCommit();
    }

    @Override
    public void endCommit() {
        innerOutputWriter.endCommit();
        // One commit == one expected line for the tailer to publish.
        listener.incrementTotalLineCount();
    }

    @Override
    public void print(String s) {
        innerOutputWriter.print(s);
    }

    @Override
    public Writer writer() {
        return innerOutputWriter.writer();
    }

    @Override
    public void startOp() {
        innerOutputWriter.startOp();
    }

    @Override
    public void endOp() {
        innerOutputWriter.endOp();
    }

    @Override
    public String lineSeparator() {
        return innerOutputWriter.lineSeparator();
    }

    @Override
    public void close() throws Exception {
        innerOutputWriter.close();
        // Poll until the tailer thread has published every committed line.
        while (!listener.isFinished()) {
            Thread.sleep(1000);
        }
        tailer.stop();
        stream.flushRecords();
        Files.deleteIfExists(filePath);
    }

    /**
     * Tailer listener that publishes each line to the stream and tracks how
     * many lines have been processed versus how many commits were made.
     */
    private static class ExportListener extends TailerListenerAdapter {

        private final Stream stream;
        private final AtomicInteger totalLineCount = new AtomicInteger(0);
        // Written on the Tailer thread and read from close() on another thread,
        // so it must be atomic: a plain int carries no cross-thread visibility
        // guarantee, and close() could otherwise spin forever on a stale value.
        private final AtomicInteger linesProcessed = new AtomicInteger(0);

        private ExportListener(Stream stream) {
            this.stream = stream;
        }

        @Override
        public void handle(String line) {
            stream.publish(line);
            linesProcessed.incrementAndGet();
        }

        public void incrementTotalLineCount() {
            totalLineCount.incrementAndGet();
        }

        // True once the number of lines published equals the number of commits.
        public boolean isFinished() {
            return linesProcessed.get() == totalLineCount.get();
        }
    }
}
| 4,204 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/FileExtension.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Implemented by types that are associated with a particular file extension.
 */
public interface FileExtension {

    /**
     * Returns the file extension associated with this instance.
     */
    String extension();
}
| 4,205 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Status.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks the progress of a long-running operation and periodically emits a
 * progress indicator: either dots written to stderr or info log messages,
 * depending on the configured {@link StatusOutputFormat}.
 */
public class Status {

    private static final Logger logger = LoggerFactory.getLogger(Status.class);

    private final AtomicLong counter = new AtomicLong();
    private final AtomicBoolean allowContinue = new AtomicBoolean(true);
    private final StatusOutputFormat outputFormat;
    private final String description;
    private final Supplier<String> additionalDetailsSupplier;

    public Status(StatusOutputFormat outputFormat) {
        this(outputFormat, "");
    }

    public Status(StatusOutputFormat outputFormat, String description) {
        this(outputFormat, description, () -> "");
    }

    public Status(StatusOutputFormat outputFormat, String description, Supplier<String> additionalDetailsSupplier) {
        this.outputFormat = outputFormat;
        this.description = description;
        this.additionalDetailsSupplier = additionalDetailsSupplier;
    }

    /**
     * Records one unit of progress. In Dot mode a '.' is printed to stderr every
     * 10,000 updates; in Description mode an info message is logged every
     * 100,000 updates.
     */
    public void update() {
        long total = counter.incrementAndGet();
        if (outputFormat == StatusOutputFormat.Dot && total % 10000 == 0) {
            System.err.print(".");
        } else if (outputFormat == StatusOutputFormat.Description && total % 100000 == 0) {
            logger.info("{} ({}){}", total, description, additionalDetailsSupplier.get());
        }
    }

    /**
     * Returns false once {@link #halt()} has been called.
     */
    public boolean allowContinue() {
        return allowContinue.get();
    }

    /**
     * Signals that processing should stop.
     */
    public void halt() {
        allowContinue.set(false);
    }
}
| 4,206 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/RecordSplitter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeType;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.lang.StringUtils;
import org.eclipse.rdf4j.model.IRI;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.model.Value;
import org.eclipse.rdf4j.model.impl.SimpleValueFactory;
import org.eclipse.rdf4j.rio.RDFHandler;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.RDFParser;
import org.eclipse.rdf4j.rio.nquads.NQuadsParserFactory;
import org.eclipse.rdf4j.rio.nquads.NQuadsWriter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
/**
 * Splits serialized records that exceed a configured maximum size into
 * multiple smaller records. Input records are JSON arrays whose elements may
 * be Neptune stream events (objects with an "eventId" field), plain strings,
 * numbers, or other JSON values; each element is re-emitted as its own
 * single-element JSON array.
 *
 * NOTE(review): instances are not thread-safe — the parser/handler pair is
 * shared mutable state across calls to {@link #split(String)}.
 */
public class RecordSplitter {

    private static final Logger logger = LoggerFactory.getLogger(RecordSplitter.class);

    /**
     * Splits a string into chunks of at most {@code length} characters, using a
     * default word-boundary margin of 10 characters.
     */
    public static Collection<String> splitByLength(String s, int length){
        return splitByLength(s ,length, 10);
    }

    /**
     * Splits a string into chunks of at most {@code length} characters,
     * preferring to break at a non-alphanumeric character found within the last
     * {@code wordBoundaryMargin} characters of each chunk. Chunks are trimmed,
     * and chunks that are empty after trimming are dropped.
     */
    public static Collection<String> splitByLength(String s, int length, int wordBoundaryMargin) {
        int startIndex = 0;
        Collection<String> results = new ArrayList<>();
        while (startIndex < s.length()) {
            boolean foundWordBoundary = false;
            int endIndex = Math.min(startIndex + length, s.length());
            int minCandidateEndIndex = Math.max(startIndex +1, endIndex - wordBoundaryMargin);
            // Scan backwards from the hard limit looking for a break point.
            for (int actualEndIndex = endIndex; actualEndIndex >= minCandidateEndIndex; actualEndIndex--){
                if (!StringUtils.isAlphanumeric( s.substring(actualEndIndex - 1, actualEndIndex))){
                    String result = s.substring(startIndex, actualEndIndex);
                    String trimmedResult = result.trim();
                    if (StringUtils.isNotEmpty(trimmedResult)){
                        results.add(trimmedResult);
                    }
                    startIndex = actualEndIndex;
                    foundWordBoundary = true;
                    break;
                }
            }
            if (!foundWordBoundary){
                // No word boundary within the margin: cut at the hard limit.
                String result = s.substring(startIndex, endIndex);
                String trimmedResult = result.trim();
                if (StringUtils.isNotEmpty(trimmedResult)){
                    results.add(trimmedResult);
                }
                startIndex = endIndex;
            }
        }
        return results;
    }

    // Maximum length a value may have so that the record wrapping it stays
    // within maxLength. The -2 is presumably headroom for the quotes added
    // when re-serializing the value — TODO confirm.
    private static int calculateStringMaxLength(int maxLength, int recordLength, int valueLength) {
        return maxLength - (recordLength - valueLength) - 2;
    }

    private final int maxSize;
    private final LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy;
    private final ObjectMapper mapper = new ObjectMapper();
    // N-Quads parser and its handler are reused across split() calls;
    // handler.reset(...) must be called before each parse.
    private final RDFParser parser = new NQuadsParserFactory().getParser();
    private final StatementHandler handler = new StatementHandler();

    public RecordSplitter(int maxSize, LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy) {
        this.maxSize = maxSize;
        this.largeStreamRecordHandlingStrategy = largeStreamRecordHandlingStrategy;
        this.parser.setRDFHandler(handler);
    }

    /**
     * Splits a JSON-array record into one or more single-element JSON-array
     * records. Neptune stream events are renumbered with consecutive opNum
     * values; oversized string payloads are shredded via splitByLength. Input
     * that is not valid JSON is returned as-is.
     */
    public Collection<String> split(String s) {
        Collection<String> results = new ArrayList<>();
        int opNum = 1;
        try {
            JsonNode json = mapper.readTree(s);
            for (JsonNode jsonNode : json) {
                if (isNeptuneStreamEvent(jsonNode)) {
                    Collection<String> events = splitNeptuneStreamEvent(jsonNode, opNum);
                    results.addAll(events);
                    opNum += events.size();
                } else {
                    JsonNodeType nodeType = jsonNode.getNodeType();
                    if (nodeType == JsonNodeType.NUMBER) {
                        results.addAll(splitNumber(jsonNode));
                    } else if (nodeType == JsonNodeType.STRING) {
                        results.addAll(splitString(jsonNode));
                    } else {
                        // This may end up being dropped
                        results.add(format(jsonNode.toString()));
                    }
                }
            }
        } catch (JsonProcessingException e) {
            // Not valid JSON. This will almost certainly be dropped
            results.add(s);
        }
        return results;
    }

    /**
     * Splits a single Neptune stream event. If the serialized event exceeds
     * maxSize and the strategy allows shredding, its payload (a property-graph
     * value or an RDF statement) is split and the event is emitted once per
     * fragment with consecutive opNum values; otherwise it is emitted once.
     * NOTE(review): the same mutable jsonNode is re-serialized per fragment.
     */
    private Collection<String> splitNeptuneStreamEvent(JsonNode jsonNode, int opNum) {
        Collection<String> results = new ArrayList<>();
        ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
        String jsonString = jsonNode.toString();
        int eventJsonLength = jsonString.length();
        if (eventJsonLength > maxSize && largeStreamRecordHandlingStrategy.allowShred()) {
            if (isProperytGraphEvent(jsonNode)) {
                // Property-graph event: shred the property value at data.value.value.
                String value = jsonNode.get("data").get("value").get("value").textValue();
                int maxStringLength = calculateStringMaxLength(maxSize, eventJsonLength, value.length());
                Collection<String> splitValues = splitByLength(value, maxStringLength);
                for (String splitValue : splitValues) {
                    ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
                    ((ObjectNode) jsonNode.get("data").get("value")).replace("value", mapper.valueToTree(splitValue));
                    results.add(format(jsonNode.toString()));
                    opNum += 1;
                }
            } else {
                // RDF event: parse the N-Quads statement at data.stmt and shred
                // its object literal via the StatementHandler.
                String statement = jsonNode.get("data").get("stmt").textValue();
                int statementLength = statement.length();
                int maxStatementLength = calculateStringMaxLength(maxSize, eventJsonLength, statementLength);
                handler.reset(statementLength, maxStatementLength);
                try {
                    parser.parse(new StringReader(statement));
                    for (String splitStatement : handler.statements()) {
                        ((ObjectNode) jsonNode.get("eventId")).replace("opNum", mapper.valueToTree(opNum));
                        ((ObjectNode) jsonNode.get("data")).replace("stmt", mapper.valueToTree(splitStatement));
                        results.add(format(jsonNode.toString()));
                        opNum += 1;
                    }
                } catch (IOException e) {
                    // Statement could not be parsed: emit the oversized event
                    // unchanged (it may be dropped downstream). TODO: revisit.
                    results.add(format(jsonString));
                }
            }
        } else {
            results.add(format(jsonString));
        }
        return results;
    }

    // A property-graph change record carries its payload under data.value;
    // RDF change records carry an N-Quads statement under data.stmt instead.
    private boolean isProperytGraphEvent(JsonNode jsonNode) {
        return jsonNode.get("data").has("value");
    }

    // Splits an oversized plain-string element, re-quoting each fragment.
    private Collection<String> splitString(JsonNode jsonNode) {
        Collection<String> results = new ArrayList<>();
        String jsonString = jsonNode.textValue();
        if (jsonString.length() > maxSize) {
            Collection<String> splitValues = splitByLength(jsonString, maxSize);
            for (String splitValue : splitValues) {
                results.add(format(splitValue, true));
            }
        } else {
            results.add(format(jsonString, true));
        }
        return results;
    }

    // Numbers never need shredding; emit as a single-element array.
    private Collection<String> splitNumber(JsonNode jsonNode) {
        return Collections.singletonList(format(jsonNode.asText()));
    }

    // Neptune stream events are identified by the presence of an eventId field.
    private boolean isNeptuneStreamEvent(JsonNode jsonNode) {
        return jsonNode.has("eventId");
    }

    private String format(String s) {
        return format(s, false);
    }

    // Wraps the payload in a JSON array, optionally re-adding string quotes.
    // NOTE(review): quotes/brackets are added via String.format, so embedded
    // quotes in s are not re-escaped — confirm callers pass safe input.
    private String format(String s, boolean addQuotes) {
        if (addQuotes) {
            return String.format("[\"%s\"]", s);
        } else {
            return String.format("[%s]", s);
        }
    }

    /**
     * Collects statements from the N-Quads parser, splitting oversized literal
     * objects into multiple copies of the statement, one per literal fragment.
     * reset(...) must be called before each parse run.
     */
    private static class StatementHandler implements RDFHandler {

        private final Collection<String> results = new ArrayList<>();
        private int statementLength;
        private int maxStatementLength;

        @Override
        public void startRDF() throws RDFHandlerException {
        }

        @Override
        public void endRDF() throws RDFHandlerException {
        }

        @Override
        public void handleNamespace(String s, String s1) throws RDFHandlerException {
        }

        @Override
        public void handleStatement(Statement statement) throws RDFHandlerException {
            Value object = statement.getObject();
            if (object.isLiteral()) {
                // Shred the literal and emit one copy of the statement per fragment.
                String objectValue = object.stringValue();
                int maxObjectLength = calculateStringMaxLength(maxStatementLength, statementLength, objectValue.length());
                Collection<String> splitValues = splitByLength(objectValue, maxObjectLength);
                for (String splitValue : splitValues) {
                    StringWriter writer = new StringWriter();
                    // Delegate subject/predicate/context to the original
                    // statement; only the object literal is replaced.
                    new NQuadsWriter(writer).consumeStatement(new Statement() {
                        @Override
                        public Resource getSubject() {
                            return statement.getSubject();
                        }

                        @Override
                        public IRI getPredicate() {
                            return statement.getPredicate();
                        }

                        @Override
                        public Value getObject() {
                            return SimpleValueFactory.getInstance().createLiteral(splitValue);
                        }

                        @Override
                        public Resource getContext() {
                            return statement.getContext();
                        }
                    });
                    results.add(writer.toString());
                }
            } else {
                // Non-literal objects cannot be shredded; pass the statement through.
                results.add(String.format("%s\n", statement.toString()));
            }
        }

        @Override
        public void handleComment(String s) throws RDFHandlerException {
        }

        // Prepares the handler for a new parse run.
        public void reset(int statementLength, int maxStatementLength) {
            this.statementLength = statementLength;
            this.maxStatementLength = maxStatementLength;
            results.clear();
        }

        public Collection<String> statements() {
            return results;
        }
    }
}
| 4,207 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/CommandWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Writes the outputs of a command: its return value and informational messages.
 */
public interface CommandWriter {

    /**
     * Writes a value representing the result of the command.
     */
    void writeReturnValue(String value);

    /**
     * Writes an informational message.
     */
    void writeMessage(String value);
}
| 4,208 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/DirectoryStructure.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
 * The on-disk directory layouts used by the different export modes. Each
 * constant creates only the directories that its mode writes into.
 */
public enum DirectoryStructure {

    Config,
    PropertyGraph,
    Rdf,
    GremlinQueries,
    SparqlQueries,
    SimpleStreamsOutput;

    /**
     * Creates the directories required by this layout (including any missing
     * parent directories; already-existing directories are left untouched).
     *
     * @throws IOException if a directory cannot be created
     */
    public void createDirectories(Path directory,
                                  Path nodesDirectory,
                                  Path edgesDirectory,
                                  Path statementsDirectory,
                                  Path resultsDirectory,
                                  Path recordsDirectory) throws IOException {
        switch (this) {
            case Config:
                Files.createDirectories(directory);
                break;
            case PropertyGraph:
                Files.createDirectories(nodesDirectory);
                Files.createDirectories(edgesDirectory);
                break;
            case Rdf:
                Files.createDirectories(statementsDirectory);
                break;
            case GremlinQueries:
            case SparqlQueries:
                // Both query modes write their output under the results directory.
                Files.createDirectories(resultsDirectory);
                break;
            case SimpleStreamsOutput:
                Files.createDirectories(recordsDirectory);
                break;
        }
    }
}
| 4,209 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/Stream.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.Attempt;
import com.amazonaws.services.kinesis.producer.KinesisProducer;
import com.amazonaws.services.kinesis.producer.UserRecordFailedException;
import com.amazonaws.services.kinesis.producer.UserRecordResult;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
public class Stream {
private final KinesisProducer kinesisProducer;
private final String streamName;
private final StreamThrottle streamThrottle;
private final LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy;
private final RecordSplitter splitter;
private final AtomicLong counter = new AtomicLong();
private static final Logger logger = LoggerFactory.getLogger(Stream.class);
private static final int MAX_SIZE_BYTES = 1000000;
public Stream(KinesisProducer kinesisProducer,
String streamName,
LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy) {
this.kinesisProducer = kinesisProducer;
this.streamName = streamName;
this.streamThrottle = new StreamThrottle(kinesisProducer);
this.largeStreamRecordHandlingStrategy = largeStreamRecordHandlingStrategy;
this.splitter = new RecordSplitter(MAX_SIZE_BYTES, largeStreamRecordHandlingStrategy);
}
public synchronized void publish(String s) {
if (StringUtils.isNotEmpty(s) && s.length() > 2) {
try {
long partitionKeyValue = counter.incrementAndGet();
byte[] bytes = s.getBytes(StandardCharsets.UTF_8.name());
if (bytes.length > MAX_SIZE_BYTES && largeStreamRecordHandlingStrategy.allowSplit()) {
Collection<String> splitRecords = splitter.split(s);
for (String splitRecord : splitRecords) {
publish(partitionKeyValue, splitRecord.getBytes(StandardCharsets.UTF_8.name()));
}
} else {
publish(partitionKeyValue, bytes);
}
} catch (UnsupportedEncodingException e) {
logger.error(e.getMessage());
}
}
}
private void publish(long partitionKeyValue, byte[] bytes) {
if (bytes.length > MAX_SIZE_BYTES) {
logger.warn("Dropping record because it is larger than 1 MB: [{}] '{}...'", bytes.length, new String(Arrays.copyOfRange(bytes, 0, 256)));
return;
}
try {
ByteBuffer data = ByteBuffer.wrap(bytes);
streamThrottle.recalculateMaxBufferSize(partitionKeyValue, bytes.length);
streamThrottle.throttle();
ListenableFuture<UserRecordResult> future = kinesisProducer.addUserRecord(streamName, String.valueOf(partitionKeyValue), data);
Futures.addCallback(future, CALLBACK, MoreExecutors.directExecutor());
} catch (InterruptedException e) {
logger.error(e.getMessage());
Thread.currentThread().interrupt();
}
}
public String name() {
return streamName;
}
public void flushRecords() {
kinesisProducer.flushSync();
}
private static final FutureCallback<UserRecordResult> CALLBACK = new FutureCallback<UserRecordResult>() {
@Override
public void onSuccess(UserRecordResult userRecordResult) {
if (!userRecordResult.isSuccessful()) {
logger.error("Unsuccessful attempt to write to stream: " + formatAttempts(userRecordResult.getAttempts()));
}
}
@Override
public void onFailure(Throwable throwable) {
if (UserRecordFailedException.class.isAssignableFrom(throwable.getClass())) {
UserRecordFailedException e = (UserRecordFailedException) throwable;
logger.error("Error writing to stream: " + formatAttempts(e.getResult().getAttempts()));
}
logger.error("Error writing to stream.", throwable);
}
};
private static String formatAttempts(List<Attempt> attempts) {
StringBuilder builder = new StringBuilder();
for (Attempt attempt : attempts) {
builder.append("[");
builder.append(attempt.getErrorCode()).append(":").append(attempt.getErrorMessage());
builder.append("(").append(attempt.getDelay()).append(",").append(attempt.getDuration()).append(")");
builder.append("]");
}
return builder.toString();
}
} | 4,210 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/io/StdOutPrintOutputWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
/**
 * A {@link PrintOutputWriter} that writes to {@code System.out} with auto-flush enabled.
 * <p>
 * {@link #close()} deliberately only flushes (it never closes the underlying stream),
 * so {@code System.out} remains usable after this writer is discarded.
 */
public class StdOutPrintOutputWriter extends PrintOutputWriter {

    // Symbolic output id reported by outputId() for this writer.
    private static final String StdOut = "StdOut";

    public StdOutPrintOutputWriter() {
        // Removed the unreachable private constructor ladder that previously mirrored
        // every PrintOutputWriter constructor: only this (OutputStream, autoFlush)
        // pathway was ever used.
        super(StdOut, System.out, true);
    }

    @Override
    public void endCommit() {
        flush();
    }

    @Override
    public void close() {
        // Flush only: closing System.out would break any subsequent console output.
        flush();
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.Writer;
/**
 * Abstraction over an export output target (file, stdout, Kinesis stream, or no-op),
 * adding commit and per-operation lifecycle hooks around the underlying writer.
 */
public interface OutputWriter extends AutoCloseable {
    /** Returns true if the underlying target was newly created for this writer. */
    boolean isNewTarget();
    /** Identifier for the output target (typically a file path or a symbolic name). */
    String outputId();
    /** Invoked before a batch of operations is written. */
    void startCommit();
    /** Invoked after a batch of operations has been written. */
    void endCommit();
    /** Writes the supplied string to the target. */
    void print(String s);
    /** Returns the underlying {@link Writer}. */
    Writer writer();
    /** Invoked before each individual operation within a commit. */
    void startOp();
    /** Invoked after each individual operation within a commit. */
    void endOp();
    /** Returns the separator to use between lines (implementations may return an empty string). */
    String lineSeparator();
    void close() throws Exception;
}
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Strategies for handling records that exceed the maximum stream record size:
 * drop them outright, split and drop any still-oversized pieces, or split and
 * then shred the remainder.
 */
public enum LargeStreamRecordHandlingStrategy {

    dropAll(false, false),
    splitAndDrop(true, false),
    splitAndShred(true, true);

    // Whether oversized records may be split into smaller records.
    private final boolean splitAllowed;
    // Whether still-oversized pieces may be further shredded.
    private final boolean shredAllowed;

    LargeStreamRecordHandlingStrategy(boolean splitAllowed, boolean shredAllowed) {
        this.splitAllowed = splitAllowed;
        this.shredAllowed = shredAllowed;
    }

    public boolean allowSplit() {
        return splitAllowed;
    }

    public boolean allowShred() {
        return shredAllowed;
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.KinesisProducer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Back-pressure for writes to a {@link KinesisProducer}: tracks the bytes written in a
 * tumbling window, derives a dynamic high watermark for the number of queued records,
 * and pauses the caller while the producer's outstanding-record count exceeds it.
 */
public class StreamThrottle {

    private static final Logger logger = LoggerFactory.getLogger(StreamThrottle.class);

    // Hard ceiling for the dynamically computed high watermark.
    private static final long MAX_QUEUE_HIGH_WATERMARK = 10000;
    // Target ceiling (in bytes) for data buffered in the producer queue.
    private static final long QUEUE_SIZE_BYTES = 10000000;
    // Records larger than this trigger an immediate watermark recalculation.
    private static final int LENGTH_HIGH_WATERMARK = 900000;

    private final KinesisProducer kinesisProducer;
    // Bytes accumulated in the current tumbling window.
    private final AtomicLong windowSizeBytes = new AtomicLong();
    // Volatile: read by throttling threads, written on recalculation.
    private volatile long queueHighWatermark = 10000;
    private volatile int tumblingWindowSize = 10;

    public StreamThrottle(KinesisProducer kinesisProducer) {
        this.kinesisProducer = kinesisProducer;
    }

    /**
     * Adds {@code length} to the current window and, at window boundaries (or when a very
     * large record is seen), recomputes the queue high watermark from the average record size.
     *
     * @param counter running record counter (window boundary when divisible by the window size)
     * @param length  size in bytes of the record just written
     */
    public void recalculateMaxBufferSize(long counter, long length) {
        long currentWindowSizeBytes = windowSizeBytes.addAndGet(length);
        if (length > LENGTH_HIGH_WATERMARK || counter % tumblingWindowSize == 0) {
            // Guard against ArithmeticException: with fewer bytes than tumblingWindowSize in the
            // window, integer division yields 0 and the original code divided by zero.
            long averageRecordSizeBytes = Math.max(1L, currentWindowSizeBytes / tumblingWindowSize);
            queueHighWatermark = Math.min(QUEUE_SIZE_BYTES / averageRecordSizeBytes, MAX_QUEUE_HIGH_WATERMARK);
            logger.trace("Current window has {} records totalling {} bytes, meaning that maxNumberOfQueuedRecords cannot exceed {}", tumblingWindowSize, currentWindowSizeBytes, queueHighWatermark);
            windowSizeBytes.set(0);
        }
    }

    /**
     * Busy-waits (1 ms sleeps) while the producer's outstanding-record count exceeds the
     * current high watermark.
     *
     * @throws InterruptedException if interrupted while waiting
     */
    public void throttle() throws InterruptedException {
        if (kinesisProducer.getOutstandingRecordsCount() > (queueHighWatermark)) {
            long start = System.currentTimeMillis();
            while (kinesisProducer.getOutstandingRecordsCount() > (queueHighWatermark)) {
                Thread.sleep(1);
            }
            long end = System.currentTimeMillis();
            logger.debug("Paused adding records to stream for {} millis while number of queued records exceeded maxNumberOfQueuedRecords of {}", end - start, queueHighWatermark);
        }
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.neptune.cluster.EventId;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.NamedQueriesCollection;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.lang.StringUtils;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Encapsulates the on-disk directory layout for an export run: the export root,
 * the per-element subdirectories (nodes, edges, statements, results, records),
 * and the well-known metadata file paths (config, stats, lastEventId, queries).
 */
public class Directories {

    /**
     * Returns a URL-encoded file name of the form {@code <name>-<n>}, where {@code n}
     * is obtained by incrementing the supplied index.
     */
    public static String fileName(String name, AtomicInteger index) throws UnsupportedEncodingException {
        String filename = String.format("%s-%s", name, index.incrementAndGet());
        return URLEncoder.encode(filename, StandardCharsets.UTF_8.toString());
    }

    /** Returns a URL-encoded version of the supplied file name, safe for the local filesystem. */
    public static String fileName(String filename) throws UnsupportedEncodingException {
        return URLEncoder.encode(filename, StandardCharsets.UTF_8.toString());
    }

    private static final String CONFIG_FILE = "config.json";
    private static final String STATS_FILE = "stats.json";
    private static final String LAST_EVENT_ID_FILE = "lastEventId.json";
    private static final String QUERIES_FILE = "queries.json";

    /**
     * Creates the directory layout for an export.
     *
     * @param directoryStructure   decides which of the candidate directories are actually created
     * @param root                 root output directory (must not be null)
     * @param exportId             unique id for this export run
     * @param tag                  optional prefix for the export directory and file names
     * @param partitionDirectories optional '/'-separated partition path; when present, files go
     *                             directly beneath the root rather than a per-export subdirectory
     * @throws IllegalArgumentException if {@code root} is null
     */
    public static Directories createFor(DirectoryStructure directoryStructure,
                                        File root,
                                        String exportId,
                                        String tag,
                                        String partitionDirectories) throws IOException {
        if (root == null) {
            throw new IllegalArgumentException("You must supply a directory");
        }

        Path rootDirectory = root.toPath();
        Path directory;

        if (StringUtils.isNotEmpty(partitionDirectories)) {
            directory = rootDirectory;
        } else {
            String directoryName = tag.isEmpty() ?
                    exportId :
                    String.format("%s-%s", tag, exportId);
            directory = rootDirectory.resolve(directoryName);
        }

        Path nodesDirectory = createElementDirectory("nodes", directory, partitionDirectories);
        Path edgesDirectory = createElementDirectory("edges", directory, partitionDirectories);
        Path statementsDirectory = createElementDirectory("statements", directory, partitionDirectories);
        Path resultsDirectory = createElementDirectory("results", directory, partitionDirectories);
        Path recordsDirectory = createElementDirectory("records", directory, partitionDirectories);

        // Only the directories relevant to this export type are created on disk.
        directoryStructure.createDirectories(
                directory,
                nodesDirectory,
                edgesDirectory,
                statementsDirectory,
                resultsDirectory,
                recordsDirectory);

        return new Directories(
                directory,
                pathOrNull(nodesDirectory),
                pathOrNull(edgesDirectory),
                pathOrNull(statementsDirectory),
                resultsDirectory,
                pathOrNull(recordsDirectory),
                tag);
    }

    // Returns the path if the directory was created on disk, or null if it was skipped.
    private static Path pathOrNull(Path path) {
        if (path.toFile().exists()) {
            return path;
        } else {
            return null;
        }
    }

    // Resolves an element directory (e.g. 'nodes') beneath the export directory, appending
    // any partition subdirectories supplied as a '/'-separated string.
    private static Path createElementDirectory(String name, Path directory, String partitionDirectories) {
        Path elementDirectory = directory.resolve(name);
        if (StringUtils.isNotEmpty(partitionDirectories)) {
            String[] partitions = partitionDirectories.split("/");
            for (String partition : partitions) {
                if (StringUtils.isNotEmpty(partition)) {
                    elementDirectory = elementDirectory.resolve(partition);
                }
            }
        }
        return elementDirectory;
    }

    private final String tag;
    private final Path directory;
    private final Path nodesDirectory;
    private final Path edgesDirectory;
    private final Path statementsDirectory;
    private final Path resultsDirectory;
    private final Path recordsDirectory;
    private final File directoryFile;

    private Directories(Path directory,
                        Path nodesDirectory,
                        Path edgesDirectory,
                        Path statementsDirectory,
                        Path resultsDirectory,
                        Path recordsDirectory,
                        String tag) {
        this.directory = directory;
        this.nodesDirectory = nodesDirectory;
        this.edgesDirectory = edgesDirectory;
        this.statementsDirectory = statementsDirectory;
        this.resultsDirectory = resultsDirectory;
        this.recordsDirectory = recordsDirectory;
        this.tag = tag;
        this.directoryFile = directory.toFile();
    }

    public void writeRootDirectoryPathAsMessage(String fileType, CommandWriter writer) {
        writer.writeMessage(fileType + " files : " + directory.toAbsolutePath().toString());
    }

    public Path writeRootDirectoryPathAsReturnValue(CommandWriter writer) {
        Path path = directory.toAbsolutePath();
        writer.writeReturnValue(path.toString());
        return path;
    }

    /** Free space on the export volume, in decimal gigabytes (10^9 bytes). */
    public long freeSpaceInGigabytes() {
        return directoryFile.getFreeSpace() / 1000000000;
    }

    public Path rootDirectory() {
        return directory.toAbsolutePath();
    }

    /** Returns the absolute paths of the element subdirectories that exist for this export. */
    public Collection<Path> subdirectories() {
        List<Path> paths = new ArrayList<>();
        addIfNotNull(nodesDirectory, paths);
        addIfNotNull(edgesDirectory, paths);
        addIfNotNull(statementsDirectory, paths);
        addIfNotNull(resultsDirectory, paths);
        addIfNotNull(recordsDirectory, paths);
        return paths;
    }

    private void addIfNotNull(Path path, List<Path> paths) {
        if (path != null) {
            paths.add(path.toAbsolutePath());
        }
    }

    public Path writeConfigFilePathAsReturnValue(CommandWriter writer) {
        Path path = configFilePath().toAbsolutePath();
        writer.writeReturnValue(path.toString());
        return path;
    }

    public void writeResultsDirectoryPathAsMessage(String fileType, CommandWriter writer) {
        writer.writeMessage(fileType + " files : " + resultsDirectory.toAbsolutePath().toString());
    }

    /**
     * Path for a nodes output file. Falls back to the records directory (prefixing the file
     * name with 'nodes-') when the nodes directory is absent; otherwise optionally nests the
     * file in a per-label subdirectory.
     */
    public Path createNodesFilePath(String name, FileExtension extension, Label label, boolean perLabelDirectories) {
        if (nodesDirectory == null && recordsDirectory != null) {
            return createFilePath(recordsDirectory, String.format("nodes-%s", name), extension);
        } else if (perLabelDirectories) {
            return createFilePath(ensureLabelDirectory(nodesDirectory, label, "nodes"), name, extension);
        } else {
            return createFilePath(nodesDirectory, name, extension);
        }
    }

    /**
     * Path for an edges output file. Mirrors {@link #createNodesFilePath}: records-directory
     * fallback, then optional per-label subdirectory.
     */
    public Path createEdgesFilePath(String name, FileExtension extension, Label label, boolean perLabelDirectories) {
        if (edgesDirectory == null && recordsDirectory != null) {
            return createFilePath(recordsDirectory, String.format("edges-%s", name), extension);
        }
        if (perLabelDirectories) {
            return createFilePath(ensureLabelDirectory(edgesDirectory, label, "edges"), name, extension);
        } else {
            return createFilePath(edgesDirectory, name, extension);
        }
    }

    // Lazily creates a per-label subdirectory (double-checked under 'this' so concurrent
    // writers create it once) and returns its path. Previously this logic was duplicated in
    // createNodesFilePath/createEdgesFilePath and discarded the causal IOException.
    private Path ensureLabelDirectory(Path parentDirectory, Label label, String elementType) {
        File labelDirectory = new File(parentDirectory.toFile(), label.labelsAsString());
        if (!labelDirectory.exists()) {
            synchronized (this) {
                if (!labelDirectory.exists()) {
                    try {
                        Files.createDirectories(labelDirectory.toPath());
                    } catch (IOException e) {
                        // Preserve the cause so the underlying filesystem error is not lost.
                        throw new RuntimeException(
                                String.format("Unable to create %s directory for %s", elementType, label.labelsAsString()),
                                e);
                    }
                }
            }
        }
        return labelDirectory.toPath();
    }

    /** Path for a statements output file, falling back to the records directory when needed. */
    public Path createStatementsFilePath(String name, FileExtension extension) {
        if (statementsDirectory == null && recordsDirectory != null) {
            return createFilePath(recordsDirectory, name, extension);
        } else {
            return createFilePath(statementsDirectory, name, extension);
        }
    }

    public Path createQueryResultsFilePath(String directoryName, String fileName, FileExtension extension) {
        Path directory = resultsDirectory.resolve(directoryName);
        return createFilePath(directory, fileName, extension);
    }

    public void createResultsSubdirectories(Collection<String> subdirectoryNames) throws IOException {
        for (String subdirectoryName : subdirectoryNames) {
            Files.createDirectories(resultsDirectory.resolve(subdirectoryName));
        }
    }

    public JsonResource<GraphSchema, Boolean> configFileResource() {
        return new JsonResource<>("Config file",
                configFilePath().toUri(),
                GraphSchema.class);
    }

    public JsonResource<ExportStats, GraphSchema> statsFileResource() {
        return new JsonResource<>("Stats file",
                statsFilePath().toUri(),
                ExportStats.class);
    }

    public JsonResource<EventId, Object> lastEventIdFileResource() {
        return new JsonResource<>("LastEventId file",
                lastEventIdFilePath().toUri(),
                EventId.class);
    }

    public JsonResource<NamedQueriesCollection, Object> queriesResource() {
        return new JsonResource<>("Queries file",
                queriesFilePath().toUri(),
                NamedQueriesCollection.class);
    }

    /**
     * Resolves {@code [tag-]name.ext} beneath the supplied directory. Names longer than
     * 250 bytes are replaced by their SHA-1 hex digest to stay within filesystem limits.
     */
    public Path createFilePath(Path directory, String name, FileExtension extension) {
        String filenameWithoutExtension = tag.isEmpty() ?
                name :
                String.format("%s-%s", tag, name);
        String filename = filenameWithoutExtension.getBytes().length > 250 ?
                String.format("%s.%s", DigestUtils.sha1Hex(filenameWithoutExtension), extension.extension()) :
                String.format("%s.%s", filenameWithoutExtension, extension.extension());
        return directory.resolve(filename);
    }

    private Path configFilePath() {
        return directory.resolve(CONFIG_FILE).toAbsolutePath();
    }

    private Path statsFilePath() {
        return directory.resolve(STATS_FILE).toAbsolutePath();
    }

    private Path lastEventIdFilePath() {
        return directory.resolve(LAST_EVENT_ID_FILE).toAbsolutePath();
    }

    private Path queriesFilePath() {
        return directory.resolve(QUERIES_FILE).toAbsolutePath();
    }

    public Path debugFilePath(String name) {
        return directory.resolve(name + ".txt").toAbsolutePath();
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import com.amazonaws.services.kinesis.producer.*;
import com.amazonaws.services.neptune.cli.AbstractTargetModule;
import org.apache.commons.lang.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Holds the (optional) Kinesis {@code Stream} for an export: the stream is only
 * constructed when both a region and a stream name are configured; otherwise
 * {@link #stream()} throws.
 */
public class KinesisConfig {

    private static final Logger logger = LoggerFactory.getLogger(KinesisConfig.class);

    // Null when no region/stream name was configured.
    private final Stream stream;

    @Deprecated
    public KinesisConfig(String streamName, String region, LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy, boolean enableAggregation) {
        // Adapts the legacy argument list onto the AbstractTargetModule-based constructor.
        this(new AbstractTargetModule() {
            @Override
            protected DirectoryStructure directoryStructure() {
                return null;
            }

            @Override
            public String getStreamName() {
                return streamName;
            }

            @Override
            public String getRegion() {
                return region;
            }

            @Override
            public LargeStreamRecordHandlingStrategy getLargeStreamRecordHandlingStrategy() {
                return largeStreamRecordHandlingStrategy;
            }

            @Override
            public boolean isEnableAggregation() {
                return enableAggregation;
            }
        });
    }

    public KinesisConfig(AbstractTargetModule targetModule) {
        // Guard clause: without both a region and a stream name there is nothing to build.
        if (StringUtils.isEmpty(targetModule.getRegion()) || StringUtils.isEmpty(targetModule.getStreamName())) {
            this.stream = null;
            return;
        }

        logger.trace("Constructing new KinesisConfig for stream name: {}, in region: {}, with LargeStreamRecordHandlingStrategy: {} and AggregationEnabled={}",
                targetModule.getStreamName(), targetModule.getRegion(), targetModule.getLargeStreamRecordHandlingStrategy(), targetModule.isEnableAggregation());

        KinesisProducerConfiguration producerConfiguration = new KinesisProducerConfiguration()
                .setAggregationEnabled(targetModule.isEnableAggregation())
                .setRegion(targetModule.getRegion())
                .setRateLimit(100)
                .setConnectTimeout(12000)
                .setRequestTimeout(12000)
                .setRecordTtl(Integer.MAX_VALUE)
                .setCredentialsProvider(targetModule.getCredentialsProvider());

        this.stream = new Stream(
                new KinesisProducer(producerConfiguration),
                targetModule.getStreamName(),
                targetModule.getLargeStreamRecordHandlingStrategy());
    }

    /**
     * Returns the configured stream.
     *
     * @throws IllegalArgumentException if no region and stream name were supplied
     */
    public Stream stream() {
        if (stream != null) {
            return stream;
        }
        throw new IllegalArgumentException("You must supply an AWS Region and Amazon Kinesis Data Stream name");
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.util.function.Supplier;
/**
 * Export output targets. Each constant supplies an {@link OutputWriter} factory plus the
 * conventions for reporting messages and return values (file-based targets print return
 * values to stdout; non-file targets print them to stderr so stdout stays clean for data).
 */
public enum Target implements CommandWriter {
    // Writes export files to the local filesystem.
    files {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            File file = pathSupplier.get().toFile();
            // The target counts as 'new' only if the file did not already exist.
            boolean isNewTarget = !(file.exists());
            return new PrintOutputWriter(file.getAbsolutePath(), isNewTarget, new BufferedWriter(new FileWriter(file)));
        }

        @Override
        public boolean isFileBased() {
            return true;
        }

        @Override
        public void writeReturnValue(String value) {
            System.out.println(value);
        }
    },
    // Writes export output directly to standard out.
    stdout {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            return new StdOutPrintOutputWriter();
        }

        @Override
        public boolean isFileBased() {
            return false;
        }

        @Override
        public void writeReturnValue(String value) {
            // stdout carries the exported data, so return values go to stderr.
            System.err.println(value);
        }
    },
    // Discards all export output.
    devnull {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            return new NoOpOutputWriter();
        }

        @Override
        public boolean isFileBased() {
            return false;
        }

        @Override
        public void writeReturnValue(String value) {
            System.err.println(value);
        }
    },
    // Writes export output to an Amazon Kinesis Data Stream via an intermediate file writer.
    stream {
        @Override
        public OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException {
            Path filePath = pathSupplier.get();
            File file = filePath.toFile();
            return new FileToStreamOutputWriter(
                    new KinesisStreamPrintOutputWriter(file.getAbsolutePath(), new FileWriter(file)),
                    filePath,
                    kinesisConfig);
        }

        @Override
        public boolean isFileBased() {
            return false;
        }

        @Override
        public void writeReturnValue(String value) {
            System.out.println(value);
        }
    };

    /** Status and progress messages always go to stderr, regardless of target. */
    @Override
    public void writeMessage(String value) {
        System.err.println(value);
    }

    /** Creates a writer for this target; the path supplier is only consulted by file-backed targets. */
    public abstract OutputWriter createOutputWriter(Supplier<Path> pathSupplier, KinesisConfig kinesisConfig) throws IOException;

    /** Returns true if this target persists output to the local filesystem. */
    public abstract boolean isFileBased();

    @Override
    public abstract void writeReturnValue(String value);
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
/**
 * A {@link PrintOutputWriter} that discards all output (used for the 'devnull' target).
 * Backed by an {@link OutputStream} whose write methods do nothing.
 */
public class NoOpOutputWriter extends PrintOutputWriter {

    // Symbolic output id reported by outputId() for this writer.
    private static final String NoOp = "NoOp";

    public NoOpOutputWriter() {
        // Removed the unreachable private constructor ladder that previously mirrored
        // every PrintOutputWriter constructor: only this (OutputStream, autoFlush)
        // pathway was ever used.
        super(NoOp, new NoOpOutputStream(), false);
    }

    @Override
    public void endCommit() {
        flush();
    }

    @Override
    public void close() {
        flush();
    }

    // Sink that silently swallows all bytes.
    private static class NoOpOutputStream extends OutputStream {
        @Override
        public void write(int b) throws IOException {
            // Do nothing
        }

        @Override
        public void write(byte[] b) throws IOException {
            // Do nothing
        }

        @Override
        public void write(byte[] b, int off, int len) throws IOException {
            // Do nothing
        }
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.*;
/**
 * An {@link OutputWriter} backed by a {@link PrintWriter}, adding an output identifier
 * and no-op commit/op lifecycle hooks that subclasses can override.
 * <p>
 * Only the two public constructors can mark the target as new; every other
 * constructor defaults {@code isNewTarget} to {@code false}.
 */
public class PrintOutputWriter extends PrintWriter implements OutputWriter {

    // Identifier for this output — a file path or a symbolic name such as "StdOut".
    private final String outputId;
    // True if the underlying target did not exist before this writer was created.
    private final boolean isNewTarget;

    /** Wraps a writer, treating the target as newly created. */
    public PrintOutputWriter(String outputId, Writer out) {
        this(outputId, true, out);
    }

    /** Wraps a writer with an explicit new-target flag. */
    public PrintOutputWriter(String outputId, boolean isNewTarget, Writer out) {
        super(out);
        this.outputId = outputId;
        this.isNewTarget = isNewTarget;
    }

    PrintOutputWriter(String outputId, Writer out, boolean autoFlush) {
        super(out, autoFlush);
        this.outputId = outputId;
        this.isNewTarget = false;
    }

    PrintOutputWriter(String outputId, OutputStream out) {
        super(out);
        this.outputId = outputId;
        this.isNewTarget = false;
    }

    PrintOutputWriter(String outputId, OutputStream out, boolean autoFlush) {
        super(out, autoFlush);
        this.outputId = outputId;
        this.isNewTarget = false;
    }

    PrintOutputWriter(String fileName) throws FileNotFoundException {
        super(fileName);
        this.outputId = fileName;
        this.isNewTarget = false;
    }

    PrintOutputWriter(String fileName, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(fileName, csn);
        this.outputId = fileName;
        this.isNewTarget = false;
    }

    PrintOutputWriter(File file) throws FileNotFoundException {
        super(file);
        this.outputId = file.getAbsolutePath();
        this.isNewTarget = false;
    }

    PrintOutputWriter(File file, String csn) throws FileNotFoundException, UnsupportedEncodingException {
        super(file, csn);
        this.outputId = file.getAbsolutePath();
        this.isNewTarget = false;
    }

    @Override
    public boolean isNewTarget() {
        return isNewTarget;
    }

    @Override
    public String outputId() {
        return outputId;
    }

    @Override
    public void startCommit() {
        // Do nothing
    }

    @Override
    public void endCommit() {
        flush();
    }

    @Override
    public Writer writer() {
        return this;
    }

    @Override
    public void startOp() {
        // Do nothing
    }

    @Override
    public void endOp() {
        // Do nothing
    }

    @Override
    public String lineSeparator() {
        return System.lineSeparator();
    }

    @Override
    public void close() {
        // Flush before closing so buffered output is never dropped.
        super.flush();
        super.close();
    }
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
/**
 * Formats for progress/status output while an export runs.
 */
public enum StatusOutputFormat {
    // No status output.
    None,
    // Minimal progress markers (presumably one dot per unit of work — confirm against callers).
    Dot,
    // Textual progress descriptions.
    Description
}
/*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.io;
import java.io.Writer;
/**
 * A {@link PrintOutputWriter} that frames each commit as a single JSON array:
 * '[' at commit start, ']' plus a platform line separator at commit end, and a
 * comma before every op after the first. Individual lines use no separator.
 */
public class KinesisStreamPrintOutputWriter extends PrintOutputWriter {

    // Records inside a commit are comma-delimited, so no per-line separator is emitted.
    private static final String LINE_SEPARATOR = "";

    // Ops written since the current commit started; reset on each startCommit().
    private int recordCount;

    KinesisStreamPrintOutputWriter(String outputId, Writer out) {
        super(outputId, out);
    }

    @Override
    public void startCommit() {
        recordCount = 0;
        write("[");
    }

    @Override
    public void endCommit() {
        write("]");
        write(System.lineSeparator());
    }

    @Override
    public Writer writer() {
        return this;
    }

    @Override
    public String lineSeparator() {
        return LINE_SEPARATOR;
    }

    @Override
    public void startOp() {
        // Emit a comma before every op except the first in the commit.
        if (recordCount++ > 0) {
            write(",");
        }
    }

    @Override
    public void endOp() {
        // Intentionally empty: no trailing output per op.
    }
}
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
import com.amazonaws.services.neptune.propertygraph.io.CsvPrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.JsonPrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.PrinterOptions;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * Airline CLI options module that gathers CSV/JSON formatting flags and
 * converts them into a {@link PrinterOptions} instance via {@link #config()}.
 */
public class PrinterOptionsModule {

    @Option(name = {"--exclude-type-definitions"}, description = "Exclude type definitions from CSV column headers (optional, default 'false').")
    @Once
    private boolean excludeTypeDefinitions = false;

    @Option(name = {"--escape-csv-headers"}, description = "Escape characters in CSV column headers (optional, default 'false').")
    @Once
    private boolean escapeCsvHeaders = false;

    @Option(name = {"--strict-cardinality"}, description = "Format all set and list cardinality properties as arrays in JSON, including properties with a single value (optional, default 'false').")
    @Once
    private boolean strictCardinality = false;

    @Option(name = {"--escape-newline"}, description = "Escape newline characters in CSV files (optional, default 'false').")
    @Once
    private boolean escapeNewline = false;

    @Option(name = {"--multi-value-separator"}, description = "Separator for multi-value properties in CSV output (optional, default ';').")
    @Once
    private String multiValueSeparator = ";";

    @Option(name = {"--token-prefix"}, description = "Token prefix (optional, default '~').")
    @Once
    private String tokenPrefix = "~";

    /**
     * Builds the printer options from the parsed command-line values.
     * Note the negation: the flag is 'exclude' on the command line but the
     * builder takes 'include'.
     */
    public PrinterOptions config(){
        CsvPrinterOptions csvPrinterOptions = CsvPrinterOptions.builder()
                .setMultiValueSeparator(multiValueSeparator)
                .setIncludeTypeDefinitions(!excludeTypeDefinitions)
                .setEscapeCsvHeaders(escapeCsvHeaders)
                .setEscapeNewline(escapeNewline)
                .setTokenPrefix(new TokenPrefix(tokenPrefix))
                .build();

        JsonPrinterOptions jsonPrinterOptions = JsonPrinterOptions.builder()
                .setStrictCardinality(strictCardinality)
                .setTokenPrefix(new TokenPrefix(tokenPrefix))
                .build();

        return new PrinterOptions(csvPrinterOptions, jsonPrinterOptions);
    }
}
/*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.RangeConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module exposing paging/range options for a property-graph export.
 * Field values are injected by the Airline CLI framework via the
 * {@code @Option} annotations.
 */
public class PropertyGraphRangeModule {
    // Number of items fetched per request; -1 means no explicit range size.
    @Option(name = {"-r", "--range", "--range-size"}, description = "Number of items to fetch per request (optional).")
    @Once
    private long rangeSize = -1;
    // Upper bound on the number of items exported (default: no limit).
    @Option(name = {"--limit"}, description = "Maximum number of items to export (optional).")
    @Once
    private long limit = Long.MAX_VALUE;
    // Number of leading items to skip before exporting.
    @Option(name = {"--skip"}, description = "Number of items to skip (optional).")
    @Once
    private long skip = 0;
    // Approximate node count; -1 means unknown.
    @Option(name = {"--approx-node-count"}, description = "Approximate number of nodes in the graph.")
    @Once
    private long approxNodeCount = -1;
    // Approximate edge count; -1 means unknown.
    @Option(name = {"--approx-edge-count"}, description = "Approximate number of edges in the graph.")
    @Once
    private long approxEdgeCount = -1;
    /**
     * Builds the range configuration from the CLI options.
     *
     * @return a {@link RangeConfig} combining range size, skip, limit and
     *         the approximate node/edge counts
     */
    public RangeConfig config(){
        return new RangeConfig(rangeSize, skip, limit, approxNodeCount, approxEdgeCount);
    }
}
| 4,223 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphSchemaInferencingModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.SchemaSamplingSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaCommand;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import java.util.Collection;
/**
 * CLI module controlling how the property-graph schema is inferred:
 * either by a full scan of the graph, or by sampling a subset of
 * nodes and edges. Field values are injected by the Airline framework.
 */
public class PropertyGraphSchemaInferencingModule {
    // When true, only a sample of nodes/edges is inspected to build the schema.
    @Option(name = {"--sample"}, description = "Select only a subset of nodes and edges when generating schema.")
    @Once
    private boolean sample = false;
    // Number of elements to inspect when sampling.
    @Option(name = {"--sample-size"}, description = "Schema sample size (optional, default 1000).")
    @Once
    private long sampleSize = 1000;
    public PropertyGraphSchemaInferencingModule(){
    }
    /**
     * @return {@code true} if schema generation should scan the whole graph
     *         (i.e. sampling was not requested)
     */
    public boolean isFullScan(){
        return !sample;
    }
    /**
     * Creates the schema-generation command for the given export specifications.
     *
     * @param exportSpecifications the export specifications to derive schema for
     * @param g                    the Gremlin traversal source to query
     * @return a {@link CreateGraphSchemaCommand} configured per the sampling options
     */
    public CreateGraphSchemaCommand createSchemaCommand(Collection<ExportSpecification> exportSpecifications,
                                                        GraphTraversalSource g){
        return new SchemaSamplingSpecification(sample, sampleSize).createSchemaCommand(exportSpecifications, g);
    }
}
| 4,224 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphConcurrencyModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module exposing the {@code --concurrency} option, which controls the
 * number of parallel queries used to run a property-graph export. The field
 * value is injected by the Airline framework.
 */
public class PropertyGraphConcurrencyModule {
    @Option(name = {"-cn", "--concurrency"}, description = "Concurrency – the number of parallel queries used to run the export (optional, default 4).")
    @Once
    private int concurrency = 4;

    /**
     * Builds a concurrency configuration that permits concurrent operations.
     *
     * @return a {@link ConcurrencyConfig} using the configured concurrency level
     */
    public ConcurrencyConfig config(){
        return config(true);
    }

    /**
     * Builds a concurrency configuration.
     *
     * @param allowConcurrentOperations when {@code false}, concurrency is forced to 1
     * @return a {@link ConcurrencyConfig} for the effective concurrency level
     */
    public ConcurrencyConfig config(boolean allowConcurrentOperations){
        int effectiveConcurrency = 1;
        if (allowConcurrentOperations) {
            effectiveConcurrency = concurrency;
        }
        return new ConcurrencyConfig(effectiveConcurrency);
    }
}
| 4,225 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/FeatureToggleModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import java.util.Collection;
import java.util.HashSet;
/**
 * CLI module collecting hidden {@code --feature-toggle} options used to
 * enable experimental features. Values are injected by the Airline framework.
 */
public class FeatureToggleModule {
    // Set of toggles enabled on the command line (may be repeated).
    @Option(name = {"--feature-toggle"}, description = "Name of a feature to enable.", hidden = true)
    @AllowedEnumValues(FeatureToggle.class)
    private Collection<FeatureToggle> featureToggles = new HashSet<>();
    /**
     * @return an immutable view over the enabled feature toggles
     */
    public FeatureToggles featureToggles() {
        return new FeatureToggles(featureToggles);
    }
}
| 4,226 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphSerializationModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.io.SerializationConfig;
import com.amazonaws.services.neptune.propertygraph.schema.TokensOnly;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.tinkerpop.gremlin.driver.ser.Serializers;
/**
 * CLI module exposing Gremlin driver serialization options (serializer,
 * max content length, batch size). Field values are injected by the
 * Airline framework.
 */
public class PropertyGraphSerializationModule {
    // Name of a TinkerPop Serializers enum constant.
    @Option(name = {"--serializer"}, description = "Message serializer – (optional, default 'GRAPHBINARY_V1D0').")
    @AllowedEnumValues(Serializers.class)
    @Once
    private String serializer = Serializers.GRAPHBINARY_V1D0.name();
    // When true, a JanusGraph-compatible serializer is used instead.
    @Option(name = {"--janus"}, description = "Use JanusGraph serializer.")
    @Once
    private boolean useJanusSerializer = false;
    // Maximum size in bytes of a single response frame.
    @Option(name = {"--max-content-length"}, description = "Max content length (optional, default 50000000).")
    @Once
    private int maxContentLength = 50000000;
    // Result batch size for Gremlin queries.
    @Option(name = {"-b", "--batch-size"}, description = "Batch size (optional, default 64). Reduce this number if your queries trigger CorruptedFrameExceptions.")
    @Once
    private int batchSize = NeptuneGremlinClient.DEFAULT_BATCH_SIZE;
    /**
     * Builds the serialization configuration from the CLI options.
     *
     * @return a {@link SerializationConfig} combining serializer name,
     *         max content length, batch size and the JanusGraph flag
     */
    public SerializationConfig config(){
        return new SerializationConfig(serializer, maxContentLength, batchSize, useJanusSerializer);
    }
}
| 4,227 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CommonConnectionModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import com.amazonaws.services.neptune.cluster.NeptuneClusterMetadata;
import com.amazonaws.services.neptune.cluster.ProxyConfig;
import com.amazonaws.services.neptune.export.EndpointValidator;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.util.Collection;
import java.util.HashSet;
import java.util.function.Supplier;
/**
 * CLI module that gathers Neptune connection options (endpoints or cluster ID,
 * port, IAM auth, SSL, and optional load-balancer/proxy settings) and turns
 * them into a {@link ConnectionConfig}. Field values are injected by the
 * Airline framework.
 */
public class CommonConnectionModule {
    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();
    // One or more instance endpoints; requests are load balanced across them.
    @Option(name = {"-e", "--endpoint"}, description = "Neptune endpoint(s) – supply multiple instance endpoints if you want to load balance requests across a cluster.", title = "endpoint")
    @RequireSome(tag = "endpoint or clusterId")
    private Collection<String> endpoints = new HashSet<>();
    // Alternative to explicit endpoints: resolve all instance endpoints from the cluster ID.
    @Option(name = {"--cluster-id", "--cluster", "--clusterid"}, description = "ID of an Amazon Neptune cluster. If you specify a cluster ID, neptune-export will use all of the instance endpoints in the cluster in addition to any endpoints you have specified using the endpoint options.")
    @Once
    @RequireSome(tag = "endpoint or clusterId")
    private String clusterId;
    @Option(name = {"-p", "--port"}, description = "Neptune port (optional, default 8182).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int port = 8182;
    @Option(name = {"--use-iam-auth"}, description = "Use IAM database authentication to authenticate to Neptune (remember to set the SERVICE_REGION environment variable).")
    @Once
    private boolean useIamAuth = false;
    // Deprecated and unused: SSL is always on unless --disable-ssl is supplied.
    @Option(name = {"--use-ssl"}, description = "Enables connectivity over SSL. This option is deprecated: neptune-export will always connect via SSL unless you use --disable-ssl to explicitly disable connectivity over SSL.")
    @Once
    private boolean useSsl = true;
    @Option(name = {"--disable-ssl"}, description = "Disables connectivity over SSL.")
    @Once
    private boolean disableSsl = false;
    // The three proxy-style endpoints below are mutually exclusive (shared tag).
    @Option(name = {"--nlb-endpoint"}, description = "Network load balancer endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through a network load balancer (NLB) – see https://github.com/aws-samples/aws-dbs-refarch-graph/tree/master/src/connecting-using-a-load-balancer#connecting-to-amazon-neptune-from-clients-outside-the-neptune-vpc-using-aws-network-load-balancer).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String networkLoadBalancerEndpoint;
    @Option(name = {"--alb-endpoint"}, description = "Application load balancer endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through an application load balancer (ALB) – see https://github.com/aws-samples/aws-dbs-refarch-graph/tree/master/src/connecting-using-a-load-balancer#connecting-to-amazon-neptune-from-clients-outside-the-neptune-vpc-using-aws-application-load-balancer).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String applicationLoadBalancerEndpoint;
    @Option(name = {"--lb-port"}, description = "Load balancer port (optional, default 80).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int loadBalancerPort = 80;
    @Option(name = {"--proxy-endpoint"}, description = "Proxy endpoint (optional: use only if connecting to an IAM DB enabled Neptune cluster through a proxy such as a bastion host).")
    @Once
    @MutuallyExclusiveWith(tag = "proxy-endpoint")
    private String proxyEndpoint;
    @Option(name = {"--proxy-port"}, description = "Proxy port (optional, default 8182).")
    @Port(acceptablePorts = {PortType.SYSTEM, PortType.USER})
    @Once
    private int proxyPort = 8182;
    // FIX: removed a copy-pasted @Port restriction that was applied to this
    // boolean flag — @Port is only meaningful on numeric port options.
    @Option(name = {"--proxy-remove-host-header"}, description = "Remove Host header after Sigv4 signing request to be forwarded via proxy.")
    @Once
    private boolean removeProxyHostHeader = false;
    // Lazily supplies an AmazonNeptune client for cluster-metadata lookups.
    private final Supplier<AmazonNeptune> amazonNeptuneClientSupplier;

    public CommonConnectionModule(Supplier<AmazonNeptune> amazonNeptuneClientSupplier) {
        this.amazonNeptuneClientSupplier = amazonNeptuneClientSupplier;
    }

    /**
     * Looks up cluster metadata, preferring the explicit cluster ID when
     * supplied, otherwise resolving it from the configured endpoints.
     *
     * @return metadata describing the target Neptune cluster
     */
    public NeptuneClusterMetadata clusterMetadata(){
        if (StringUtils.isNotEmpty(clusterId)) {
            return NeptuneClusterMetadata.createFromClusterId(clusterId, amazonNeptuneClientSupplier);
        } else {
            return NeptuneClusterMetadata.createFromEndpoints(endpoints, amazonNeptuneClientSupplier);
        }
    }

    /**
     * Builds the connection configuration from the CLI options.
     * <p>
     * When a cluster ID is supplied, all of the cluster's instance endpoints
     * are added to any explicitly supplied endpoints. At most one of the
     * NLB/ALB/proxy endpoints can be set (enforced by the shared
     * {@code @MutuallyExclusiveWith} tag); whichever is present becomes the
     * {@link ProxyConfig}.
     *
     * @return a fully populated {@link ConnectionConfig}
     * @throws IllegalStateException if neither a cluster ID nor any endpoint was supplied
     */
    public ConnectionConfig config() {
        if (StringUtils.isNotEmpty(clusterId)) {
            endpoints.addAll(clusterMetadata().endpoints());
        }
        if (endpoints.isEmpty()) {
            throw new IllegalStateException("You must supply a cluster ID or one or more endpoints");
        }
        ProxyConfig proxyConfig = null;
        if (StringUtils.isNotEmpty(networkLoadBalancerEndpoint)) {
            proxyConfig = new ProxyConfig(networkLoadBalancerEndpoint, loadBalancerPort, false);
        } else if (StringUtils.isNotEmpty(applicationLoadBalancerEndpoint)) {
            proxyConfig = new ProxyConfig(applicationLoadBalancerEndpoint, loadBalancerPort, true);
        } else if (StringUtils.isNotEmpty(proxyEndpoint)) {
            proxyConfig = new ProxyConfig(proxyEndpoint, proxyPort, removeProxyHostHeader);
        }
        return new ConnectionConfig(
                clusterId,
                EndpointValidator.validate(endpoints),
                port,
                useIamAuth,
                !disableSsl,
                proxyConfig,
                credentialProfileModule.getCredentialsProvider()
        );
    }
}
| 4,228 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/GraphSchemaProviderModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.MutuallyExclusiveWith;
import org.apache.commons.lang.StringUtils;
import java.io.IOException;
import java.net.URI;
/**
 * CLI module that supplies a {@link GraphSchema}, sourced either from a
 * config-file URI or from inline JSON. The two options are mutually
 * exclusive; at most one may be supplied. Field values are injected by the
 * Airline framework.
 */
public class GraphSchemaProviderModule {
    @Option(name = {"-c", "--config-file", "--filter-config-file"}, description = "Path to JSON schema config file (file path, or 'https' or 's3' URI).")
    @MutuallyExclusiveWith(tag = "configFile or config")
    private URI configFile;
    @Option(name = {"--config", "--filter"}, description = "JSON schema for property graph.")
    @MutuallyExclusiveWith(tag = "configFile or config")
    private String configJson;
    // When true, the absence of both sources is an error rather than an empty schema.
    private final boolean configIsMandatory;

    public GraphSchemaProviderModule(boolean configIsMandatory) {
        this.configIsMandatory = configIsMandatory;
    }

    /**
     * Resolves the graph schema, preferring the config-file URI, then inline
     * JSON, then an empty schema (if configuration is optional).
     *
     * @return the resolved {@link GraphSchema}
     * @throws IOException           if the config file or JSON cannot be read
     * @throws IllegalStateException if configuration is mandatory but neither
     *                               source was supplied
     */
    public GraphSchema graphSchema() throws IOException {
        if (configFile != null) {
            JsonResource<GraphSchema, Boolean> resource = new JsonResource<>(
                    "Config file",
                    configFile,
                    GraphSchema.class);
            return resource.get();
        }
        if (StringUtils.isNotEmpty(configJson)) {
            return GraphSchema.fromJson(new ObjectMapper().readTree(configJson));
        }
        if (configIsMandatory) {
            throw new IllegalStateException("You must supply either a configuration file URI or inline configuration JSON");
        }
        return new GraphSchema();
    }
}
| 4,229 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/ProfilesModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.profiles.ProfilesConfig;
import com.github.rvesse.airline.annotations.Option;
import java.util.Collection;
import java.util.HashSet;
/**
 * CLI module collecting {@code --profile} options naming export profiles.
 * Values are injected by the Airline framework.
 */
public class ProfilesModule {
    // Names of the export profiles selected on the command line (may be repeated).
    @Option(name = {"--profile"}, description = "Name of an export profile.")
    private Collection<String> profiles = new HashSet<>();
    /**
     * @return a {@link ProfilesConfig} wrapping the selected profile names
     */
    public ProfilesConfig config() {
        return new ProfilesConfig(profiles);
    }
}
| 4,230 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/RdfTargetModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.io.*;
import com.amazonaws.services.neptune.rdf.io.RdfExportFormat;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
/**
 * Target module for RDF exports. Adds the RDF {@code --format} option on top
 * of the common target options inherited from {@link AbstractTargetModule}.
 */
public class RdfTargetModule extends AbstractTargetModule {
    @Option(name = {"--format"}, description = "Output format (optional, default 'turtle').")
    @Once
    @AllowedEnumValues(RdfExportFormat.class)
    private RdfExportFormat format = RdfExportFormat.turtle;

    /**
     * Builds the RDF target configuration for the given output directories.
     *
     * @param directories the output directory layout
     * @return an {@link RdfTargetConfig} combining directories, Kinesis
     *         settings, the output target and the RDF format
     */
    public RdfTargetConfig config(Directories directories) {
        KinesisConfig kinesisConfig = new KinesisConfig(this);
        return new RdfTargetConfig(directories, kinesisConfig, getOutput(), format);
    }

    /**
     * The streams-simple-JSON format uses the simple-streams directory layout;
     * every other RDF format uses the standard RDF layout.
     */
    @Override
    protected DirectoryStructure directoryStructure(){
        return format == RdfExportFormat.neptuneStreamsSimpleJson
                ? DirectoryStructure.SimpleStreamsOutput
                : DirectoryStructure.Rdf;
    }
}
| 4,231 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/AwsCliModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.client.builder.AwsClientBuilder;
import com.amazonaws.services.neptune.AmazonNeptune;
import com.amazonaws.services.neptune.AmazonNeptuneClientBuilder;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.util.function.Supplier;
/**
 * Supplies {@link AmazonNeptune} clients configured from hidden AWS-CLI
 * options (custom endpoint URL and region) and from the credential profile
 * module, when present. Field values are injected by the Airline framework.
 */
public class AwsCliModule implements Supplier<AmazonNeptune> {
    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();
    @Option(name = {"--aws-cli-endpoint-url"}, description = "AWS CLI endpoint URL.", hidden = true)
    @Once
    private String awsCliEndpointUrl;
    @Option(name = {"--aws-cli-region"}, description = "AWS CLI region.", hidden = true)
    @Once
    private String awsCliRegion;

    /**
     * Builds a new {@link AmazonNeptune} client. A custom endpoint
     * configuration is applied only when BOTH the endpoint URL and region
     * options are supplied; credentials and region are applied when the
     * credential profile module provides a credentials provider.
     */
    @Override
    public AmazonNeptune get() {
        AmazonNeptuneClientBuilder builder = AmazonNeptuneClientBuilder.standard();
        boolean hasCustomEndpoint =
                StringUtils.isNotEmpty(awsCliEndpointUrl) && StringUtils.isNotEmpty(awsCliRegion);
        if (hasCustomEndpoint) {
            AwsClientBuilder.EndpointConfiguration endpointConfiguration =
                    new AwsClientBuilder.EndpointConfiguration(awsCliEndpointUrl, awsCliRegion);
            builder = builder.withEndpointConfiguration(endpointConfiguration);
        }
        AWSCredentialsProvider credentialsProvider = credentialProfileModule.getCredentialsProvider();
        if (credentialsProvider != null) {
            builder = builder
                    .withCredentials(credentialsProvider)
                    .withRegion(credentialProfileModule.getRegionProvider().getRegion());
        }
        return builder.build();
    }
}
| 4,232 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/NeptuneStreamsModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.*;
import com.amazonaws.services.neptune.propertygraph.io.JsonResource;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module exposing the {@code --include-last-event-id} flag, which decides
 * whether the export should capture the last event ID from the Neptune stream.
 * The field value is injected by the Airline framework.
 */
public class NeptuneStreamsModule {
    @Option(name = {"--include-last-event-id"}, description = "Get the last event ID from the Amazon Neptune stream, if enabled, and save it to a JSON file (optional, default 'false').")
    @Once
    private boolean includeLastEventId = false;

    /**
     * Chooses the strategy for capturing the last stream event ID.
     *
     * @param cluster         the cluster to query
     * @param eventIdResource destination resource for the captured event ID
     * @return a {@link GetLastEventIdTask} when the flag is set, otherwise a
     *         no-op {@link DoNotGetLastEventIdTask}
     */
    public GetLastEventIdStrategy lastEventIdStrategy(Cluster cluster, JsonResource<EventId, Object> eventIdResource){
        return includeLastEventId
                ? new GetLastEventIdTask(cluster, eventIdResource)
                : new DoNotGetLastEventIdTask();
    }
}
| 4,233 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphScopeModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.*;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.propertygraph.schema.TokensOnly;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * CLI module defining which nodes and edges fall within the scope of a
 * property-graph export (labels, scope, token-only mode, and edge-label
 * strategy). Field values are injected by the Airline framework.
 */
public class PropertyGraphScopeModule {
    // Node labels to include; empty means all labels.
    @Option(name = {"-nl", "--node-label"}, description = "Labels of nodes to be included in config (optional, default all labels).",
            arity = 1)
    private List<String> nodeLabels = new ArrayList<>();
    // Edge labels to include; empty means all labels.
    @Option(name = {"-el", "--edge-label"}, description = "Labels of edges to be included in config (optional, default all labels).",
            arity = 1)
    private List<String> edgeLabels = new ArrayList<>();
    // Overall export scope (e.g. nodes, edges, or all).
    @Option(name = {"-s", "--scope"}, description = "Scope (optional, default 'all').")
    @Once
    @AllowedEnumValues(Scope.class)
    private Scope scope = Scope.all;
    // When enabled, only token fields (~id, ~label, ~from, ~to) are exported.
    @Option(name = {"--tokens-only"}, description = "Export tokens (~id, ~label, ~from, ~to) only (optional, default 'off').")
    @Once
    @AllowedEnumValues(TokensOnly.class)
    private TokensOnly tokensOnly = TokensOnly.off;
    // Controls whether edges are classified by label alone or by (start, label, end).
    @Option(name = {"--edge-label-strategy"}, description = "Export edges by their edge labels, or by a combination of their start vertex label, edge label, and end vertex label (optional, default 'edgeLabelsOnly').")
    @Once
    @AllowedEnumValues(EdgeLabelStrategy.class)
    private EdgeLabelStrategy edgeLabelStrategy = EdgeLabelStrategy.edgeLabelsOnly;
    /**
     * Builds export specifications against an empty {@link GraphSchema}.
     *
     * @param stats          export statistics collector
     * @param gremlinFilters node/edge Gremlin filters to apply
     * @param featureToggles enabled feature toggles
     * @return export specifications derived from the CLI scope options
     */
    public Collection<ExportSpecification> exportSpecifications(ExportStats stats,
                                                                GremlinFilters gremlinFilters,
                                                                FeatureToggles featureToggles){
        return exportSpecifications(new GraphSchema(), gremlinFilters, stats, featureToggles);
    }
    /**
     * Builds export specifications against the supplied schema, delegating
     * to the selected {@link Scope}.
     *
     * @param graphSchema    schema describing the graph
     * @param gremlinFilters node/edge Gremlin filters to apply
     * @param stats          export statistics collector
     * @param featureToggles enabled feature toggles
     * @return export specifications derived from the CLI scope options
     */
    public Collection<ExportSpecification> exportSpecifications(GraphSchema graphSchema,
                                                                GremlinFilters gremlinFilters,
                                                                ExportStats stats,
                                                                FeatureToggles featureToggles){
        return scope.exportSpecifications(
                graphSchema,
                Label.forLabels(nodeLabels),
                Label.forLabels(edgeLabels),
                gremlinFilters,
                tokensOnly,
                edgeLabelStrategy,
                stats,
                featureToggles);
    }
}
| 4,234 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/AbstractTargetModule.java | package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.neptune.io.CommandWriter;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.DirectoryStructure;
import com.amazonaws.services.neptune.io.LargeStreamRecordHandlingStrategy;
import com.amazonaws.services.neptune.io.Target;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.PathKind;
import com.github.rvesse.airline.annotations.restrictions.Required;
import org.apache.commons.lang.StringUtils;
import javax.inject.Inject;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import static com.amazonaws.services.neptune.util.AWSCredentialsUtil.getSTSAssumeRoleCredentialsProvider;
/**
 * Base CLI module for export targets. Collects the output directory, target
 * kind (files, stdout, stream, …) and Amazon Kinesis stream settings shared
 * by the concrete target modules. Field values are injected by the Airline
 * framework. Subclasses choose the directory layout via
 * {@link #directoryStructure()}.
 */
public abstract class AbstractTargetModule implements CommandWriter {
    @Inject
    private CredentialProfileModule credentialProfileModule = new CredentialProfileModule();
    @Option(name = {"-d", "--dir"}, description = "Root directory for output.")
    @Required
    @com.github.rvesse.airline.annotations.restrictions.Path(mustExist = false, kind = PathKind.DIRECTORY)
    @Once
    private File directory;
    @Option(name = {"-t", "--tag"}, description = "Directory prefix (optional).")
    @Once
    private String tag = "";
    // FIX: help text previously claimed the default was 'file'; the enum
    // constant and field default are Target.files.
    @Option(name = {"-o", "--output"}, description = "Output target (optional, default 'files').")
    @Once
    @AllowedEnumValues(Target.class)
    private Target output = Target.files;
    @Option(name = {"--stream-name"}, description = "Name of an Amazon Kinesis Data Stream.")
    @Once
    private String streamName;
    @Option(name = {"--region", "--stream-region"}, description = "AWS Region in which your Amazon Kinesis Data Stream is located.")
    @Once
    private String region;
    @Option(name = {"--stream-large-record-strategy"}, description = "Strategy for dealing with records to be sent to Amazon Kinesis that are larger than 1 MB.")
    @Once
    @AllowedEnumValues(LargeStreamRecordHandlingStrategy.class)
    private LargeStreamRecordHandlingStrategy largeStreamRecordHandlingStrategy = LargeStreamRecordHandlingStrategy.splitAndShred;
    // FIX: removed a stray closing parenthesis from the help text.
    @Option(name = {"--disable-stream-aggregation"}, description = "Disable aggregation of Kinesis Data Stream records.")
    @Once
    private boolean disableAggregation = false;
    @Option(name = {"--stream-role-arn"}, description = "Optional. Assume specified role for upload to Kinesis stream.")
    @Once
    private String streamRoleArn = null;
    @Option(name = {"--stream-role-session-name"}, description = "Optional. To be used with '--stream-role-arn'. Use specified session name when assuming stream role.")
    @Once
    private String streamRoleSessionName = "Neptune-Export";
    @Option(name = {"--stream-role-external-id"}, description = "Optional. To be used with '--stream-role-arn'. Use specified external id when assuming stream role.")
    @Once
    private String streamRoleExternalId = null;
    // Defaults to a fresh UUID with dashes removed.
    @Option(name = {"--export-id"}, description = "Export ID")
    @Once
    private String exportId = UUID.randomUUID().toString().replace("-", "");
    @Option(name = {"--partition-directories"}, description = "Partition directory path (e.g. 'year=2021/month=07/day=21').")
    @Once
    private String partitionDirectories = "";

    public AbstractTargetModule() {}

    public AbstractTargetModule(Target target) {
        this.output = target;
    }

    /** @return the root output directory */
    public File getDirectory() {
        return directory;
    }

    /** @return the optional directory prefix tag ("" when unset) */
    public String getTag() {
        return tag;
    }

    /** @return the selected output target */
    public Target getOutput() {
        return output;
    }

    /** @return the Kinesis stream name, or {@code null} if not streaming */
    public String getStreamName() {
        return streamName;
    }

    /** @return the Kinesis stream region, or {@code null} if not supplied */
    public String getRegion() {
        return region;
    }

    /** @return the strategy for Kinesis records larger than 1 MB */
    public LargeStreamRecordHandlingStrategy getLargeStreamRecordHandlingStrategy() {
        return largeStreamRecordHandlingStrategy;
    }

    /** @return {@code true} unless --disable-stream-aggregation was supplied */
    public boolean isEnableAggregation() {
        return !disableAggregation;
    }

    /**
     * Creates the output directories using the subclass's directory structure.
     *
     * @return the created {@link Directories}
     * @throws IOException if the directories cannot be created
     */
    public Directories createDirectories() throws IOException {
        return Directories.createFor(directoryStructure(), directory, exportId, tag, partitionDirectories );
    }

    /**
     * Creates the output directories using an explicit directory structure.
     *
     * @param directoryStructure the layout to create
     * @return the created {@link Directories}
     * @throws IOException if the directories cannot be created
     */
    public Directories createDirectories(DirectoryStructure directoryStructure) throws IOException {
        return Directories.createFor(directoryStructure, directory, exportId, tag, partitionDirectories );
    }

    @Override
    public void writeReturnValue(String value){
        output.writeReturnValue(value);
    }

    @Override
    public void writeMessage(String value) {
        output.writeMessage(value);
    }

    /** @return the directory layout appropriate for this target type */
    protected abstract DirectoryStructure directoryStructure();

    /**
     * Resolves the credentials provider for Kinesis uploads. When a stream
     * role ARN is supplied, an STS assume-role provider is layered on top of
     * the base credentials; otherwise the base provider is returned as-is.
     */
    public AWSCredentialsProvider getCredentialsProvider() {
        if (StringUtils.isEmpty(streamRoleArn)) {
            return credentialProfileModule.getCredentialsProvider();
        }
        return getSTSAssumeRoleCredentialsProvider(streamRoleArn, streamRoleSessionName, streamRoleExternalId, credentialProfileModule.getCredentialsProvider(), region);
    }
}
| 4,235 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/GremlinFiltersModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.propertygraph.GremlinFilters;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
/**
 * CLI module collecting the Gremlin filter options used to restrict which nodes
 * and edges a property graph export visits.
 */
public class GremlinFiltersModule {
@Option(name = {"--gremlin-node-filter"}, description = "Gremlin steps for filtering nodes (overrides --gremlin-filter).")
@Once
private String gremlinNodeFilter;
@Option(name = {"--gremlin-edge-filter"}, description = "Gremlin steps for filtering edges (overrides --gremlin-filter).")
@Once
private String gremlinEdgeFilter;
@Option(name = {"--gremlin-filter"}, description = "Gremlin steps for filtering nodes and edges.")
@Once
private String gremlinFilter;
@Option(name = {"--filter-edges-early"}, description = "Configures edge exports to apply all filters to the " +
"traversal before adding range() steps for concurrency. Results in faster exports for simple fast filters which remove most results.")
@Once
private boolean filterEdgesEarly = false;
/**
 * Bundles the parsed filter options into a GremlinFilters value object.
 * Unset options are passed through as null; interpretation is delegated to GremlinFilters.
 */
public GremlinFilters filters(){
return new GremlinFilters(gremlinFilter, gremlinNodeFilter, gremlinEdgeFilter, filterEdgesEarly);
}
}
| 4,236 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/RdfExportScopeModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.rdf.*;
import com.amazonaws.services.neptune.rdf.io.*;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedEnumValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
/**
 * CLI module selecting what to export from an RDF graph: the whole graph, edges
 * only, or the results of a user-supplied SPARQL query.
 */
public class RdfExportScopeModule {

    @Option(name = {"--rdf-export-scope"}, description = "Export scope (optional, default 'graph').")
    @Once
    @AllowedEnumValues(RdfExportScope.class)
    private RdfExportScope scope = RdfExportScope.graph;

    @Option(name = {"--sparql"}, description = "SPARQL query.")
    @Once
    private String query;

    /**
     * Creates the export job matching the configured scope.
     *
     * @param client       SPARQL client to run the export against
     * @param targetConfig target describing where the RDF output is written
     * @return the export job for the configured scope
     * @throws IllegalStateException if the scope is 'query' but no SPARQL query was
     *                               supplied, or if the scope is unrecognized
     */
    public ExportRdfJob createJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) {
        // Query scope is the only one that needs extra validation, so handle it first.
        if (scope == RdfExportScope.query) {
            if (StringUtils.isEmpty(query)) {
                throw new IllegalStateException("You must supply a SPARQL query if exporting from a query");
            }
            return new ExportRdfFromQuery(client, targetConfig, query);
        }
        if (scope == RdfExportScope.edges) {
            return new ExportRdfEdgesJob(client, targetConfig);
        }
        if (scope == RdfExportScope.graph) {
            return new ExportRdfGraphJob(client, targetConfig);
        }
        throw new IllegalStateException(String.format("Unknown export scope: %s", scope));
    }

    /** Returns the configured scope's enum name (e.g. "graph"). */
    public String scope() {
        return scope.name();
    }
}
| 4,237 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CloneClusterModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.cluster.*;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.AllowedValues;
import com.github.rvesse.airline.annotations.restrictions.Once;
import com.github.rvesse.airline.annotations.restrictions.ranges.IntegerRange;
/**
 * CLI module that controls whether the export runs against a temporary clone of
 * the source Amazon Neptune cluster rather than against the cluster itself.
 */
public class CloneClusterModule {
@Option(name = {"--clone-cluster"}, description = "Clone an Amazon Neptune cluster.")
@Once
private boolean cloneCluster = false;
// Accepted instance types for the cloned cluster, listed both with and without the 'db.' prefix.
@Option(name = {"--clone-cluster-instance-type"}, description = "Instance type for cloned cluster (by default neptune-export will use the same instance type as the source cluster).")
@Once
@AllowedValues(allowedValues = {
"db.r4.large",
"db.r4.xlarge",
"db.r4.2xlarge",
"db.r4.4xlarge",
"db.r4.8xlarge",
"db.r5.large",
"db.r5.xlarge",
"db.r5.2xlarge",
"db.r5.4xlarge",
"db.r5.8xlarge",
"db.r5.12xlarge",
"db.r5.16xlarge",
"db.r5.24xlarge",
"db.r5d.large",
"db.r5d.xlarge",
"db.r5d.2xlarge",
"db.r5d.4xlarge",
"db.r5d.8xlarge",
"db.r5d.12xlarge",
"db.r5d.16xlarge",
"db.r5d.24xlarge",
"db.r6g.large",
"db.r6g.xlarge",
"db.r6g.2xlarge",
"db.r6g.4xlarge",
"db.r6g.8xlarge",
"db.r6g.12xlarge",
"db.r6g.16xlarge",
"db.x2g.large",
"db.x2g.xlarge",
"db.x2g.2xlarge",
"db.x2g.4xlarge",
"db.x2g.8xlarge",
"db.x2g.12xlarge",
"db.x2g.16xlarge",
"db.t3.medium",
"db.t4g.medium",
"r4.large",
"r4.xlarge",
"r4.2xlarge",
"r4.4xlarge",
"r4.8xlarge",
"r5.large",
"r5.xlarge",
"r5.2xlarge",
"r5.4xlarge",
"r5.8xlarge",
"r5.12xlarge",
"r5.16xlarge",
"r5.24xlarge",
"r5d.large",
"r5d.xlarge",
"r5d.2xlarge",
"r5d.4xlarge",
"r5d.8xlarge",
"r5d.12xlarge",
"r5d.16xlarge",
"r5d.24xlarge",
"r6g.large",
"r6g.xlarge",
"r6g.2xlarge",
"r6g.4xlarge",
"r6g.8xlarge",
"r6g.12xlarge",
"r6g.16xlarge",
"x2g.large",
"x2g.xlarge",
"x2g.2xlarge",
"x2g.4xlarge",
"x2g.8xlarge",
"x2g.12xlarge",
"x2g.16xlarge",
"t3.medium",
"t4g.medium"})
private String cloneClusterInstanceType;
@Option(name = {"--clone-cluster-replica-count"}, description = "Number of read replicas to add to the cloned cluster (default, 0).")
@Once
@IntegerRange(min = 0, minInclusive = true, max = 15, maxInclusive = true)
private int replicaCount = 0;
@Option(name = {"--clone-cluster-max-concurrency"}, description = "Limits concurrency when exporting from cloned cluster (default, no limit).", hidden = true)
@Once
private int maxConcurrency = -1;
@Option(name = {"--clone-cluster-engine-version"}, description = "Cloned cluster Neptune engine version (default, latest).", hidden = true)
@Once
private String engineVersion;
@Option(name = {"--clone-cluster-correlation-id"}, description = "Correlation ID to be added to a correlation-id tag on the cloned cluster.")
@Once
private String cloneCorrelationId;
public CloneClusterModule() {
}
/**
 * Returns the Cluster the export should run against.
 * If cloning is disabled this wraps the source cluster as-is; if enabled it either
 * creates a real clone, or a simulated one when the Simulate_Cloned_Cluster feature
 * toggle is set (avoids provisioning real AWS resources).
 *
 * @throws Exception propagated from the underlying clone operation
 */
public Cluster cloneCluster(NeptuneClusterMetadata clusterMetadata,
ConnectionConfig connectionConfig,
ConcurrencyConfig concurrencyConfig,
FeatureToggles featureToggles) throws Exception {
clusterMetadata.printDetails();
if (cloneCluster) {
if (featureToggles.containsFeature(FeatureToggle.Simulate_Cloned_Cluster)) {
return new SimulatedCloneCluster(clusterMetadata).cloneCluster(connectionConfig, concurrencyConfig);
} else {
CloneCluster command = new CloneCluster(
clusterMetadata,
cloneClusterInstanceType,
replicaCount,
maxConcurrency,
engineVersion,
cloneCorrelationId);
return command.cloneCluster(connectionConfig, concurrencyConfig);
}
} else {
return new DoNotCloneCluster(clusterMetadata).cloneCluster(connectionConfig, concurrencyConfig);
}
}
}
| 4,238 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/CredentialProfileModule.java | package com.amazonaws.services.neptune.cli;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.regions.AwsProfileRegionProvider;
import com.amazonaws.regions.AwsRegionProvider;
import com.amazonaws.regions.AwsRegionProviderChain;
import com.amazonaws.regions.DefaultAwsRegionProviderChain;
import com.amazonaws.services.neptune.util.AWSCredentialsUtil;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.Once;
import org.apache.commons.lang.StringUtils;
/**
 * CLI module for selecting a named AWS credentials profile (and optionally a
 * non-default credentials config file) to authenticate with.
 */
public class CredentialProfileModule {

    @Option(name = {"--credentials-profile"}, description = "Use profile from credentials config file.", hidden = true)
    @Once
    private String credentialsProfile;

    @Option(name = {"--credentials-config-file"}, description = "Load credentials profile from specified config file.", hidden = true)
    @Once
    private String credentialsConfigFilePath;

    /**
     * Returns a credentials provider for the configured profile/config file.
     * Interpretation of null/empty values is delegated to AWSCredentialsUtil.
     */
    public AWSCredentialsProvider getCredentialsProvider() {
        return AWSCredentialsUtil.getProfileCredentialsProvider(credentialsProfile, credentialsConfigFilePath);
    }

    /**
     * Returns a region provider. When a profile is configured, the profile's region
     * is consulted first, falling back to the default provider chain; otherwise
     * only the default chain is used.
     */
    public AwsRegionProvider getRegionProvider() {
        if (StringUtils.isNotEmpty(credentialsProfile)) {
            return new AwsRegionProviderChain(
                    new AwsProfileRegionProvider(credentialsProfile),
                    new DefaultAwsRegionProviderChain());
        }
        return new DefaultAwsRegionProviderChain();
    }
}
| 4,239 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/cli/PropertyGraphTargetModule.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.cli;
import com.amazonaws.services.neptune.io.*;
import com.amazonaws.services.neptune.propertygraph.io.PrinterOptions;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphExportFormat;
import com.amazonaws.services.neptune.propertygraph.io.PropertyGraphTargetConfig;
import com.github.rvesse.airline.annotations.Option;
import com.github.rvesse.airline.annotations.restrictions.*;
/**
 * CLI module describing the output target for property graph exports: export
 * format, per-label file merging, and per-label directory layout.
 */
public class PropertyGraphTargetModule extends AbstractTargetModule {

    @Option(name = {"--format"}, description = "Output format (optional, default 'csv').")
    @Once
    @AllowedEnumValues(PropertyGraphExportFormat.class)
    private PropertyGraphExportFormat format = PropertyGraphExportFormat.csv;

    @Option(name = {"--merge-files"}, description = "Merge files for each vertex or edge label (currently only supports CSV files for export-pg).")
    @Once
    private boolean mergeFiles = false;

    @Option(name = {"--per-label-directories"}, description = "Create a subdirectory for each distinct vertex or edge label.")
    @Once
    private boolean perLabelDirectories = false;

    public PropertyGraphTargetModule() {
    }

    public PropertyGraphTargetModule(Target target) {
        super(target);
    }

    /**
     * Builds the target config for the export.
     *
     * @throws IllegalArgumentException if --merge-files is combined with a non-CSV format
     */
    public PropertyGraphTargetConfig config(Directories directories, PrinterOptions printerOptions) {
        boolean csvFormat = format == PropertyGraphExportFormat.csv
                || format == PropertyGraphExportFormat.csvNoHeaders;
        if (mergeFiles && !csvFormat) {
            throw new IllegalArgumentException("Merge files is only supported for CSV formats for export-pg");
        }
        return new PropertyGraphTargetConfig(directories, new KinesisConfig(this), printerOptions, format, getOutput(), mergeFiles, perLabelDirectories, true);
    }

    /** Human-readable description of the selected export format. */
    public String description() {
        return format.description();
    }

    @Override
    protected DirectoryStructure directoryStructure() {
        // The simplified streams JSON format uses its own directory layout.
        return format == PropertyGraphExportFormat.neptuneStreamsSimpleJson
                ? DirectoryStructure.SimpleStreamsOutput
                : DirectoryStructure.PropertyGraph;
    }
}
| 4,240 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/SpecifiedLabels.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * LabelsFilter that restricts an export to an explicit set of labels.
 * Edge labels may carry from/to vertex label qualifiers, in which case the
 * generated traversal also filters on the labels of the incident vertices.
 */
public class SpecifiedLabels implements LabelsFilter {
private final Collection<Label> labels;
private final LabelStrategy labelStrategy;
public SpecifiedLabels(Collection<Label> labels, LabelStrategy labelStrategy) {
this.labels = labels;
this.labelStrategy = labelStrategy;
}
/**
 * Adds label predicates for the specified labels to the supplied traversal.
 */
@Override
public GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> traversal, FeatureToggles featureToggles, GraphElementType graphElementType) {
// Fast path: plain edge labels (no from/to qualifiers) can be expressed as a
// single hasLabel(first, rest...) step.
boolean simpleEdgeLabels = graphElementType == GraphElementType.edges &&
labels.stream().allMatch(l -> !l.hasFromLabels() && !l.hasToLabels());
if (simpleEdgeLabels || featureToggles.containsFeature(FeatureToggle.ExportByIndividualLabels)) {
List<String> labelList = labels.stream()
.flatMap((Function<Label, Stream<String>>) label -> label.labels().stream())
.collect(Collectors.toList());
String firstLabel = labelList.stream().findFirst().orElseThrow(() -> new IllegalStateException("No labels specified"))
;
String[] remainingLabels = labelList.stream()
.skip(1)
.collect(Collectors.toList())
.toArray(new String[]{});
return traversal.hasLabel(firstLabel, remainingLabels);
} else {
// Multiple qualified labels are OR-ed together; a single label is applied inline.
if (labels.size() > 1) {
List<Traversal<?, ?>> traversals = new ArrayList<>();
for (Label label : labels) {
traversals.add(createFilterForLabel(label, null));
}
traversal = traversal.or(traversals.toArray(new Traversal<?, ?>[]{}));
} else {
Label label = labels.iterator().next();
traversal = createFilterForLabel(label, traversal);
}
return traversal;
}
}
/**
 * Builds (or extends) a traversal filter for a single label. When the label
 * strategy includes vertex labels on edges, also constrains outV()/inV() labels.
 * Passing t == null starts an anonymous traversal (used inside or()).
 */
private GraphTraversal<? extends Element, ?> createFilterForLabel(Label label, GraphTraversal<? extends Element, ?> t) {
for (String labelValue : label.labels()) {
if (t == null) {
t = hasLabel(labelValue);
} else {
t = t.hasLabel(labelValue);
}
}
if (labelStrategy == EdgeLabelStrategy.edgeAndVertexLabels) {
if (label.hasFromAndToLabels()) {
// Both endpoint constraints must hold, so AND them inside a where().
List<Traversal<?, ?>> traversals = new ArrayList<>();
GraphTraversal<? extends Element, ?> startVertex = outV();
startVertex = createFilterForLabel(label.fromLabels(), startVertex);
traversals.add(startVertex);
GraphTraversal<? extends Element, ?> endVertex = inV();
endVertex = createFilterForLabel(label.toLabels(), endVertex);
traversals.add(endVertex);
t = t.where(and(traversals.toArray(new Traversal<?, ?>[]{})));
} else if (label.hasFromLabels()) {
GraphTraversal<? extends Element, ?> startVertex = outV();
startVertex = createFilterForLabel(label.fromLabels(), startVertex);
t = t.where(startVertex);
} else if (label.hasToLabels()) {
GraphTraversal<? extends Element, ?> endVertex = inV();
endVertex = createFilterForLabel(label.toLabels(), endVertex);
t = t.where(endVertex);
}
}
return t;
}
/** Returns the explicitly specified labels; the graph is not consulted. */
@Override
public Collection<Label> getLabelsUsing(GraphClient<?> graphClient) {
return labels;
}
/** Union of property names (without data-type suffix) across all specified labels. */
@Override
public String[] getPropertiesForLabels(GraphElementSchemas graphElementSchemas) {
Set<String> properties = new HashSet<>();
for (Label label : labels) {
LabelSchema labelSchema = graphElementSchemas.getSchemaFor(label);
for (PropertySchema propertySchema : labelSchema.propertySchemas()) {
properties.add(propertySchema.nameWithoutDataType());
}
}
return properties.toArray(new String[]{});
}
@Override
public Label getLabelFor(Map<String, Object> input) {
return labelStrategy.getLabelFor(input);
}
@Override
public Label getLabelFor(PGResult input) {
return labelStrategy.getLabelFor(input);
}
@Override
public String[] addAdditionalColumnNames(String... columns) {
return labelStrategy.additionalColumns(columns);
}
@Override
public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
return labelStrategy.addAdditionalColumns(t);
}
/** Narrows this filter to a single label, keeping the same label strategy. */
@Override
public LabelsFilter filterFor(Label label) {
return new SpecifiedLabels(Collections.singletonList(label), labelStrategy);
}
/**
 * Intersects this filter with another label set: keeps each 'other' label that is
 * assignable from one of this filter's labels (see Label.isAssignableFrom).
 */
@Override
public LabelsFilter intersection(Collection<Label> others) {
Collection<Label> results = new HashSet<>();
for (Label label : labels) {
for (Label other : others) {
if (label.isAssignableFrom(other)){
results.add(other);
}
}
}
return new SpecifiedLabels(results, labelStrategy);
}
@Override
public boolean isEmpty() {
return labels.isEmpty();
}
/** Human-readable description such as "edges with label(s) 'knows' or 'likes'". */
@Override
public String description(String element) {
if (isEmpty()){
return String.format("%s with zero labels", element);
}
String labelList = labels.stream().map(l -> String.format("'%s'", l.fullyQualifiedLabel())).collect(Collectors.joining(" or "));
return String.format("%s with label(s) %s", element, labelList);
}
/** Splits into one single-label filter per specified label (used for parallelism). */
@Override
public Collection<LabelsFilter> split() {
return labels.stream()
.map(l -> new SpecifiedLabels(Collections.singletonList(l), labelStrategy))
.collect(Collectors.toList());
}
}
| 4,241 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/Label.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.util.SemicolonUtils;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.*;
import java.util.stream.Collectors;
/**
 * Value object representing a (possibly multi-valued) vertex or edge label,
 * optionally qualified by the labels of an edge's from/to vertices.
 * Labels are kept sorted; equality is based on the fully-qualified string form.
 */
public class Label {
/**
 * Workaround for label values arriving as a single "a::b"-joined string:
 * splits them into individual sorted labels. Other inputs pass through unchanged.
 */
public static List<String> fixLabelsIssue(List<String> list) {
if (list.size() == 1 && list.get(0).contains("::")) {
List<String> newResults = Arrays.asList(list.get(0).split("::"));
newResults.sort(String::compareTo);
return newResults;
}
return list;
}
/**
 * Deserializes a Label from JSON. Accepts three shapes: an object with
 * "~label" (plus optional "~fromLabels"/"~toLabels" as array or semicolon-delimited
 * string), an array of label strings, or a plain text label.
 */
public static Label fromJson(JsonNode jsonNode) {
if (jsonNode.isObject()) {
String label = jsonNode.path("~label").textValue();
Collection<String> fromLabels = new ArrayList<>();
Collection<String> toLabels = new ArrayList<>();
if (jsonNode.has("~fromLabels")) {
JsonNode fromLabelsNode = jsonNode.path("~fromLabels");
if (fromLabelsNode.isArray()) {
ArrayNode fromLabelsArrays = (ArrayNode) fromLabelsNode;
fromLabelsArrays.forEach(l -> fromLabels.add(l.textValue()));
} else {
fromLabels.addAll(SemicolonUtils.split(fromLabelsNode.textValue()));
}
}
if (jsonNode.has("~toLabels")) {
JsonNode toLabelsNode = jsonNode.path("~toLabels");
if (toLabelsNode.isArray()) {
ArrayNode toLabelsArray = (ArrayNode) toLabelsNode;
toLabelsArray.forEach(l -> toLabels.add(l.textValue()));
} else {
toLabels.addAll(SemicolonUtils.split(toLabelsNode.textValue()));
}
}
return new Label(Collections.singletonList(label), fromLabels, toLabels);
} else {
if (jsonNode.isArray()) {
ArrayNode labelsNode = (ArrayNode) jsonNode;
Collection<String> labels = new ArrayList<>();
labelsNode.forEach(l -> labels.add(l.textValue()));
return new Label(labels);
} else {
return new Label(jsonNode.textValue());
}
}
}
/** Wraps each plain label string in its own single-label Label. */
public static Collection<Label> forLabels(Collection<String> labels) {
Set<Label> results = new HashSet<>();
for (String label : labels) {
results.add(new Label(Collections.singletonList(label)));
}
return results;
}
private final List<String> labels;
private final List<String> fromLabels;
private final List<String> toLabels;
// Cached canonical form; also the basis for equals()/hashCode().
private final String fullyQualifiedLabel;
public Label(String label) {
this(SemicolonUtils.split(label));
}
public
Label(Collection<String> labels) {
this(labels, Collections.emptyList(), Collections.emptyList());
}
public Label(String label, String fromLabels, String toLabels) {
this(label, SemicolonUtils.split(fromLabels), SemicolonUtils.split(toLabels));
}
public Label(String label, Collection<String> fromLabels, Collection<String> toLabels) {
this(Collections.singletonList(label), fromLabels, toLabels);
}
private Label(Collection<String> labels, Collection<String> fromLabels, Collection<String> toLabels) {
this.labels = labelList(labels);
this.fromLabels = labelList(fromLabels);
this.toLabels = labelList(toLabels);
// Qualified form "(from)-label-(to)" only when at least one endpoint qualifier exists.
this.fullyQualifiedLabel = hasFromLabels() || hasToLabels() ?
format(fromLabelsAsString(), labelsAsString(), toLabelsAsString()) :
labelsAsString();
}
private String format(String fromLabels, String label, String toLabels) {
return String.format("(%s)-%s-(%s)", fromLabels, label, toLabels);
}
// Escapes literal ';' in label values so joining with ';' stays unambiguous.
private List<String> escapeSemicolons(List<String> list) {
return list.stream().map(v -> DataType.escapeSeparators(v, ";")).collect(Collectors.toList());
}
// Normalizes a label collection: fixes "::"-joined values and sorts.
private List<String> labelList(Collection<String> col) {
List<String> results = new ArrayList<>(col);
results = fixLabelsIssue(results);
results.sort(String::compareTo);
return results;
}
/**
 * True when l carries (at least) all of this label's labels and endpoint
 * qualifiers, i.e. l is a more (or equally) specific label than this one.
 */
public boolean isAssignableFrom(Label l){
boolean allLabelsFound = l.labels.containsAll(labels);
boolean allFromLabelsFound = l.fromLabels.containsAll(fromLabels);
boolean allToLabelsFound = l.toLabels.containsAll(toLabels);
return allLabelsFound && allFromLabelsFound && allToLabelsFound;
}
public List<String> labels() {
return labels;
}
public Label fromLabels() {
return new Label(fromLabels);
}
public Label toLabels() {
return new Label(toLabels);
}
/** Semicolon-joined from-labels, or "_" when there are none. */
public String fromLabelsAsString() {
if (fromLabels.isEmpty()) {
return "_";
}
return String.join(";", escapeSemicolons(fromLabels));
}
/** Semicolon-joined to-labels, or "_" when there are none. */
public String toLabelsAsString() {
if (toLabels.isEmpty()) {
return "_";
}
return String.join(";", escapeSemicolons(toLabels));
}
public String labelsAsString() {
return String.join(";", escapeSemicolons(labels));
}
public String fullyQualifiedLabel() {
return fullyQualifiedLabel;
}
public String allLabelsAsArrayString(){
return hasFromLabels() || hasToLabels() ?
String.format("[%s, %s, %s]", fromLabelsAsString(), labelsAsString(), toLabelsAsString()):
labelsAsString();
}
public boolean hasFromAndToLabels() {
return !fromLabels.isEmpty() && !toLabels.isEmpty();
}
public boolean hasFromLabels() {
return !fromLabels.isEmpty();
}
public boolean hasToLabels() {
return !toLabels.isEmpty();
}
// NOTE(review): round-trips through toJson(); see the note on toJson() below about
// labels qualified on only one endpoint.
public Label createCopy() {
return Label.fromJson(toJson());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
Label label = (Label) o;
return fullyQualifiedLabel.equals(label.fullyQualifiedLabel);
}
@Override
public int hashCode() {
return Objects.hash(fullyQualifiedLabel);
}
/**
 * Serializes this label to JSON (inverse of fromJson()).
 * NOTE(review): when only ONE of fromLabels/toLabels is present, the
 * !hasFromAndToLabels() branch emits the plain label form and the one-sided
 * qualifier is dropped — so createCopy() loses it. Confirm this is intended.
 */
public JsonNode toJson() {
if (!hasFromAndToLabels()) {
if (labels.size() > 1) {
ArrayNode labelsArray = JsonNodeFactory.instance.arrayNode();
for (String label : labels) {
labelsArray.add(label);
}
return labelsArray;
} else {
return JsonNodeFactory.instance.textNode(labels.get(0));
}
}
ObjectNode labelNode = JsonNodeFactory.instance.objectNode();
ArrayNode fromLabelsArray = JsonNodeFactory.instance.arrayNode();
ArrayNode toLabelsArray = JsonNodeFactory.instance.arrayNode();
labelNode.put("~label", labels.get(0));
for (String fromLabel : fromLabels) {
fromLabelsArray.add(fromLabel);
}
labelNode.set("~fromLabels", fromLabelsArray);
for (String toLabel : toLabels) {
toLabelsArray.add(toLabel);
}
labelNode.set("~toLabels", toLabelsArray);
return labelNode;
}
}
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GremlinQueryDebugger.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import org.apache.tinkerpop.gremlin.process.traversal.translator.GroovyTranslator;
/**
 * Debugging helper that renders a Gremlin traversal (or other object the Groovy
 * translator accepts) as the equivalent Groovy script text, rooted at the
 * traversal source "g".
 */
public class GremlinQueryDebugger {

    // Utility class: prevent instantiation (Effective Java Item 4).
    private GremlinQueryDebugger() {
    }

    /**
     * @param o traversal or bytecode to render
     * @return the Groovy script representation of {@code o}
     */
    public static String queryAsString(Object o) {
        return new GroovyTranslator.DefaultTypeTranslator(false).apply("g", o).getScript();
    }
}
| 4,243 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/LabelsFilter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.Collection;
import java.util.Map;
/**
 * Filter over vertex or edge labels: controls which graph elements an export
 * touches and how labels are resolved for individual results.
 */
public interface LabelsFilter {
/** Adds this filter's label predicates to the supplied traversal. */
GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> traversal, FeatureToggles featureToggles, GraphElementType graphElementType);
/** Resolves the concrete labels this filter covers, querying the graph if necessary. */
Collection<Label> getLabelsUsing(GraphClient<?> graphClient);
/** Property names (per the supplied schemas) across all labels covered by this filter. */
String[] getPropertiesForLabels(GraphElementSchemas graphElementSchemas);
/** Determines the label for a raw result map. */
Label getLabelFor(Map<String, Object> input);
/** Determines the label for a typed property graph result. */
Label getLabelFor(PGResult result);
/** Appends any filter-specific column names to the supplied columns. */
String[] addAdditionalColumnNames(String... columns);
/** Adds any filter-specific projection steps to the supplied traversal. */
<T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t);
/** Narrows this filter to a single label. */
LabelsFilter filterFor(Label label);
/** Intersects this filter with another label set. */
LabelsFilter intersection(Collection<Label> labels);
/** True when this filter covers no labels. */
boolean isEmpty();
/** Human-readable description, e.g. for log output. */
String description(String element);
/** Splits this filter into per-label filters (e.g. for parallel export). */
Collection<LabelsFilter> split();
}
| 4,244 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NamedQuery.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
/**
 * A Gremlin query paired with a display name. Construction rejects queries that
 * contain Gremlin write steps, so a NamedQuery is guaranteed to be read-only.
 */
public class NamedQuery {

    // Step fragments that indicate a mutating traversal.
    private static final String[] WRITE_STEPS = {".addV(", ".addE(", ".drop(", ".property("};

    private final String name;
    private final String query;

    /**
     * @param name  display name for the query
     * @param query Gremlin query text; must not contain write steps
     * @throws IllegalArgumentException if the query contains a write step
     */
    public NamedQuery(String name, String query) {
        for (String writeStep : WRITE_STEPS) {
            if (query.contains(writeStep)) {
                throw new IllegalArgumentException("Query must not contain any Gremlin write steps");
            }
        }
        this.name = name;
        this.query = query;
    }

    public String name() {
        return name;
    }

    public String query() {
        return query;
    }
}
| 4,245 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GraphClient.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import java.util.Collection;
import java.util.Map;
/**
 * Abstraction over a property graph connection used by the export, parameterized
 * by the raw result type T returned from value queries.
 */
public interface GraphClient<T> {
/** Human-readable description of what this client queries (e.g. for logging). */
String description();
/** Queries element metadata in the given range/filters to build the schema. */
void queryForSchema(GraphElementHandler<Map<?, Object>> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters);
/** Queries element values in the given range/filters, using known schemas. */
void queryForValues(GraphElementHandler<T> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters, GraphElementSchemas graphElementSchemas);
/** Approximate count of elements matching the filters (used to size ranges). */
long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters);
/** Distinct labels present in the graph, per the supplied label strategy. */
Collection<Label> labels(LabelStrategy labelStrategy);
/** Resolves the label for a single raw result. */
Label getLabelFor(T input, LabelsFilter labelsFilter);
/** Records per-label statistics during the export. */
void updateStats(Label label);
}
| 4,246 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NamedQueriesCollection.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.Jsonizable;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
/**
 * A collection of {@link NamedQueries} groups that can be round-tripped
 * to and from JSON.
 */
public class NamedQueriesCollection implements Jsonizable<Object> {

    /**
     * Deserializes a collection of named query groups from a JSON array.
     *
     * @param json array node whose elements each describe one {@link NamedQueries} group
     */
    public static NamedQueriesCollection fromJson(JsonNode json) {
        List<NamedQueries> entries = new ArrayList<>();
        json.forEach(node -> entries.add(NamedQueries.fromJson(node)));
        return new NamedQueriesCollection(entries);
    }

    private final Collection<NamedQueries> namedQueries;

    public NamedQueriesCollection(Collection<NamedQueries> namedQueries) {
        this.namedQueries = namedQueries;
    }

    /** Flattens every group into a single collection of individual named queries. */
    public Collection<NamedQuery> flatten() {
        List<NamedQuery> allQueries = new ArrayList<>();
        for (NamedQueries group : namedQueries) {
            group.addTo(allQueries);
        }
        return allQueries;
    }

    /** Returns the name of each query group, in iteration order. */
    public Collection<String> names() {
        List<String> groupNames = new ArrayList<>();
        for (NamedQueries group : namedQueries) {
            groupNames.add(group.name());
        }
        return groupNames;
    }

    /**
     * Serializes the collection back to the JSON array form accepted by
     * {@link #fromJson(JsonNode)} (always using the multi-query "queries" field).
     */
    @Override
    public JsonNode toJson(Object o) {
        ArrayNode json = JsonNodeFactory.instance.arrayNode();
        namedQueries.forEach(group -> {
            ObjectNode groupNode = JsonNodeFactory.instance.objectNode();
            groupNode.put("name", group.name());
            groupNode.set("queries", group.toJson());
            json.add(groupNode);
        });
        return json;
    }
}
| 4,247 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/GremlinFilters.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import org.apache.commons.lang.StringUtils;
import org.apache.tinkerpop.gremlin.jsr223.CachedGremlinScriptEngineManager;
import org.apache.tinkerpop.gremlin.jsr223.GremlinScriptEngine;
import org.apache.tinkerpop.gremlin.process.traversal.Bytecode;
import org.apache.tinkerpop.gremlin.process.traversal.Traversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
import javax.script.Bindings;
import javax.script.ScriptException;
import java.util.Arrays;
import java.util.List;
/**
 * Encapsulates optional Gremlin filter expressions that are appended to node
 * and edge export traversals. Filters are supplied as Gremlin-groovy snippets,
 * evaluated to anonymous traversals, validated to be free of mutating or
 * side-effecting steps, and then spliced into the outgoing traversal's bytecode.
 */
public class GremlinFilters {

    public static final GremlinFilters EMPTY = new GremlinFilters(null, null, null, false);

    // Steps that would mutate the graph or produce side effects are rejected.
    private static final List<String> INVALID_OPERATORS = Arrays.asList("addV", "addE", "write", "drop", "sideEffect", "property");

    // Reuse a single manager so that script engines are cached across filter
    // applications. Previously a new CachedGremlinScriptEngineManager was
    // constructed on every apply() call, which defeated its caching and paid
    // engine start-up costs for each traversal.
    private static final CachedGremlinScriptEngineManager SCRIPT_ENGINE_MANAGER = new CachedGremlinScriptEngineManager();

    private final String gremlinFilter;      // fallback filter applied to both nodes and edges
    private final String gremlinNodeFilter;  // node-specific filter; takes precedence over gremlinFilter
    private final String gremlinEdgeFilter;  // edge-specific filter; takes precedence over gremlinFilter
    private final boolean filterEdgesEarly;

    public GremlinFilters(String gremlinFilter, String gremlinNodeFilter, String gremlinEdgeFilter, boolean filterEdgesEarly) {
        this.gremlinFilter = gremlinFilter;
        this.gremlinNodeFilter = gremlinNodeFilter;
        this.gremlinEdgeFilter = gremlinEdgeFilter;
        this.filterEdgesEarly = filterEdgesEarly;
    }

    /** Appends the node filter (or the general filter, if no node filter is set) to {@code t}. */
    public GraphTraversal<? extends Element, ?> applyToNodes(GraphTraversal<? extends Element, ?> t) {
        if (StringUtils.isNotEmpty(gremlinNodeFilter)) {
            return apply(t, gremlinNodeFilter);
        } else if (StringUtils.isNotEmpty(gremlinFilter)) {
            return apply(t, gremlinFilter);
        } else {
            return t;
        }
    }

    /** Appends the edge filter (or the general filter, if no edge filter is set) to {@code t}. */
    public GraphTraversal<? extends Element, ?> applyToEdges(GraphTraversal<? extends Element, ?> t) {
        if (StringUtils.isNotEmpty(gremlinEdgeFilter)) {
            return apply(t, gremlinEdgeFilter);
        } else if (StringUtils.isNotEmpty(gremlinFilter)) {
            return apply(t, gremlinFilter);
        } else {
            return t;
        }
    }

    /** Whether edge filters should be applied before any range is applied to the traversal. */
    public boolean filterEdgesEarly() {
        return filterEdgesEarly;
    }

    /**
     * Evaluates {@code gremlin} as a gremlin-groovy anonymous traversal and
     * splices its steps onto the end of {@code t}.
     *
     * @throws IllegalStateException    if the snippet does not evaluate
     * @throws IllegalArgumentException if the snippet contains a mutating or side-effecting step
     */
    private GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> t, String gremlin) {
        GremlinScriptEngine engine = SCRIPT_ENGINE_MANAGER.getEngineByName("gremlin-groovy");
        Bindings engineBindings = engine.createBindings();
        // Expose a datetime(...) helper so filters can compare against ISO-8601 date strings.
        engineBindings.put("datetime", new DatetimeConverter());
        Traversal.Admin<?, ?> whereTraversal;
        try {
            whereTraversal = (Traversal.Admin) engine.eval(gremlin, engineBindings);
        } catch (ScriptException e) {
            throw new IllegalStateException(String.format("Invalid Gremlin filter: %s. %s", gremlin, e.getMessage()), e);
        }
        for (Bytecode.Instruction instruction : whereTraversal.getBytecode().getInstructions()) {
            String operator = instruction.getOperator();
            validateOperator(operator);
            t.asAdmin().getBytecode().addStep(operator, instruction.getArguments());
        }
        return t;
    }

    private void validateOperator(String operator) {
        if (INVALID_OPERATORS.contains(operator)) {
            throw new IllegalArgumentException(String.format("Invalid operator: '%s'. Gremlin filter cannot contain side effect or mutating step.", operator));
        }
    }

    /** Parses an ISO-8601 date string (assumed UTC) to a java.util.Date; bound as "datetime" in filter scripts. */
    private static class DatetimeConverter {
        private static final DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTimeParser().withZoneUTC();

        public Object call(String args) {
            return dateTimeFormatter.parseDateTime(args).toDate();
        }
    }
}
| 4,248 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/EdgesClient.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.io.result.PGEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.apache.tinkerpop.gremlin.structure.T;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * {@link GraphClient} implementation that reads edges from a property graph
 * via Gremlin traversals, for both schema discovery and value export.
 */
public class EdgesClient implements GraphClient<PGResult> {

    private static final Logger logger = LoggerFactory.getLogger(EdgesClient.class);

    private final GraphTraversalSource g;
    // When true, only tokens (~id, ~label, ~from, ~to) are exported; the
    // "properties" column is populated from an empty side-effect map instead of valueMap().
    private final boolean tokensOnly;
    private final ExportStats stats;
    private final FeatureToggles featureToggles;

    public EdgesClient(GraphTraversalSource g,
                       boolean tokensOnly,
                       ExportStats stats,
                       FeatureToggles featureToggles) {
        this.g = g;
        this.tokensOnly = tokensOnly;
        this.stats = stats;
        this.featureToggles = featureToggles;
    }

    @Override
    public String description() {
        return "edge";
    }

    /**
     * Streams valueMap(true) results for edges in the given range/label filter to
     * {@code handler} for schema inference.
     *
     * NOTE(review): the gremlinFilters parameter is not applied here, so schema
     * discovery scans unfiltered edges — confirm this is intentional.
     */
    @Override
    public void queryForSchema(GraphElementHandler<Map<?, Object>> handler, Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters) {
        // "~TOKENS-ONLY" is passed as a (presumably non-existent) property key so that
        // valueMap returns only tokens — TODO confirm against server behaviour.
        GraphTraversal<? extends Element, Map<Object, Object>> t1 = tokensOnly ?
                traversal(range, labelsFilter).valueMap(true, "~TOKENS-ONLY") :
                traversal(range, labelsFilter).valueMap(true);

        logger.info(GremlinQueryDebugger.queryAsString(t1));

        t1.forEachRemaining(m -> {
            try {
                handler.handle(m, false);
            } catch (IOException e) {
                // GraphElementHandler.handle declares IOException; rethrow unchecked
                // because forEachRemaining's consumer cannot throw checked exceptions.
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Streams edge export results to {@code handler}. The traversal is built in
     * stages (t1..t5); the position of range application depends on whether
     * Gremlin filters must be applied before ranging (filterEdgesEarly).
     */
    @Override
    public void queryForValues(GraphElementHandler<PGResult> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters,
                               GraphElementSchemas graphElementSchemas) {

        // t1: base edge traversal; in tokens-only mode an empty side-effect map "x"
        // stands in for the properties column later in the projection.
        GraphTraversal<Edge, Edge> t1 = tokensOnly ?
                g.withSideEffect("x", new HashMap<String, Object>()).E() :
                g.E();

        // t2: restrict to the configured labels.
        GraphTraversal<? extends Element, ?> t2 = labelsFilter.apply(t1, featureToggles, GraphElementType.edges);

        // Range is applied before the Gremlin filters unless filters must run early.
        if(!gremlinFilters.filterEdgesEarly()) {
            t2 = range.applyRange(t2);
        }

        // t3: optional property-key filter (feature-toggled).
        GraphTraversal<? extends Element, ?> t3 = filterByPropertyKeys(t2, labelsFilter, graphElementSchemas);

        // t4: user-supplied Gremlin edge filters.
        GraphTraversal<? extends Element, ?> t4 = gremlinFilters.applyToEdges(t3);

        if(gremlinFilters.filterEdgesEarly()) {
            t4 = range.applyRange(t4);
        }

        // t5: project each edge into a column map: id, label, properties, from/to vertex ids,
        // plus any additional columns required by the label strategy.
        GraphTraversal<? extends Element, Map<String, Object>> t5 = t4.
                project("~id", labelsFilter.addAdditionalColumnNames("~label", "properties", "~from", "~to")).
                by(T.id).
                by(T.label).
                by(tokensOnly ?
                        select("x") :
                        valueMap(labelsFilter.getPropertiesForLabels(graphElementSchemas))
                ).
                by(outV().id()).
                by(inV().id());

        GraphTraversal<? extends Element, Map<String, Object>> traversal = labelsFilter.addAdditionalColumns(t5);

        logger.info(GremlinQueryDebugger.queryAsString(traversal));

        traversal.forEachRemaining(p -> {
            try {
                // Test hook: deliberately fail mid-export when the fault-injection toggle is on.
                if (featureToggles.containsFeature(FeatureToggle.Inject_Fault)){
                    throw new IllegalStateException("Simulated fault in EdgesClient");
                }
                handler.handle(new PGEdgeResult(p), false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Restricts the traversal to edges whose property keys appear in the discovered
     * schemas. Only applied when the FilterByPropertyKeys feature toggle is enabled.
     */
    private GraphTraversal<? extends Element, ?> filterByPropertyKeys(GraphTraversal<? extends Element, ?> traversal,
                                                                     LabelsFilter labelsFilter,
                                                                     GraphElementSchemas graphElementSchemas) {
        if (!featureToggles.containsFeature(FeatureToggle.FilterByPropertyKeys)) {
            return traversal;
        }
        return traversal.where(
                properties().key().is(P.within(labelsFilter.getPropertiesForLabels(graphElementSchemas))));
    }

    /**
     * Returns the edge count: a user-supplied approximation if configured,
     * otherwise an actual (potentially slow) count() traversal. Also records
     * the count in the export stats.
     */
    @Override
    public long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters) {
        if (rangeConfig.approxEdgeCount() > 0) {
            return rangeConfig.approxEdgeCount();
        }

        String description = labelsFilter.description("edges");

        System.err.println(String.format("Counting %s...", description));

        return Timer.timedActivity(String.format("counting %s", description), (Activity.Callable<Long>) () -> {
            GraphTraversal<? extends Element, ?> traversal = traversal(Range.ALL, labelsFilter);
            // When edges are filtered early, the filtered count is what the export will see.
            if(gremlinFilters.filterEdgesEarly()) {
                traversal = gremlinFilters.applyToEdges(traversal);
            }
            GraphTraversal<? extends Element, Long> t = traversal.count();
            logger.info(GremlinQueryDebugger.queryAsString(t));
            Long count = t.next();
            stats.setEdgeCount(count);
            return count;
        });
    }

    @Override
    public Collection<Label> labels(LabelStrategy labelStrategy) {
        return labelStrategy.getLabels(g);
    }

    @Override
    public Label getLabelFor(PGResult input, LabelsFilter labelsFilter) {
        return labelsFilter.getLabelFor(input);
    }

    @Override
    public void updateStats(Label label) {
        stats.incrementEdgeStats(label);
    }

    /** Base edge traversal with the labels filter applied, then the range. */
    private GraphTraversal<? extends Element, ?> traversal(Range range, LabelsFilter labelsFilter) {
        return range.applyRange(labelsFilter.apply(g.E(), featureToggles, GraphElementType.edges));
    }
}
| 4,249 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NodesClient.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.GraphElementHandler;
import com.amazonaws.services.neptune.propertygraph.io.result.ExportPGNodeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.process.traversal.P;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * {@link GraphClient} implementation that reads nodes (vertices) from a
 * property graph via Gremlin traversals, for both schema discovery and
 * value export.
 */
public class NodesClient implements GraphClient<PGResult> {

    private static final Logger logger = LoggerFactory.getLogger(NodesClient.class);

    private final GraphTraversalSource g;
    // When true, only tokens (~id, ~label) are exported; the "properties" column
    // is populated from an empty side-effect map instead of valueMap().
    private final boolean tokensOnly;
    private final ExportStats stats;
    private final FeatureToggles featureToggles;

    public NodesClient(GraphTraversalSource g,
                       boolean tokensOnly,
                       ExportStats stats,
                       FeatureToggles featureToggles) {
        this.g = g;
        this.tokensOnly = tokensOnly;
        this.stats = stats;
        this.featureToggles = featureToggles;
    }

    @Override
    public String description() {
        return "node";
    }

    /**
     * Streams valueMap(true) results for nodes matching the range, label and
     * Gremlin filters to {@code handler} for schema inference.
     */
    @Override
    public void queryForSchema(GraphElementHandler<Map<?, Object>> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters) {
        // "~TOKENS-ONLY" is passed as a (presumably non-existent) property key so that
        // valueMap returns only tokens — TODO confirm against server behaviour.
        GraphTraversal<? extends Element, Map<Object, Object>> t = tokensOnly ?
                createTraversal(range, labelsFilter, gremlinFilters).valueMap(true, "~TOKENS-ONLY") :
                createTraversal(range, labelsFilter, gremlinFilters).valueMap(true);

        logger.info(GremlinQueryDebugger.queryAsString(t));

        t.forEachRemaining(m -> {
            try {
                handler.handle(m, false);
            } catch (IOException e) {
                // GraphElementHandler.handle declares IOException; rethrow unchecked
                // because forEachRemaining's consumer cannot throw checked exceptions.
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Streams node export results to {@code handler}, projecting each vertex
     * into a column map: id, label(s), properties, plus any additional columns
     * required by the label strategy.
     */
    @Override
    public void queryForValues(GraphElementHandler<PGResult> handler,
                               Range range,
                               LabelsFilter labelsFilter,
                               GremlinFilters gremlinFilters,
                               GraphElementSchemas graphElementSchemas) {

        GraphTraversal<? extends Element, ?> t1 = createTraversal(range, labelsFilter, gremlinFilters);
        // Optional property-key filter (feature-toggled).
        GraphTraversal<? extends Element, ?> t2 = filterByPropertyKeys(t1, labelsFilter, graphElementSchemas);
        // label().fold() yields all labels of a multi-labelled vertex as a list.
        GraphTraversal<? extends Element, Map<String, Object>> t3 = t2.
                project("~id", labelsFilter.addAdditionalColumnNames("~label", "properties")).
                by(T.id).
                by(label().fold()).
                by(tokensOnly ?
                        select("x") :
                        valueMap(labelsFilter.getPropertiesForLabels(graphElementSchemas))
                );

        GraphTraversal<? extends Element, Map<String, Object>> traversal = labelsFilter.addAdditionalColumns(t3);

        logger.info(GremlinQueryDebugger.queryAsString(traversal));

        traversal.forEachRemaining(m -> {
            try {
                // Test hook: deliberately fail mid-export when the fault-injection toggle is on.
                if (featureToggles.containsFeature(FeatureToggle.Inject_Fault)){
                    throw new IllegalStateException("Simulated fault in NodesClient");
                }
                handler.handle(new ExportPGNodeResult(m), false);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**
     * Restricts the traversal to vertices whose property keys appear in the discovered
     * schemas. Only applied when the FilterByPropertyKeys feature toggle is enabled.
     */
    private GraphTraversal<? extends Element, ?> filterByPropertyKeys(GraphTraversal<? extends Element, ?> traversal,
                                                                     LabelsFilter labelsFilter,
                                                                     GraphElementSchemas graphElementSchemas) {
        if (!featureToggles.containsFeature(FeatureToggle.FilterByPropertyKeys)) {
            return traversal;
        }
        return traversal.where(
                properties().key().is(P.within(labelsFilter.getPropertiesForLabels(graphElementSchemas))));
    }

    /**
     * Returns the node count: a user-supplied approximation if configured,
     * otherwise an actual (potentially slow) count() traversal over the filtered
     * node set. Also records the count in the export stats.
     */
    @Override
    public long approxCount(LabelsFilter labelsFilter, RangeConfig rangeConfig, GremlinFilters gremlinFilters) {
        if (rangeConfig.approxNodeCount() > 0) {
            return rangeConfig.approxNodeCount();
        }

        String description = labelsFilter.description("nodes");

        System.err.println(String.format("Counting %s...", description));

        return Timer.timedActivity(String.format("counting %s", description), (Activity.Callable<Long>) () ->
        {
            GraphTraversal<? extends Element, Long> t = createTraversal(Range.ALL, labelsFilter, gremlinFilters).count();
            logger.info(GremlinQueryDebugger.queryAsString(t));
            Long count = t.next();
            stats.setNodeCount(count);
            return count;
        });
    }

    @Override
    public Collection<Label> labels(LabelStrategy labelStrategy) {
        return labelStrategy.getLabels(g);
    }

    @Override
    public Label getLabelFor(PGResult input, LabelsFilter labelsFilter) {
        return labelsFilter.getLabelFor(input);
    }

    @Override
    public void updateStats(Label label) {
        stats.incrementNodeStats(label);
    }

    /**
     * Base vertex traversal with labels filter, then Gremlin node filters, then
     * the range applied (in that order). In tokens-only mode an empty side-effect
     * map "x" stands in for the properties column in later projections.
     */
    private GraphTraversal<? extends Element, ?> createTraversal(Range range, LabelsFilter labelsFilter, GremlinFilters gremlinFilters) {
        GraphTraversal<Vertex, Vertex> t = tokensOnly ?
                g.withSideEffect("x", new HashMap<String, Object>()).V() :
                g.V();
        return range.applyRange(gremlinFilters.applyToNodes( labelsFilter.apply(t, featureToggles, GraphElementType.nodes)));
    }
}
| 4,250 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NeptuneGremlinClient.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazon.neptune.gremlin.driver.sigv4.ChainedSigV4PropertiesProvider;
import com.amazonaws.neptune.auth.NeptuneNettyHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import com.amazonaws.services.neptune.propertygraph.io.SerializationConfig;
import org.apache.tinkerpop.gremlin.driver.*;
import org.apache.tinkerpop.gremlin.driver.Cluster.Builder;
import org.apache.tinkerpop.gremlin.driver.remote.DriverRemoteConnection;
import org.apache.tinkerpop.gremlin.process.traversal.AnonymousTraversalSource;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Wraps a TinkerPop driver {@code Cluster} configured for Amazon Neptune,
 * providing traversal sources and raw query clients. Supports SSL and
 * IAM (SigV4) authentication, including load-balancer-aware signing.
 */
public class NeptuneGremlinClient implements AutoCloseable {

    public static final int DEFAULT_BATCH_SIZE = 64;

    private static final Logger logger = LoggerFactory.getLogger(NeptuneGremlinClient.class);

    /**
     * Builds a driver Cluster from the export tool's cluster/connection/concurrency
     * configuration and wraps it in a NeptuneGremlinClient.
     */
    public static NeptuneGremlinClient create(Cluster cluster, SerializationConfig serializationConfig) {

        ConnectionConfig connectionConfig = cluster.connectionConfig();
        ConcurrencyConfig concurrencyConfig = cluster.concurrencyConfig();

        if (!connectionConfig.useSsl()){
            logger.warn("SSL has been disabled");
        }

        Builder builder = org.apache.tinkerpop.gremlin.driver.Cluster.build()
                .port(connectionConfig.port())
                .enableSsl(connectionConfig.useSsl())
                .maxWaitForConnection(10000);

        builder = serializationConfig.apply(builder);

        if (connectionConfig.useIamAuth()) {
            builder = configureIamSigning(builder, connectionConfig);
        }

        for (String endpoint : connectionConfig.endpoints()) {
            builder = builder.addContactPoint(endpoint);
        }

        int numberOfEndpoints = connectionConfig.endpoints().size();

        // Connection-pool sizing is derived from the concurrency config and endpoint count.
        return new NeptuneGremlinClient(concurrencyConfig.applyTo(builder, numberOfEndpoints).create());
    }

    /**
     * Configures SigV4 request signing. For direct connections each handshake
     * request is signed via an interceptor; for connections through a proxy or
     * load balancer, Host-header details are passed through the JAAS_ENTRY auth
     * property to a LB-aware channelizer that performs the signing.
     */
    protected static Builder configureIamSigning (Builder builder, ConnectionConfig connectionConfig) {
        if (connectionConfig.isDirectConnection()) {
            builder = builder.handshakeInterceptor( r ->
                    {
                        try {
                            NeptuneNettyHttpSigV4Signer sigV4Signer =
                                    new NeptuneNettyHttpSigV4Signer(
                                            new ChainedSigV4PropertiesProvider().getSigV4Properties().getServiceRegion(),
                                            connectionConfig.getCredentialsProvider());
                            sigV4Signer.signRequest(r);
                        } catch (NeptuneSigV4SignerException e) {
                            throw new RuntimeException("Exception occurred while signing the request", e);
                        }
                        return r;
                    }
            );
        } else {
            builder = builder
                    // use the JAAS_ENTRY auth property to pass Host header info to the channelizer
                    .authProperties(new AuthProperties().with(AuthProperties.Property.JAAS_ENTRY, connectionConfig.handshakeRequestConfig().value()))
                    .channelizer(LBAwareSigV4WebSocketChannelizer.class);
        }
        return builder;
    }

    private final org.apache.tinkerpop.gremlin.driver.Cluster cluster;

    private NeptuneGremlinClient(org.apache.tinkerpop.gremlin.driver.Cluster cluster) {
        this.cluster = cluster;
    }

    /** Creates a remote traversal source backed by this client's cluster. */
    public GraphTraversalSource newTraversalSource() {
        return AnonymousTraversalSource.traversal().withRemote(DriverRemoteConnection.using(cluster));
    }

    /** Creates a raw-Gremlin query client backed by a new connection from the cluster. */
    public QueryClient queryClient() {
        return new QueryClient(cluster.connect());
    }

    /** Closes the underlying cluster unless it is already closed or closing. */
    @Override
    public void close() throws Exception {
        if (cluster != null && !cluster.isClosed() && !cluster.isClosing()) {
            cluster.close();
        }
    }

    /**
     * Thin wrapper over a driver {@link Client} for submitting raw Gremlin
     * query strings, with an optional per-request timeout.
     */
    public static class QueryClient implements AutoCloseable {

        private final Client client;

        QueryClient(Client client) {
            this.client = client;
        }

        /**
         * Submits a Gremlin query string.
         *
         * @param gremlin       query to submit
         * @param timeoutMillis per-request evaluation timeout, or null for the driver default
         */
        public ResultSet submit(String gremlin, Long timeoutMillis) {
            if (timeoutMillis != null){
                return client.submit(gremlin, RequestOptions.build().timeout(timeoutMillis).create());
            } else {
                return client.submit(gremlin);
            }
        }

        @Override
        public void close() throws Exception {
            client.close();
        }
    }
}
| 4,251 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NamedQueries.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import java.util.*;
/**
 * A named group of Gremlin queries. The JSON form supports either a single
 * query ({@code {"name": ..., "query": ...}}) or a list of queries
 * ({@code {"name": ..., "queries": [...]}}).
 */
public class NamedQueries {

    /** Deserializes a query group from either the single-query or multi-query JSON form. */
    public static NamedQueries fromJson(JsonNode json) {
        String name = json.path("name").textValue();

        if (!json.has("query")) {
            ArrayNode queryNodes = (ArrayNode) json.path("queries");
            List<String> queryList = new ArrayList<>();
            queryNodes.forEach(queryNode -> queryList.add(queryNode.textValue()));
            return new NamedQueries(name, queryList);
        }

        return new NamedQueries(name, Collections.singletonList(json.path("query").textValue()));
    }

    private final String name;
    private final Collection<String> queries;

    public NamedQueries(String name, Collection<String> queries) {
        this.name = name;
        this.queries = queries;
    }

    /** The group's name. */
    public String name() {
        return name;
    }

    /** The raw query strings in this group. */
    public Collection<String> queries() {
        return queries;
    }

    /** Appends one {@link NamedQuery} per query string to the supplied collection. */
    public void addTo(Collection<NamedQuery> namedQueries) {
        queries.forEach(query -> namedQueries.add(new NamedQuery(name, query)));
    }

    /** Serializes the query strings as a JSON array (the multi-query form). */
    public ArrayNode toJson() {
        ArrayNode json = JsonNodeFactory.instance.arrayNode();
        queries.forEach(query -> json.add(query));
        return json;
    }
}
| 4,252 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/EdgeLabelStrategy.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Edge;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.__.*;
/**
 * Strategies for deriving edge labels during export: either the plain edge
 * label, or a compound label combining the edge label with the labels of the
 * incident vertices.
 */
public enum EdgeLabelStrategy implements LabelStrategy {

    /** Uses only the edge's own label. */
    edgeLabelsOnly {
        @Override
        public Collection<Label> getLabels(GraphTraversalSource g) {
            // Using dedup can cause MemoryLimitExceededException on large datasets, so do the dedup in the set
            GraphTraversal<Edge, String> traversal = g.E().label();

            logger.info(GremlinQueryDebugger.queryAsString(traversal));

            Set<Label> labels = new HashSet<>();
            traversal.forEachRemaining(r -> labels.add(new Label(r)));
            return labels;
        }

        @Override
        public Label getLabelFor(Map<String, Object> input) {
            return new Label(input.get("~label").toString());
        }

        @Override
        public Label getLabelFor(PGResult input) {
            return new Label(input.getLabel());
        }

        @Override
        public String[] additionalColumns(String... columns) {
            // No extra columns needed: the edge label alone identifies the label.
            return columns;
        }

        @Override
        public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
            return t;
        }
    },
    /** Combines the edge label with the labels of the out (from) and in (to) vertices. */
    edgeAndVertexLabels {
        @Override
        public Collection<Label> getLabels(GraphTraversalSource g) {
            // Using dedup can cause MemoryLimitExceededException on large datasets, so do the dedup in the set
            // fold() collects all labels of a multi-labelled vertex into a list.
            GraphTraversal<Edge, Map<String, Object>> traversal = g.E()
                    .project("~fromLabels", "~label", "~toLabels")
                    .by(outV().label().fold())
                    .by(label())
                    .by(inV().label().fold());

            logger.info(GremlinQueryDebugger.queryAsString(traversal));

            Set<Label> labels = new HashSet<>();
            traversal.forEachRemaining(r -> {
                labels.add(getLabelFor(r));
            });
            return labels;
        }

        @Override
        public Label getLabelFor(Map<String, Object> input) {
            @SuppressWarnings("unchecked")
            Collection<String> fromLabels = (Collection<String>) input.get("~fromLabels");
            String label = String.valueOf(input.get("~label"));
            @SuppressWarnings("unchecked")
            Collection<String> toLabels = (Collection<String>) input.get("~toLabels");
            return new Label(label, fromLabels, toLabels);
        }

        @Override
        public Label getLabelFor(PGResult input) {
            Collection<String> fromLabels = input.getFromLabels();
            // Edge results carry a single label; take the first (and only) entry.
            String label = input.getLabel().get(0);
            Collection<String> toLabels = input.getToLabels();
            return new Label(label, fromLabels, toLabels);
        }

        @Override
        public String[] additionalColumns(String... columns) {
            // Vertex-label columns must be added to the projection alongside the standard ones.
            return ArrayUtils.addAll(columns, "~fromLabels", "~toLabels");
        }

        @Override
        public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
            // Modulators for the two extra columns declared in additionalColumns().
            return t.by(outV().label().fold()).by(inV().label().fold());
        }
    };

    private static final Logger logger = LoggerFactory.getLogger(EdgeLabelStrategy.class);
}
| 4,253 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/SchemaSamplingSpecification.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaCommand;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaFromSample;
import com.amazonaws.services.neptune.propertygraph.schema.CreateGraphSchemaFromScan;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import java.util.Collection;
/**
 * Describes how the graph schema should be discovered: by sampling a fixed
 * number of elements, or by scanning the whole graph.
 */
public class SchemaSamplingSpecification {

    private final boolean sample;    // true => sample; false => full scan
    private final long sampleSize;   // number of elements to sample when sampling

    public SchemaSamplingSpecification(boolean sample, long sampleSize) {
        this.sample = sample;
        this.sampleSize = sampleSize;
    }

    /**
     * Creates the schema-discovery command matching this specification.
     *
     * @param exportSpecifications the element types to discover schemas for
     * @param g                    traversal source to read the graph with
     */
    public CreateGraphSchemaCommand createSchemaCommand(Collection<ExportSpecification> exportSpecifications,
                                                        GraphTraversalSource g) {
        return sample
                ? new CreateGraphSchemaFromSample(exportSpecifications, g, sampleSize)
                : new CreateGraphSchemaFromScan(exportSpecifications, g);
    }
}
| 4,254 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/AllLabels.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.*;
/**
 * A {@link LabelsFilter} that matches every label: traversals pass through
 * unmodified, and label discovery and resolution are delegated to the
 * configured {@link LabelStrategy}.
 */
public class AllLabels implements LabelsFilter {

    private final LabelStrategy labelStrategy;

    public AllLabels(LabelStrategy labelStrategy) {
        this.labelStrategy = labelStrategy;
    }

    @Override
    public GraphTraversal<? extends Element, ?> apply(GraphTraversal<? extends Element, ?> traversal, FeatureToggles featureToggles, GraphElementType graphElementType) {
        // No label restriction: leave the traversal untouched.
        return traversal;
    }

    @Override
    public Collection<Label> getLabelsUsing(GraphClient<?> graphClient) {
        return graphClient.labels(labelStrategy);
    }

    /** Collects the property names of every label's schema, deduplicated via a set. */
    @Override
    public String[] getPropertiesForLabels(GraphElementSchemas graphElementSchemas) {
        Set<String> propertyNames = new HashSet<>();
        for (Label label : graphElementSchemas.labels()) {
            LabelSchema schema = graphElementSchemas.getSchemaFor(label);
            for (PropertySchema propertySchema : schema.propertySchemas()) {
                propertyNames.add(propertySchema.nameWithoutDataType());
            }
        }
        return propertyNames.toArray(new String[0]);
    }

    @Override
    public Label getLabelFor(Map<String, Object> input) {
        return labelStrategy.getLabelFor(input);
    }

    @Override
    public Label getLabelFor(PGResult input) {
        return labelStrategy.getLabelFor(input);
    }

    @Override
    public String[] addAdditionalColumnNames(String... columns) {
        return labelStrategy.additionalColumns(columns);
    }

    @Override
    public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
        return labelStrategy.addAdditionalColumns(t);
    }

    /** Narrows to a filter over the single given label. */
    @Override
    public LabelsFilter filterFor(Label label) {
        return new SpecifiedLabels(Collections.singletonList(label), labelStrategy);
    }

    /** Intersecting "all labels" with a set of labels yields a filter over exactly that set. */
    @Override
    public LabelsFilter intersection(Collection<Label> labels) {
        return new SpecifiedLabels(labels, labelStrategy);
    }

    @Override
    public boolean isEmpty() {
        // "All labels" never represents an empty label set.
        return false;
    }

    @Override
    public String description(String element) {
        return String.format("all %s", element);
    }

    /** Cannot be split further: the whole filter is the single unit of work. */
    @Override
    public Collection<LabelsFilter> split() {
        return Collections.singletonList(this);
    }
}
| 4,255 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/NodeLabelStrategy.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Element;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
/**
 * Label strategies for nodes. Currently there is a single strategy that
 * derives labels from the vertex labels themselves.
 */
public enum NodeLabelStrategy implements LabelStrategy {

    nodeLabelsOnly {
        @Override
        public Collection<Label> getLabels(GraphTraversalSource g) {
            // A server-side dedup() can cause MemoryLimitExceededException on
            // large datasets, so stream all labels back and dedup client-side
            // via the set.
            GraphTraversal<Vertex, String> labelTraversal = g.V().label();
            logger.info(GremlinQueryDebugger.queryAsString(labelTraversal));
            Set<Label> distinctLabels = new HashSet<>();
            labelTraversal.forEachRemaining(l -> distinctLabels.add(new Label(l)));
            return distinctLabels;
        }

        @Override
        public Label getLabelFor(Map<String, Object> input) {
            @SuppressWarnings("unchecked")
            List<String> rawLabels = (List<String>) input.get("~label");
            return new Label(Label.fixLabelsIssue(rawLabels));
        }

        @Override
        public Label getLabelFor(PGResult input) {
            return new Label(Label.fixLabelsIssue(input.getLabel()));
        }

        @Override
        public String[] additionalColumns(String... columns) {
            // Node labels need no extra columns.
            return columns;
        }

        @Override
        public <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t) {
            // Nothing to add for node labels.
            return t;
        }
    };

    private static final Logger logger = LoggerFactory.getLogger(NodeLabelStrategy.class);
}
| 4,256 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/ExportStats.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.Jsonizable;
import com.amazonaws.services.neptune.propertygraph.schema.*;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Accumulates per-label node and edge export counts and renders them either as
 * a human-readable summary ({@link #formatStats(GraphSchema)}) or as JSON
 * ({@link #addTo(ObjectNode, GraphSchema)} / {@link #toJson(GraphSchema)}).
 * <p>
 * The increment methods are safe to call from multiple writer threads: counts
 * live in {@link ConcurrentHashMap}s of {@link AtomicLong}-backed
 * {@link LabelStats}.
 */
public class ExportStats implements Jsonizable<GraphSchema> {

    // Approximate counts for the source graph, set once before export.
    private long nodeCount = 0;
    private long edgeCount = 0;

    // Per-label export counters, updated concurrently by writer threads.
    private final ConcurrentHashMap<Label, LabelStats> nodeStats = new ConcurrentHashMap<>();
    private final ConcurrentHashMap<Label, LabelStats> edgeStats = new ConcurrentHashMap<>();

    public void setNodeCount(long value) {
        nodeCount = value;
    }

    public void setEdgeCount(long value) {
        edgeCount = value;
    }

    public void incrementNodeStats(Label label) {
        nodeStats.computeIfAbsent(label, LabelStats::new).increment();
    }

    public void incrementEdgeStats(Label label) {
        edgeStats.computeIfAbsent(label, LabelStats::new).increment();
    }

    /**
     * Builds a human-readable summary: source counts, exported totals, and
     * per-label details (with one indented line per property schema).
     */
    public String formatStats(GraphSchema graphSchema) {
        StringBuilder sb = new StringBuilder();
        sb.append("Source:").append(System.lineSeparator());
        sb.append("  Nodes: ").append(nodeCount).append(System.lineSeparator());
        sb.append("  Edges: ").append(edgeCount).append(System.lineSeparator());
        sb.append("Export:").append(System.lineSeparator());
        sb.append("  Nodes: ").append(totalCount(nodeStats)).append(System.lineSeparator());
        sb.append("  Edges: ").append(totalCount(edgeStats)).append(System.lineSeparator());
        sb.append("  Properties: ").append(getNumberOfProperties(graphSchema)).append(System.lineSeparator());
        sb.append("Details:").append(System.lineSeparator());
        sb.append("    Nodes: ").append(System.lineSeparator());
        appendLabelDetails(sb, nodeStats, graphSchema.graphElementSchemasFor(GraphElementType.nodes));
        sb.append("    Edges: ").append(System.lineSeparator());
        appendLabelDetails(sb, edgeStats, graphSchema.graphElementSchemasFor(GraphElementType.edges));
        return sb.toString();
    }

    // Sums the per-label export counts in the given stats map.
    private static long totalCount(Map<Label, LabelStats> stats) {
        return stats.values().stream().map(LabelStats::count).reduce(0L, Long::sum);
    }

    // Appends one line per label followed by an indented line per property schema.
    // Shared by the node and edge sections of formatStats().
    private static void appendLabelDetails(StringBuilder sb,
                                           Map<Label, LabelStats> labelStats,
                                           GraphElementSchemas schemas) {
        for (Map.Entry<Label, LabelStats> entry : labelStats.entrySet()) {
            LabelSchema labelSchema = schemas.getSchemaFor(entry.getKey());
            sb.append("        ").append(entry.getValue().toString()).append(System.lineSeparator());
            for (PropertySchemaStats stats : labelSchema.propertySchemaStats()) {
                sb.append("            |_ ").append(stats.toString()).append(System.lineSeparator());
            }
        }
    }

    // Total number of observed property values across every element schema.
    private Long getNumberOfProperties(GraphSchema graphSchema) {
        return graphSchema.graphElementSchemas().stream()
                .map(s -> s.labelSchemas().stream()
                        .map(l -> l.propertySchemaStats().stream()
                                .map(p -> (long) p.observationCount()).reduce(0L, Long::sum))
                        .reduce(0L, Long::sum))
                .reduce(0L, Long::sum);
    }

    /**
     * Attaches a "stats" object (totals plus per-label node and edge details)
     * to the supplied root node.
     */
    public void addTo(ObjectNode rootNode, GraphSchema graphSchema) {
        ObjectNode statsNode = JsonNodeFactory.instance.objectNode();
        rootNode.set("stats", statsNode);
        statsNode.put("nodes", totalCount(nodeStats));
        statsNode.put("edges", totalCount(edgeStats));
        statsNode.put("properties", getNumberOfProperties(graphSchema));
        ObjectNode detailsNode = JsonNodeFactory.instance.objectNode();
        statsNode.set("details", detailsNode);
        detailsNode.set("nodes", nodeDetails(graphSchema));
        detailsNode.set("edges", edgeDetails(graphSchema));
    }

    // Builds the "details.nodes" array: one entry per exported node label.
    private ArrayNode nodeDetails(GraphSchema graphSchema) {
        ArrayNode nodesArrayNode = JsonNodeFactory.instance.arrayNode();
        GraphElementSchemas nodeSchemas = graphSchema.graphElementSchemasFor(GraphElementType.nodes);
        for (Map.Entry<Label, LabelStats> entry : nodeStats.entrySet()) {
            Label label = entry.getKey();
            LabelSchema labelSchema = nodeSchemas.getSchemaFor(label);
            ObjectNode nodeNode = JsonNodeFactory.instance.objectNode();
            nodesArrayNode.add(nodeNode);
            nodeNode.put("description", label.fullyQualifiedLabel());
            nodeNode.set("labels", arrayNodeFromList(label.labels()));
            nodeNode.put("count", entry.getValue().count());
            nodeNode.set("properties", propertyStatsArray(labelSchema));
        }
        return nodesArrayNode;
    }

    // Builds the "details.edges" array: one entry per exported edge label,
    // including optional from/to vertex labels.
    private ArrayNode edgeDetails(GraphSchema graphSchema) {
        ArrayNode edgesArrayNode = JsonNodeFactory.instance.arrayNode();
        GraphElementSchemas edgeSchemas = graphSchema.graphElementSchemasFor(GraphElementType.edges);
        for (Map.Entry<Label, LabelStats> entry : edgeStats.entrySet()) {
            Label label = entry.getKey();
            LabelSchema labelSchema = edgeSchemas.getSchemaFor(label);
            ObjectNode edgeNode = JsonNodeFactory.instance.objectNode();
            edgesArrayNode.add(edgeNode);
            edgeNode.put("description", label.fullyQualifiedLabel());
            ObjectNode labelsNode = JsonNodeFactory.instance.objectNode();
            if (label.hasFromLabels()) {
                labelsNode.set("from", arrayNodeFromList(label.fromLabels().labels()));
            }
            labelsNode.set("edge", arrayNodeFromList(label.labels()));
            if (label.hasToLabels()) {
                labelsNode.set("to", arrayNodeFromList(label.toLabels().labels()));
            }
            edgeNode.set("labels", labelsNode);
            edgeNode.put("count", entry.getValue().count());
            edgeNode.set("properties", propertyStatsArray(labelSchema));
        }
        return edgesArrayNode;
    }

    // Builds the per-property stats array for one label schema. Previously this
    // ~20-line builder was duplicated verbatim for nodes and edges.
    private ArrayNode propertyStatsArray(LabelSchema labelSchema) {
        ArrayNode propertiesArray = JsonNodeFactory.instance.arrayNode();
        for (PropertySchemaStats stats : labelSchema.propertySchemaStats()) {
            PropertySchema propertySchema = labelSchema.getPropertySchema(stats.property());
            ObjectNode propertyNode = JsonNodeFactory.instance.objectNode();
            propertyNode.put("name", stats.property().toString());
            propertyNode.put("count", stats.observationCount());
            propertyNode.put("numberOfRecords", stats.numberValuesCount());
            propertyNode.put("minCardinality", stats.minCardinality());
            propertyNode.put("maxCardinality", stats.maxCardinality());
            propertyNode.put("isNullable", propertySchema.isNullable());
            ObjectNode dataTypesNode = JsonNodeFactory.instance.objectNode();
            ArrayNode dataTypeCountsNode = JsonNodeFactory.instance.arrayNode();
            for (Map.Entry<DataType, Integer> e : stats.dataTypeCounts().entrySet()) {
                ObjectNode n = JsonNodeFactory.instance.objectNode();
                n.put(e.getKey().name(), e.getValue());
                dataTypeCountsNode.add(n);
            }
            dataTypesNode.put("inferred", propertySchema.dataType().name());
            dataTypesNode.set("counts", dataTypeCountsNode);
            propertyNode.set("dataTypes", dataTypesNode);
            propertiesArray.add(propertyNode);
        }
        return propertiesArray;
    }

    private ArrayNode arrayNodeFromList(Collection<String> c) {
        ArrayNode arrayNode = JsonNodeFactory.instance.arrayNode();
        for (String s : c) {
            arrayNode.add(s);
        }
        return arrayNode;
    }

    @Override
    public JsonNode toJson(GraphSchema o) {
        ObjectNode json = JsonNodeFactory.instance.objectNode();
        addTo(json, o);
        return json;
    }

    // Thread-safe per-label export counter.
    private static class LabelStats {
        private final Label label;
        private final AtomicLong count = new AtomicLong(0);

        private LabelStats(Label label) {
            this.label = label;
        }

        public void increment() {
            count.incrementAndGet();
        }

        public long count() {
            return count.get();
        }

        public Label label() {
            return label;
        }

        @Override
        public String toString() {
            return String.format("%s: %s", label.fullyQualifiedLabel(), count.get());
        }
    }
}
| 4,257 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/RangeFactory.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicLong;
import static java.lang.Math.min;
/**
 * Hands out consecutive item ranges to concurrent export workers.
 * <p>
 * {@link #nextRange()} is thread-safe: a shared {@link AtomicLong} cursor is
 * advanced atomically so each caller reserves a distinct slice. Ranges stop at
 * {@code rangeUpperBound}; when exporting everything, the final range is left
 * open-ended because the upper bound is only an estimate.
 */
public class RangeFactory {
    private static final Logger logger = LoggerFactory.getLogger(RangeFactory.class);
    /**
     * Creates a RangeFactory sized from an approximate count of the items
     * selected by {@code labelsFilter} and {@code gremlinFilters}.
     */
    public static RangeFactory create(GraphClient<?> graphClient,
                                      LabelsFilter labelsFilter,
                                      GremlinFilters gremlinFilters,
                                      RangeConfig rangeConfig,
                                      ConcurrencyConfig concurrencyConfig) {
        String description = labelsFilter.description(String.format("%ss", graphClient.description()));
        logger.info("Calculating ranges for {}", description);
        long estimatedNumberOfItemsInGraph = graphClient.approxCount(labelsFilter, rangeConfig, gremlinFilters);
        // Small result sets (< 1000 items) are handled by a single worker.
        int effectiveConcurrency = estimatedNumberOfItemsInGraph < 1000 ?
                1 :
                concurrencyConfig.concurrency();
        // Unbounded parallel execution: split the estimated items evenly across
        // workers (+1 so any integer-division remainder is still covered).
        // Otherwise use the configured fixed range size.
        long rangeSize = concurrencyConfig.isUnboundedParallelExecution(rangeConfig) ?
                (estimatedNumberOfItemsInGraph / effectiveConcurrency) + 1:
                rangeConfig.rangeSize();
        logger.info("Estimated number of {} to export: {}, Range size: {}, Effective concurrency: {}",
                description,
                estimatedNumberOfItemsInGraph,
                rangeSize,
                effectiveConcurrency);
        return new RangeFactory(
                rangeSize,
                rangeConfig.numberOfItemsToExport(),
                rangeConfig.numberOfItemsToSkip(),
                estimatedNumberOfItemsInGraph,
                effectiveConcurrency);
    }
    // Number of items per handed-out range.
    private final long rangeSize;
    // True when no explicit limit was supplied (limit == Long.MAX_VALUE).
    private final boolean exportAll;
    private final int concurrency;
    // Position past which no further (bounded) ranges are issued.
    private final long rangeUpperBound;
    // Shared cursor: the end of the last range handed out. Starts at 'skip'.
    private final AtomicLong currentEnd;
    private final long numberOfItemsToExport;
    private RangeFactory(long rangeSize,
                         long limit,
                         long skip,
                         long estimatedNumberOfItemsInGraph,
                         int concurrency) {
        this.rangeSize = rangeSize;
        this.exportAll = limit == Long.MAX_VALUE;
        this.concurrency = concurrency;
        if (exportAll){
            this.rangeUpperBound = estimatedNumberOfItemsInGraph;
            this.numberOfItemsToExport = estimatedNumberOfItemsInGraph - skip;
        } else {
            this.rangeUpperBound = limit + skip;
            this.numberOfItemsToExport = limit;
        }
        this.currentEnd = new AtomicLong(skip);
    }
    /**
     * Returns the next range to process, or the empty range (-1, -1) when the
     * factory is exhausted. Safe to call from multiple threads.
     */
    public Range nextRange() {
        if (isExhausted()){
            return new Range(-1, -1);
        }
        // Atomically reserve the next slice by advancing the shared cursor.
        long proposedEnd = currentEnd.accumulateAndGet(rangeSize, (left, right) -> left + right);
        long start = min(proposedEnd - rangeSize, rangeUpperBound);
        long actualEnd = min(proposedEnd, rangeUpperBound);
        // When exporting everything, leave the last range open-ended (-1) so the
        // final worker also picks up items beyond the approximate upper bound.
        if ((proposedEnd >= rangeUpperBound) && exportAll){
            actualEnd = -1;
        }
        return new Range(start, actualEnd);
    }
    public long numberOfItemsToExport() {
        return numberOfItemsToExport;
    }
    /** @return true once all ranges up to the upper bound have been handed out */
    public boolean isExhausted() {
        long end = currentEnd.get();
        return end == -1 || end >= rangeUpperBound;
    }
    public int concurrency() {
        return concurrency;
    }
}
| 4,258 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/Range.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.structure.Element;
import static java.lang.Math.abs;
/**
 * A half-open slice of items to export, applied to a traversal via the
 * Gremlin {@code range()} step. Two sentinels exist: {@link #ALL}
 * (start 0, end -1) means "everything", and (-1, -1) means "empty".
 */
public class Range {

    /** Sentinel covering the entire item set. */
    public static final Range ALL = new Range(0, -1);

    private final long start;
    private final long end;

    public Range(long start, long end) {
        this.start = start;
        this.end = end;
    }

    /** Applies this range to the traversal; the ALL sentinel is a no-op. */
    public GraphTraversal<? extends Element, ?> applyRange(GraphTraversal<? extends Element, ?> traversal) {
        return isAll() ? traversal : traversal.range(start, end);
    }

    public long difference() {
        return end - start;
    }

    /** @return true for the empty sentinel (-1, -1) */
    public boolean isEmpty() {
        return start == -1 && end == -1;
    }

    /** @return true for the all-items sentinel (0, -1) */
    public boolean isAll() {
        return start == 0 && end == -1;
    }

    @Override
    public String toString() {
        return String.format("range(%d, %d)", start, end);
    }

    /** @return true if this range spans more than {@code value} items */
    public boolean sizeExceeds(long value) {
        if (isEmpty()) {
            return false;
        }
        // An unbounded range exceeds any finite size.
        return isAll() || value < (end - start);
    }
}
| 4,259 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/Scope.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.propertygraph.schema.TokensOnly;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
/**
 * Export scope: everything, nodes only, or edges only. Each scope turns the
 * supplied label filters (and, when present, a previously discovered schema)
 * into the set of {@link ExportSpecification}s to run.
 */
public enum Scope {

    all {
        @Override
        public Collection<ExportSpecification> exportSpecifications(GraphSchema graphSchema,
                                                                    Collection<Label> nodeLabels,
                                                                    Collection<Label> edgeLabels,
                                                                    GremlinFilters gremlinFilters,
                                                                    TokensOnly tokensOnly,
                                                                    EdgeLabelStrategy edgeLabelStrategy,
                                                                    ExportStats stats,
                                                                    FeatureToggles featureToggles) {
            Collection<ExportSpecification> specs = new ArrayList<>();
            if (graphSchema.isEmpty()) {
                // No pre-existing schema: export whatever the label filters allow.
                specs.add(new ExportSpecification(
                        GraphElementType.nodes,
                        Scope.labelsFilter(nodeLabels, NodeLabelStrategy.nodeLabelsOnly),
                        gremlinFilters,
                        stats,
                        tokensOnly.nodeTokensOnly(),
                        featureToggles));
                specs.add(new ExportSpecification(
                        GraphElementType.edges,
                        Scope.labelsFilter(edgeLabels, edgeLabelStrategy),
                        gremlinFilters,
                        stats,
                        tokensOnly.edgeTokensOnly(),
                        featureToggles));
                return specs;
            }
            // Schema already known: restrict each filter to labels it contains,
            // and skip element types whose intersection is empty.
            if (graphSchema.hasNodeSchemas()) {
                LabelsFilter nodeFilter = Scope.labelsFilter(nodeLabels, NodeLabelStrategy.nodeLabelsOnly)
                        .intersection(graphSchema.graphElementSchemasFor(GraphElementType.nodes).labels());
                if (!nodeFilter.isEmpty()) {
                    specs.add(new ExportSpecification(
                            GraphElementType.nodes,
                            nodeFilter,
                            gremlinFilters,
                            stats,
                            tokensOnly.nodeTokensOnly(),
                            featureToggles));
                }
            }
            if (graphSchema.hasEdgeSchemas()) {
                LabelsFilter edgeFilter = Scope.labelsFilter(edgeLabels, edgeLabelStrategy)
                        .intersection(graphSchema.graphElementSchemasFor(GraphElementType.edges).labels());
                if (!edgeFilter.isEmpty()) {
                    specs.add(new ExportSpecification(
                            GraphElementType.edges,
                            edgeFilter,
                            gremlinFilters,
                            stats,
                            tokensOnly.edgeTokensOnly(),
                            featureToggles));
                }
            }
            return specs;
        }
    },
    nodes {
        @Override
        public Collection<ExportSpecification> exportSpecifications(GraphSchema graphSchema,
                                                                    Collection<Label> nodeLabels,
                                                                    Collection<Label> edgeLabels,
                                                                    GremlinFilters gremlinFilters,
                                                                    TokensOnly tokensOnly,
                                                                    EdgeLabelStrategy edgeLabelStrategy,
                                                                    ExportStats stats,
                                                                    FeatureToggles featureToggles) {
            if (graphSchema.isEmpty()) {
                return Collections.singletonList(new ExportSpecification(
                        GraphElementType.nodes,
                        Scope.labelsFilter(nodeLabels, NodeLabelStrategy.nodeLabelsOnly),
                        gremlinFilters,
                        stats,
                        tokensOnly.nodeTokensOnly(),
                        featureToggles));
            }
            if (!graphSchema.hasNodeSchemas()) {
                return Collections.emptyList();
            }
            LabelsFilter nodeFilter = Scope.labelsFilter(nodeLabels, NodeLabelStrategy.nodeLabelsOnly)
                    .intersection(graphSchema.graphElementSchemasFor(GraphElementType.nodes).labels());
            if (nodeFilter.isEmpty()) {
                // Requested labels don't appear in the schema: nothing to export.
                return Collections.emptyList();
            }
            return Collections.singletonList(new ExportSpecification(
                    GraphElementType.nodes,
                    nodeFilter,
                    gremlinFilters,
                    stats,
                    tokensOnly.nodeTokensOnly(),
                    featureToggles));
        }
    },
    edges {
        @Override
        public Collection<ExportSpecification> exportSpecifications(GraphSchema graphSchema,
                                                                    Collection<Label> nodeLabels,
                                                                    Collection<Label> edgeLabels,
                                                                    GremlinFilters gremlinFilters,
                                                                    TokensOnly tokensOnly,
                                                                    EdgeLabelStrategy edgeLabelStrategy,
                                                                    ExportStats stats,
                                                                    FeatureToggles featureToggles) {
            if (graphSchema.isEmpty()) {
                return Collections.singletonList(new ExportSpecification(
                        GraphElementType.edges,
                        Scope.labelsFilter(edgeLabels, edgeLabelStrategy),
                        gremlinFilters,
                        stats,
                        tokensOnly.edgeTokensOnly(),
                        featureToggles));
            }
            if (!graphSchema.hasEdgeSchemas()) {
                return Collections.emptyList();
            }
            LabelsFilter edgeFilter = Scope.labelsFilter(edgeLabels, edgeLabelStrategy)
                    .intersection(graphSchema.graphElementSchemasFor(GraphElementType.edges).labels());
            if (edgeFilter.isEmpty()) {
                // Requested labels don't appear in the schema: nothing to export.
                return Collections.emptyList();
            }
            return Collections.singletonList(new ExportSpecification(
                    GraphElementType.edges,
                    edgeFilter,
                    gremlinFilters,
                    stats,
                    tokensOnly.edgeTokensOnly(),
                    featureToggles));
        }
    };

    // An empty label collection means "no restriction".
    private static LabelsFilter labelsFilter(Collection<Label> labels, LabelStrategy labelStrategy) {
        return labels.isEmpty()
                ? new AllLabels(labelStrategy)
                : new SpecifiedLabels(labels, labelStrategy);
    }

    /**
     * Builds the export specifications for this scope, honouring the label
     * filters and any previously discovered schema.
     */
    public abstract Collection<ExportSpecification> exportSpecifications(
            GraphSchema graphSchema,
            Collection<Label> nodeLabels,
            Collection<Label> edgeLabels,
            GremlinFilters gremlinFilters,
            TokensOnly tokensOnly,
            EdgeLabelStrategy edgeLabelStrategy,
            ExportStats stats,
            FeatureToggles featureToggles);
}
| 4,260 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/RangeConfig.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
/**
 * Immutable value object holding range/limit settings for an export run:
 * range size, skip/limit, and approximate node and edge counts.
 */
public class RangeConfig {

    private final long rangeSize;
    private final long numberOfItemsToSkip;
    private final long numberOfItemsToExport;
    private final long approxNodeCount;
    private final long approxEdgeCount;

    public RangeConfig(long rangeSize,
                       long numberOfItemsToSkip,
                       long numberOfItemsToExport,
                       long approxNodeCount,
                       long approxEdgeCount) {
        this.rangeSize = rangeSize;
        this.numberOfItemsToSkip = numberOfItemsToSkip;
        this.numberOfItemsToExport = numberOfItemsToExport;
        this.approxNodeCount = approxNodeCount;
        this.approxEdgeCount = approxEdgeCount;
    }

    /** @return number of items per range */
    public long rangeSize() { return rangeSize; }

    /** @return number of items to skip before exporting */
    public long numberOfItemsToSkip() { return numberOfItemsToSkip; }

    /** @return maximum number of items to export */
    public long numberOfItemsToExport() { return numberOfItemsToExport; }

    /** @return approximate node count supplied by the caller */
    public long approxNodeCount() { return approxNodeCount; }

    /** @return approximate edge count supplied by the caller */
    public long approxEdgeCount() { return approxEdgeCount; }
}
| 4,261 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/TokenPrefix.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
/**
 * Prepends a configurable prefix to token names; defaults to {@code "~"}.
 */
public class TokenPrefix {

    private final String prefix;

    /** Creates a prefix using the default token marker, {@code "~"}. */
    public TokenPrefix() {
        this("~");
    }

    public TokenPrefix(String prefix) {
        this.prefix = prefix;
    }

    /** @return {@code s} with the prefix prepended */
    public String format(String s) {
        return prefix + s;
    }
}
| 4,262 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/LabelStrategy.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversal;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.apache.tinkerpop.gremlin.structure.Element;
import java.util.Collection;
import java.util.Map;
/**
 * Strategy for discovering the labels present in a graph and for deriving the
 * label of individual query results. Implementations may also contribute
 * additional output columns (see {@code EdgeLabelStrategy} implementations).
 */
public interface LabelStrategy {
    /** Returns the labels present in the graph reachable via {@code g}. */
    Collection<Label> getLabels(GraphTraversalSource g);
    /** Derives the label for a result row represented as a value map. */
    Label getLabelFor(Map<String, Object> input);
    /** Derives the label for a result row represented as a {@link PGResult}. */
    Label getLabelFor(PGResult input);
    /** Returns the supplied column names, possibly with strategy-specific additions. */
    String[] additionalColumns(String... columns);
    /** Adds any strategy-specific columns to the traversal; may return it unchanged. */
    <T> GraphTraversal<? extends Element, T> addAdditionalColumns(GraphTraversal<? extends Element, T> t);
}
| 4,263 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/PropertyGraphTargetConfig.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.KinesisConfig;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Target;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.MasterLabelSchemas;
import java.io.IOException;
import java.nio.file.Path;
import java.util.function.Supplier;
public class PropertyGraphTargetConfig {
private final Directories directories;
private final KinesisConfig kinesisConfig;
private final PrinterOptions printerOptions;
private final boolean inferSchema;
private final PropertyGraphExportFormat format;
private final Target output;
private final boolean mergeFiles;
private final boolean perLabelDirectories;
public PropertyGraphTargetConfig(Directories directories,
KinesisConfig kinesisConfig,
PrinterOptions printerOptions,
PropertyGraphExportFormat format,
Target output,
boolean mergeFiles,
boolean perLabelDirectories,
boolean inferSchema) {
this.directories = directories;
this.kinesisConfig = kinesisConfig;
this.printerOptions = printerOptions;
this.inferSchema = inferSchema;
this.format = format;
this.output = output;
this.mergeFiles = mergeFiles;
this.perLabelDirectories = perLabelDirectories;
}
public Target output() {
return output;
}
public PropertyGraphExportFormat format() {
return format;
}
public boolean mergeFiles() {
return mergeFiles;
}
public PropertyGraphPrinter createPrinterForQueries(String name, LabelSchema labelSchema) throws IOException {
return createPrinterForQueries(() -> directories.createQueryResultsFilePath(labelSchema.label().labelsAsString(), name, format), labelSchema);
}
private PropertyGraphPrinter createPrinterForQueries(Supplier<Path> pathSupplier, LabelSchema labelSchema) throws IOException {
OutputWriter outputWriter = output.createOutputWriter(pathSupplier, kinesisConfig);
return createPrinter(labelSchema, outputWriter);
}
public PropertyGraphPrinter createPrinterForEdges(String name, LabelSchema labelSchema) throws IOException {
return createPrinterForEdges(() -> directories.createEdgesFilePath(name, format, labelSchema.label(), perLabelDirectories), labelSchema);
}
private PropertyGraphPrinter createPrinterForEdges(Supplier<Path> pathSupplier, LabelSchema labelSchema) throws IOException {
OutputWriter outputWriter = output.createOutputWriter(pathSupplier, kinesisConfig);
return createPrinter(labelSchema, outputWriter);
}
public PropertyGraphPrinter createPrinterForNodes(String name, LabelSchema labelSchema) throws IOException {
return createPrinterForNodes(() -> directories.createNodesFilePath(name, format, labelSchema.label(), perLabelDirectories), labelSchema);
}
private PropertyGraphPrinter createPrinterForNodes(Supplier<Path> pathSupplier, LabelSchema labelSchema) throws IOException {
OutputWriter outputWriter = output.createOutputWriter(pathSupplier, kinesisConfig);
return createPrinter(labelSchema, outputWriter);
}
/**
 * Returns a copy of this configuration suitable for file consolidation:
 * identical in every respect except that schema inference is disabled
 * (the consolidated file is written against an already-known schema).
 */
public PropertyGraphTargetConfig forFileConsolidation() {
return new PropertyGraphTargetConfig(directories, kinesisConfig, printerOptions, format, output, mergeFiles, perLabelDirectories, false);
}
/**
 * Creates a printer for the given schema and writer, choosing the
 * schema-inferring variant when schema inference is enabled.
 */
private PropertyGraphPrinter createPrinter(LabelSchema labelSchema, OutputWriter outputWriter) throws IOException {
    return inferSchema
            ? format.createPrinterForInferredSchema(outputWriter, labelSchema, printerOptions)
            : format.createPrinter(outputWriter, labelSchema, printerOptions);
}
/**
 * Creates the post-export rewrite command for this target. Non-file-based
 * outputs have nothing on disk to rewrite, so they get an identity command.
 */
public RewriteCommand createRewriteCommand(ConcurrencyConfig concurrencyConfig, FeatureToggles featureToggles) {
    if (!output.isFileBased()) {
        return masterLabelSchemas -> masterLabelSchemas;
    }
    return format.createRewriteCommand(this, concurrencyConfig, inferSchema, featureToggles);
}
/**
 * Returns the free disk space, in gigabytes, of the export directories' volume.
 */
public long freeSpaceInGigabytes(){
return directories.freeSpaceInGigabytes();
}
}
| 4,264 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/NodesWriterFactory.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import java.io.IOException;
/**
 * Writer factory for node output. Creates printers that emit the mandatory
 * "id" and "label" header columns followed by the label's property columns,
 * and label writers that delegate to {@link NodeWriter}.
 */
public class NodesWriterFactory implements WriterFactory<PGResult> {

    @Override
    public PropertyGraphPrinter createPrinter(String name, LabelSchema labelSchema, PropertyGraphTargetConfig targetConfig) throws IOException {
        PropertyGraphPrinter printer = targetConfig.createPrinterForNodes(name, labelSchema);
        printer.printHeaderMandatoryColumns("id", "label");
        printer.printHeaderRemainingColumns(labelSchema.propertySchemas());
        return printer;
    }

    @Override
    public LabelWriter<PGResult> createLabelWriter(PropertyGraphPrinter propertyGraphPrinter, Label label) {
        return new NodeWriter(propertyGraphPrinter);
    }
}
| 4,265 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/LabelWriters.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * LRU cache of per-label writers, bounded by a shared file descriptor budget.
 * The backing {@link LinkedHashMap} is access-ordered, so iteration starts at
 * the least-recently-used entry; when the shared descriptor count exceeds the
 * configured maximum, the least-recently-used writer is closed before a new
 * one is added.
 */
public class LabelWriters<T extends PGResult> implements AutoCloseable {

    private static final Logger logger = LoggerFactory.getLogger(LabelWriters.class);

    private final int maxFileDescriptorCount;
    // Shared across tasks: tracks open file descriptors process-wide.
    private final AtomicInteger fileDescriptorCount;
    // Access-ordered (third ctor arg = true) so the first key is the LRU entry.
    private final LinkedHashMap<Label, LabelWriter<T>> labelWriters = new LinkedHashMap<>(16, 0.75f, true);

    public LabelWriters(AtomicInteger fileDescriptorCount, int maxFileDescriptorCount) {
        this.fileDescriptorCount = fileDescriptorCount;
        this.maxFileDescriptorCount = maxFileDescriptorCount;
    }

    public boolean containsKey(Label label) {
        return labelWriters.containsKey(label);
    }

    /**
     * Registers a writer for the given label, first evicting (and closing) the
     * least-recently-used writer if the descriptor budget is exceeded.
     *
     * @throws Exception if closing an evicted or displaced writer fails
     */
    public void put(Label label, LabelWriter<T> labelWriter) throws Exception {
        if (fileDescriptorCount.get() > maxFileDescriptorCount && labelWriters.size() > 1) {
            Label leastRecentlyAccessedLabel = labelWriters.keySet().iterator().next();
            LabelWriter<T> leastRecentlyAccessedLabelWriter = labelWriters.remove(leastRecentlyAccessedLabel);
            logger.info("Closing writer for label {} for output {} so as to conserve file descriptors", leastRecentlyAccessedLabel.labelsAsString(), leastRecentlyAccessedLabelWriter.outputId());
            leastRecentlyAccessedLabelWriter.close();
            fileDescriptorCount.decrementAndGet();
        }
        logger.debug("Adding writer for label {} for output {}", label.labelsAsString(), labelWriter.outputId());
        LabelWriter<T> previous = labelWriters.put(label, labelWriter);
        if (previous != null) {
            // Fix: previously a replaced writer was never closed and the
            // descriptor count was incremented anyway, leaking a descriptor.
            logger.info("Closing displaced writer for label {} for output {}", label.labelsAsString(), previous.outputId());
            previous.close();
        } else {
            fileDescriptorCount.incrementAndGet();
        }
    }

    /**
     * Closes all registered writers and releases their descriptors.
     * Clearing the map makes repeated close() calls safe (previously a second
     * close would re-close writers and double-decrement the shared count).
     */
    @Override
    public void close() throws Exception {
        for (LabelWriter<T> writer : labelWriters.values()) {
            logger.info("Closing file: {}", writer.outputId());
            writer.close();
            fileDescriptorCount.decrementAndGet();
        }
        labelWriters.clear();
    }

    public LabelWriter<T> get(Label label) {
        return labelWriters.get(label);
    }
}
| 4,266 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/GraphElementHandler.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import java.io.IOException;
/**
 * Callback for processing exported graph elements one at a time.
 * Implementations may hold open writers, hence {@link AutoCloseable}.
 */
public interface GraphElementHandler<T> extends AutoCloseable {
/**
 * Handles a single element.
 *
 * @param element     the element to process
 * @param allowTokens presumably controls whether Neptune token values
 *                    (e.g. ~id, ~label) may be treated as properties —
 *                    TODO confirm against implementations
 * @throws IOException if writing the element fails
 */
void handle(T element, boolean allowTokens) throws IOException;
}
| 4,267 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/ExportPropertyGraphJob.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.propertygraph.GremlinFilters;
import com.amazonaws.services.neptune.propertygraph.RangeConfig;
import com.amazonaws.services.neptune.propertygraph.RangeFactory;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.*;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.process.traversal.dsl.graph.GraphTraversalSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Orchestrates a property graph export. For each {@link ExportSpecification},
 * the job splits the work by label, runs concurrent export tasks over ranges
 * of elements, gathers the per-file label schemas the tasks produce, applies
 * the target's rewrite command (e.g. file consolidation), and finally returns
 * a {@link GraphSchema} rebuilt from the revised per-element-type schemas.
 */
public class ExportPropertyGraphJob {
private static final Logger logger = LoggerFactory.getLogger(ExportPropertyGraphJob.class);
private final Collection<ExportSpecification> exportSpecifications;
private final GraphSchema graphSchema;
private final GraphTraversalSource g;
private final RangeConfig rangeConfig;
private final GremlinFilters gremlinFilters;
private final ConcurrencyConfig concurrencyConfig;
private final PropertyGraphTargetConfig targetConfig;
private final FeatureToggles featureToggles;
// Upper bound on concurrently open file descriptors, shared across all tasks.
private final int maxFileDescriptorCount;
public ExportPropertyGraphJob(Collection<ExportSpecification> exportSpecifications,
GraphSchema graphSchema,
GraphTraversalSource g,
RangeConfig rangeConfig,
GremlinFilters gremlinFilters,
ConcurrencyConfig concurrencyConfig,
PropertyGraphTargetConfig targetConfig,
FeatureToggles featureToggles,
int maxFileDescriptorCount) {
this.exportSpecifications = exportSpecifications;
this.graphSchema = graphSchema;
this.g = g;
this.rangeConfig = rangeConfig;
this.gremlinFilters = gremlinFilters;
this.concurrencyConfig = concurrencyConfig;
this.targetConfig = targetConfig;
this.featureToggles = featureToggles;
this.maxFileDescriptorCount = maxFileDescriptorCount;
}
/**
 * Runs all export specifications sequentially (timing each) and returns a
 * GraphSchema assembled from the revised per-element-type schemas.
 *
 * @throws Exception if any export task fails
 */
public GraphSchema execute() throws Exception {
Map<GraphElementType, GraphElementSchemas> revisedGraphElementSchemas = new HashMap<>();
for (ExportSpecification exportSpecification : exportSpecifications) {
MasterLabelSchemas masterLabelSchemas =
Timer.timedActivity("exporting " + exportSpecification.description(),
(CheckedActivity.Callable<MasterLabelSchemas>) () -> export(exportSpecification));
revisedGraphElementSchemas.put(masterLabelSchemas.graphElementType(), masterLabelSchemas.toGraphElementSchemas());
}
return new GraphSchema(revisedGraphElementSchemas);
}
// Exports a single specification: splits it by label, runs a pool of tasks
// per label, collects the file-specific schemas, then applies the target's
// rewrite command to the merged master schemas.
private MasterLabelSchemas export(ExportSpecification exportSpecification) throws Exception {
Collection<FileSpecificLabelSchemas> fileSpecificLabelSchemas = new ArrayList<>();
// Shared counter so all per-label task pools respect one descriptor budget.
AtomicInteger fileDescriptorCount = new AtomicInteger();
for (ExportSpecification labelSpecificExportSpecification : exportSpecification.splitByLabel()) {
Collection<Future<FileSpecificLabelSchemas>> futures = new ArrayList<>();
RangeFactory rangeFactory = labelSpecificExportSpecification.createRangeFactory(g, rangeConfig, concurrencyConfig);
Status status = new Status(
StatusOutputFormat.Description,
String.format("%s: %s total", labelSpecificExportSpecification.description(), rangeFactory.numberOfItemsToExport()),
() -> String.format(" [%s GB free space]", targetConfig.freeSpaceInGigabytes()));
String description = String.format("writing %s as %s to %s",
labelSpecificExportSpecification.description(),
targetConfig.format().description(),
targetConfig.output().name());
System.err.println("Started " + description);
AtomicInteger fileIndex = new AtomicInteger();
Timer.timedActivity(description, (CheckedActivity.Runnable) () -> {
// One task per unit of concurrency; each task pulls ranges until exhausted.
ExecutorService taskExecutor = Executors.newFixedThreadPool(rangeFactory.concurrency());
for (int index = 1; index <= rangeFactory.concurrency(); index++) {
ExportPropertyGraphTask exportTask = labelSpecificExportSpecification.createExportTask(
graphSchema,
g,
targetConfig,
gremlinFilters,
rangeFactory,
status,
fileIndex,
fileDescriptorCount,
maxFileDescriptorCount
);
futures.add(taskExecutor.submit(exportTask));
}
taskExecutor.shutdown();
try {
// Long.MAX_VALUE nanoseconds: effectively wait indefinitely for all tasks.
if (!taskExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS)) {
logger.warn("Timeout expired with uncompleted tasks");
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
updateFileSpecificLabelSchemas(futures, fileSpecificLabelSchemas);
});
}
MasterLabelSchemas masterLabelSchemas = exportSpecification.createMasterLabelSchemas(fileSpecificLabelSchemas);
RewriteCommand rewriteCommand = targetConfig.createRewriteCommand(concurrencyConfig, featureToggles);
return rewriteCommand.execute(masterLabelSchemas);
}
// Validates that every task ran to completion and gathers its result.
// Throws IllegalStateException if any task was cancelled or is still running.
private void updateFileSpecificLabelSchemas(
Collection<Future<FileSpecificLabelSchemas>> futures,
Collection<FileSpecificLabelSchemas> fileSpecificLabelSchemas) throws Exception {
for (Future<FileSpecificLabelSchemas> future : futures) {
if (future.isCancelled()) {
throw new IllegalStateException("Unable to complete job because at least one task was cancelled");
}
if (!future.isDone()) {
throw new IllegalStateException("Unable to complete job because at least one task has not completed");
}
fileSpecificLabelSchemas.add(future.get());
}
}
}
| 4,268 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/JsonResource.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.CommandWriter;
import com.amazonaws.services.neptune.util.S3ObjectInfo;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import org.apache.commons.lang.StringUtils;
import java.io.*;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URI;
import java.nio.file.Files;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
 * Reads and writes a {@link Jsonizable} resource addressed by a URI that may
 * point at a local file, an S3 object, or an HTTPS location. Deserialization
 * is performed by reflectively invoking the target class's static
 * {@code fromJson(JsonNode)} method.
 */
public class JsonResource<T extends Jsonizable<E>, E> {

    private final String title;
    private final URI resourcePath;
    private final Class<? extends Jsonizable> clazz;

    public JsonResource(String title, URI resourcePath, Class<? extends Jsonizable> clazz) {
        this.title = title;
        this.resourcePath = resourcePath;
        this.clazz = clazz;
    }

    /**
     * Saves the object as pretty-printed JSON to the resource path.
     * Does nothing when the path is null, or when it refers to an s3/https
     * location (NOTE(review): remote locations appear intentionally read-only
     * here — presumably uploads happen elsewhere; confirm before changing).
     *
     * @throws IOException if writing the local file fails
     */
    public void save(Jsonizable<E> object, E param) throws IOException {
        if (resourcePath == null) {
            return;
        }
        if (resourcePath.getScheme() != null &&
                (resourcePath.getScheme().equals("s3") || resourcePath.getScheme().equals("https"))) {
            return;
        }
        File resourceFile = new File(resourcePath);
        try (Writer writer = new BufferedWriter(new OutputStreamWriter(Files.newOutputStream(resourceFile.toPath()), UTF_8))) {
            ObjectWriter objectWriter = new ObjectMapper().writer().withDefaultPrettyPrinter();
            String json = objectWriter.writeValueAsString(object.toJson(param));
            writer.write(json);
        }
    }

    /**
     * Loads the resource and deserializes it via the class's static
     * {@code fromJson(JsonNode)} method.
     *
     * @throws IllegalStateException if the resource path is null
     * @throws RuntimeException      if the class lacks the required static method,
     *                               or if invoking it fails
     * @throws IOException           if reading the resource fails
     */
    public T get() throws IOException {
        if (resourcePath == null) {
            throw new IllegalStateException("Resource path is null");
        }
        JsonNode json = readJson();
        try {
            Method method = clazz.getMethod("fromJson", JsonNode.class);
            Object o = method.invoke(null, json);
            @SuppressWarnings("unchecked")
            T returnValue = (T) o;
            return returnValue;
        } catch (NoSuchMethodException e) {
            // Fix: preserve the cause so the reflection failure is diagnosable.
            throw new RuntimeException("Jsonizable object must have a static fromJson(JsonNode) method", e);
        } catch (IllegalAccessException | InvocationTargetException e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Writes "title : path" as a message, if a resource path is configured.
     */
    public void writeResourcePathAsMessage(CommandWriter writer) {
        if (resourcePath == null) {
            return;
        }
        writer.writeMessage(title + " : " + resourcePath.toString());
    }

    // Dispatches on the URI scheme; an absent scheme is treated as a local file.
    private JsonNode readJson() throws IOException {
        String scheme = StringUtils.isNotEmpty(resourcePath.getScheme()) ? resourcePath.getScheme() : "file";
        switch (scheme) {
            case "https":
                return getFromHttps();
            case "s3":
                return getFromS3();
            default:
                return getFromFile();
        }
    }

    private JsonNode getFromFile() throws IOException {
        String pathname = resourcePath.toString();
        // Strip a leading "file://" (7 chars) if present.
        File resourceFile = pathname.startsWith("file://") ? new File(pathname.substring(7)) : new File(pathname);
        if (!resourceFile.exists()) {
            throw new IllegalStateException(String.format("%s does not exist", resourceFile));
        }
        if (resourceFile.isDirectory()) {
            throw new IllegalStateException(String.format("Expected a file, but found a directory: %s", resourceFile));
        }
        return new ObjectMapper().readTree(resourceFile);
    }

    private JsonNode getFromS3() throws IOException {
        S3ObjectInfo s3ObjectInfo = new S3ObjectInfo(resourcePath.toString());
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        try (InputStream stream = s3.getObject(s3ObjectInfo.bucket(), s3ObjectInfo.key()).getObjectContent()) {
            return new ObjectMapper().readTree(stream);
        }
    }

    private JsonNode getFromHttps() throws IOException {
        return new ObjectMapper().readTree(resourcePath.toURL());
    }
}
| 4,269 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/RewriteAndMergeCsv.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.schema.*;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.lang3.ArrayUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.Reader;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * RewriteCommand that rewrites the per-task CSV files for each label into a
 * single consolidated file per label, using the master (union) label schema
 * so that all rows share one column layout. Source files are deleted after
 * merging unless the Keep_Rewritten_Files feature toggle is set.
 */
public class RewriteAndMergeCsv implements RewriteCommand {
private static final Logger logger = LoggerFactory.getLogger(RewriteAndMergeCsv.class);
private final PropertyGraphTargetConfig targetConfig;
private final ConcurrencyConfig concurrencyConfig;
private final FeatureToggles featureToggles;
public RewriteAndMergeCsv(PropertyGraphTargetConfig targetConfig,
ConcurrencyConfig concurrencyConfig,
FeatureToggles featureToggles) {
this.targetConfig = targetConfig;
this.concurrencyConfig = concurrencyConfig;
this.featureToggles = featureToggles;
}
/**
 * Rewrites and merges all files for the given element type, timing the work.
 */
@Override
public MasterLabelSchemas execute(MasterLabelSchemas masterLabelSchemas) throws Exception {
GraphElementType graphElementType = masterLabelSchemas.graphElementType();
System.err.println(String.format("Rewriting and merging %s files...", graphElementType.name()));
return Timer.timedActivity(String.format("rewriting and merging %s files", graphElementType.name()),
(CheckedActivity.Callable<MasterLabelSchemas>) () ->
rewriteFiles(masterLabelSchemas, graphElementType, targetConfig));
}
// Runs one rewriteAndMerge task per master label schema in a fixed-size pool,
// then collects the updated schemas, failing if any task was cancelled or incomplete.
private MasterLabelSchemas rewriteFiles(MasterLabelSchemas masterLabelSchemas,
GraphElementType graphElementType,
PropertyGraphTargetConfig targetConfig) throws Exception {
Map<Label, MasterLabelSchema> updatedSchemas = new HashMap<>();
Collection<Future<MasterLabelSchema>> futures = new ArrayList<>();
ExecutorService taskExecutor = Executors.newFixedThreadPool(concurrencyConfig.concurrency());
for (MasterLabelSchema masterLabelSchema : masterLabelSchemas.schemas()) {
futures.add(taskExecutor.submit(() -> rewriteAndMerge(targetConfig, graphElementType, masterLabelSchema)));
}
taskExecutor.shutdown();
try {
// Long.MAX_VALUE nanoseconds: effectively wait indefinitely for all tasks.
taskExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException(e);
}
for (Future<MasterLabelSchema> future : futures) {
if (future.isCancelled()) {
throw new IllegalStateException("Unable to complete rewrite because at least one task was cancelled");
}
if (!future.isDone()) {
throw new IllegalStateException("Unable to complete rewrite because at least one task has not completed");
}
MasterLabelSchema masterLabelSchema = future.get();
updatedSchemas.put(masterLabelSchema.labelSchema().label(), masterLabelSchema);
}
return new MasterLabelSchemas(updatedSchemas, graphElementType);
}
// Merges all per-file CSVs for a single label into one consolidated file,
// re-printing every row against a fresh copy of the master schema.
private MasterLabelSchema rewriteAndMerge(PropertyGraphTargetConfig targetConfig,
GraphElementType graphElementType,
MasterLabelSchema masterLabelSchema) throws Exception {
// Work on a copy with reset stats: observations are re-recorded during the merge.
LabelSchema masterSchema = masterLabelSchema.labelSchema().createCopy();
masterSchema.initStats();
String targetFilename = Directories.fileName(String.format("%s.consolidated",
masterSchema.label().fullyQualifiedLabel()));
Collection<String> renamedFiles = new ArrayList<>();
try (PropertyGraphPrinter printer = graphElementType.writerFactory().createPrinter(
targetFilename,
masterSchema,
targetConfig.forFileConsolidation())) {
renamedFiles.add(printer.outputId());
for (FileSpecificLabelSchema fileSpecificLabelSchema : masterLabelSchema.fileSpecificLabelSchemas()) {
// DeletableFile removes the source file on close unless doNotDelete() is called.
try (DeletableFile file = new DeletableFile(new File(fileSpecificLabelSchema.outputId()))) {
if (featureToggles.containsFeature(FeatureToggle.Keep_Rewritten_Files)){
file.doNotDelete();
}
LabelSchema labelSchema = fileSpecificLabelSchema.labelSchema();
Label label = labelSchema.label();
// Headers are reconstructed from the per-file schema because the
// source files were written without header rows.
String[] additionalElementHeaders = label.hasFromAndToLabels() ?
new String[]{"~fromLabels", "~toLabels"} :
new String[]{};
String[] filePropertyHeaders =
labelSchema.propertySchemas().stream()
.map(p -> p.property().toString())
.collect(Collectors.toList())
.toArray(new String[]{});
String[] fileHeaders = ArrayUtils.addAll(
graphElementType.tokenNames().toArray(new String[]{}),
ArrayUtils.addAll(additionalElementHeaders, filePropertyHeaders));
logger.info("File: {}, Headers: [{}]", fileSpecificLabelSchema.outputId(), fileHeaders);
try (Reader in = file.reader()) {
CSVFormat format = CSVFormat.RFC4180
.withSkipHeaderRecord(false) // files will not have headers
.withHeader(fileHeaders);
Iterable<CSVRecord> records = format.parse(in);
for (CSVRecord record : records) {
printer.printStartRow();
if (graphElementType == GraphElementType.nodes) {
// Multi-label nodes are ';'-delimited in the ~label column.
printer.printNode(record.get("~id"), Arrays.asList(record.get("~label").split(";")));
} else {
if (label.hasFromAndToLabels()) {
printer.printEdge(
record.get("~id"),
record.get("~label"),
record.get("~from"),
record.get("~to"),
Arrays.asList(record.get("~fromLabels").split(";")),
Arrays.asList(record.get("~toLabels").split(";")));
} else {
printer.printEdge(record.get("~id"), record.get("~label"), record.get("~from"), record.get("~to"));
}
}
printer.printProperties(record.toMap(), false);
printer.printEndRow();
}
} catch (Exception e) {
logger.error("Error while rewriting file: {}", fileSpecificLabelSchema.outputId(), e);
// Keep the source file for post-mortem inspection on failure.
file.doNotDelete();
throw e;
}
}
}
}
// The consolidated file becomes the sole file-specific schema for this label.
return new MasterLabelSchema(
masterSchema,
renamedFiles.stream().map(f -> new FileSpecificLabelSchema(f, targetConfig.format(), masterSchema)).collect(Collectors.toList()));
}
}
| 4,270 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/ExportPropertyGraphTask.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.propertygraph.*;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.FileSpecificLabelSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * A single export worker. Repeatedly obtains element ranges from the shared
 * {@link RangeFactory} and queries the graph for each range, writing results
 * via per-label writers, until the ranges are exhausted or the shared
 * {@link Status} is halted. Returns the schemas of the files it produced.
 */
public class ExportPropertyGraphTask implements Callable<FileSpecificLabelSchemas> {
private static final Logger logger = LoggerFactory.getLogger(ExportPropertyGraphTask.class);
private final GraphElementSchemas graphElementSchemas;
private final LabelsFilter labelsFilter;
private final GraphClient<? extends PGResult> graphClient;
private final WriterFactory<? extends PGResult> writerFactory;
private final PropertyGraphTargetConfig targetConfig;
private final RangeFactory rangeFactory;
private final GremlinFilters gremlinFilters;
private final Status status;
private final AtomicInteger index;
// LRU cache of open writers, bounded by the shared file descriptor budget.
private final LabelWriters<PGResult> labelWriters;
public ExportPropertyGraphTask(GraphElementSchemas graphElementSchemas,
LabelsFilter labelsFilter,
GraphClient<? extends PGResult> graphClient,
WriterFactory<? extends PGResult> writerFactory,
PropertyGraphTargetConfig targetConfig,
RangeFactory rangeFactory,
GremlinFilters gremlinFilters,
Status status,
AtomicInteger index,
AtomicInteger fileDescriptorCount,
int maxFileDescriptorCount) {
this.graphElementSchemas = graphElementSchemas;
this.labelsFilter = labelsFilter;
this.graphClient = graphClient;
this.writerFactory = writerFactory;
this.targetConfig = targetConfig;
this.rangeFactory = rangeFactory;
this.gremlinFilters = gremlinFilters;
this.status = status;
this.index = index;
this.labelWriters = new LabelWriters<>(fileDescriptorCount, maxFileDescriptorCount);
}
/**
 * Pulls ranges until none remain (or the shared status halts), querying the
 * graph for each and recording results through the handler. The handler —
 * and with it all open writers — is always closed in the finally block.
 */
@Override
public FileSpecificLabelSchemas call() {
FileSpecificLabelSchemas fileSpecificLabelSchemas = new FileSpecificLabelSchemas();
CountingHandler handler = new CountingHandler(
new ExportPGTaskHandler(
fileSpecificLabelSchemas,
graphElementSchemas,
targetConfig,
writerFactory,
labelWriters,
graphClient,
status,
index,
labelsFilter
));
try {
while (status.allowContinue()) {
Range range = rangeFactory.nextRange();
if (range.isEmpty()) {
// No more work to hand out: stop this task.
status.halt();
} else {
graphClient.queryForValues(handler, range, labelsFilter, gremlinFilters, graphElementSchemas);
// Fewer results than the range size, or an exhausted factory,
// means there is nothing further to fetch.
if (range.sizeExceeds(handler.numberProcessed()) || rangeFactory.isExhausted()) {
status.halt();
}
}
}
} finally {
try {
handler.close();
} catch (Exception e) {
logger.error("Error while closing handler", e);
}
}
return fileSpecificLabelSchemas;
}
}
| 4,271 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/JsonPrinterOptions.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
/**
 * Immutable options for JSON property graph output, created via the nested
 * {@link Builder}.
 */
public class JsonPrinterOptions {

    private final boolean strictCardinality;
    private final TokenPrefix tokenPrefix;

    /** Entry point for constructing a new set of options. */
    public static Builder builder() {
        return new Builder();
    }

    private JsonPrinterOptions(boolean strictCardinality, TokenPrefix tokenPrefix) {
        this.strictCardinality = strictCardinality;
        this.tokenPrefix = tokenPrefix;
    }

    /** Returns the strict-cardinality flag. */
    public boolean strictCardinality() {
        return strictCardinality;
    }

    /** Returns the token prefix applied to output keys. */
    public TokenPrefix tokenPrefix() {
        return tokenPrefix;
    }

    /** Returns a builder pre-populated with this instance's values. */
    public Builder copy() {
        Builder builder = new Builder();
        builder.setStrictCardinality(strictCardinality);
        builder.setTokenPrefix(tokenPrefix);
        return builder;
    }

    public static class Builder {

        private boolean strictCardinality = false;
        private TokenPrefix tokenPrefix = new TokenPrefix();

        public Builder setStrictCardinality(boolean strictCardinality) {
            this.strictCardinality = strictCardinality;
            return this;
        }

        public Builder setTokenPrefix(TokenPrefix tokenPrefix) {
            this.tokenPrefix = tokenPrefix;
            return this;
        }

        public JsonPrinterOptions build() {
            return new JsonPrinterOptions(strictCardinality, tokenPrefix);
        }
    }
}
| 4,272 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/VariableRowCsvPropertyGraphPrinter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * CSV printer that tolerates rows with varying property sets. It delegates to
 * a {@link CsvPropertyGraphPrinter} and mutates the shared {@link LabelSchema}
 * as it goes: properties first seen after the first row — and known properties
 * absent from a row — are marked nullable. Header printing is a no-op here;
 * presumably headers are produced later from the accumulated schema (e.g.
 * during file consolidation) — TODO confirm.
 */
public class VariableRowCsvPropertyGraphPrinter implements PropertyGraphPrinter {
private final CsvPropertyGraphPrinter csvPropertyGraphPrinter;
private final OutputWriter writer;
private final LabelSchema labelSchema;
// False only while printing the very first row; any property first observed
// afterwards must be nullable because earlier rows lacked it.
private boolean isNullable = false;
public VariableRowCsvPropertyGraphPrinter(OutputWriter writer,
LabelSchema labelSchema,
PrinterOptions printerOptions) {
// Only the multi-value separator is carried over from the caller's CSV options.
CsvPrinterOptions csvPrinterOptions = CsvPrinterOptions.builder()
.setMultiValueSeparator(printerOptions.csv().multiValueSeparator())
.build();
this.writer = writer;
this.labelSchema = labelSchema;
this.csvPropertyGraphPrinter = new CsvPropertyGraphPrinter(
writer,
labelSchema,
new PrinterOptions(csvPrinterOptions),
true);
}
@Override
public String outputId() {
return csvPropertyGraphPrinter.outputId();
}
@Override
public void printHeaderMandatoryColumns(String... columns) {
// Do nothing
}
@Override
public void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns) {
// Do nothing
}
/**
 * Prints known properties via the delegate, marks known-but-absent properties
 * nullable, then appends properties not yet in the schema (recording an
 * observation for each so the schema learns their types).
 */
@Override
public void printProperties(Map<?, ?> properties) {
// Print known properties
csvPropertyGraphPrinter.printProperties(properties);
// Check to see whether known properties are present
for (PropertySchema propertySchema : labelSchema.propertySchemas()) {
if (!properties.containsKey(propertySchema.property())) {
propertySchema.makeNullable();
}
}
// Print unknown properties
for (Map.Entry<?, ?> property : properties.entrySet()) {
Object key = property.getKey();
if (!labelSchema.containsProperty(key)) {
Object value = property.getValue();
PropertySchema propertySchema = new PropertySchema(key);
PropertySchema.PropertyValueMetadata propertyValueMetadata = propertySchema.accept(value, true);
if (isNullable) {
propertySchema.makeNullable();
}
labelSchema.put(key, propertySchema);
labelSchema.recordObservation(propertySchema, value, propertyValueMetadata);
csvPropertyGraphPrinter.printProperty(propertySchema, value);
}
}
// All rows after the first one make newly-discovered properties nullable.
isNullable = true;
}
@Override
public void printProperties(Map<?, ?> properties, boolean applyFormatting) throws IOException {
printProperties(properties);
}
@Override
public void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException {
printProperties(properties);
}
@Override
public void printEdge(String id, String label, String from, String to) throws IOException {
printEdge(id, label, from, to, null, null);
}
@Override
public void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException {
csvPropertyGraphPrinter.printEdge(id, label, from, to, fromLabels, toLabels);
}
@Override
public void printNode(String id, List<String> labels) {
csvPropertyGraphPrinter.printNode(id, labels);
}
@Override
public void printStartRow() {
csvPropertyGraphPrinter.printStartRow();
}
@Override
public void printEndRow() {
csvPropertyGraphPrinter.printEndRow();
}
@Override
public void close() throws Exception {
writer.close();
}
}
| 4,273 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/WriterFactory.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import java.io.IOException;
/**
 * Factory for the printer and label writer used to export graph elements of type
 * {@code T} for a particular label.
 */
public interface WriterFactory<T> {
    /**
     * Creates a printer writing output for the given label schema to the supplied target.
     *
     * @throws IOException if the output target cannot be created
     */
    PropertyGraphPrinter createPrinter(String name, LabelSchema labelSchema, PropertyGraphTargetConfig targetConfig) throws IOException;

    /**
     * Creates a writer that serializes elements with the given label via the supplied printer.
     */
    LabelWriter<T> createLabelWriter(PropertyGraphPrinter propertyGraphPrinter, Label label);
}
| 4,274 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/PropertyGraphExportFormat.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.FileExtension;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
import org.apache.commons.io.FilenameUtils;
import java.io.IOException;
/**
 * Supported property-graph export formats. Each format knows its file extension, how to
 * create printers for fixed and inferred schemas, and which post-export rewrite step
 * (if any) it requires.
 */
public enum PropertyGraphExportFormat implements FileExtension {
    json {
        @Override
        public String extension() {
            return "json";
        }

        @Override
        PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            // Records are separated by the writer's line separator.
            JsonGenerator generator = createJsonGenerator(writer, writer.lineSeparator());
            return new JsonPropertyGraphPrinter(writer, generator, labelSchema, printerOptions);
        }

        @Override
        PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            JsonGenerator generator = createJsonGenerator(writer, writer.lineSeparator());
            // 'true' allows the printer to update the schema as values are observed.
            return new JsonPropertyGraphPrinter(writer, generator, labelSchema, printerOptions, true);
        }

        @Override
        public String description() {
            return "JSON";
        }

        @Override
        public RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles) {
            // JSON output never needs a post-export rewrite.
            return RewriteCommand.NULL_COMMAND;
        }
    },
    csv {
        @Override
        public String extension() {
            return "csv";
        }

        @Override
        PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) {
            PrinterOptions newPrinterOptions = new PrinterOptions(
                    printerOptions.csv().copy()
                            .setIncludeHeaders(true)
                            .build());
            return new CsvPropertyGraphPrinter(writer, labelSchema, newPrinterOptions);
        }

        @Override
        PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            return new VariableRowCsvPropertyGraphPrinter(writer, labelSchema, printerOptions);
        }

        @Override
        public String description() {
            return "CSV";
        }

        @Override
        public RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles) {
            return createCsvRewriteCommand(targetConfig, concurrencyConfig, inferSchema, featureToggles);
        }
    },
    csvNoHeaders {
        @Override
        public String extension() {
            return "csv";
        }

        @Override
        PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) {
            PrinterOptions newPrinterOptions = new PrinterOptions(
                    printerOptions.csv().copy()
                            .setIncludeHeaders(false)
                            .build());
            return new CsvPropertyGraphPrinter(writer, labelSchema, newPrinterOptions);
        }

        @Override
        PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            return new VariableRowCsvPropertyGraphPrinter(writer, labelSchema, printerOptions);
        }

        @Override
        public String description() {
            return "CSV (no headers)";
        }

        @Override
        public RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles) {
            return createCsvRewriteCommand(targetConfig, concurrencyConfig, inferSchema, featureToggles);
        }
    },
    neptuneStreamsJson {
        @Override
        public String extension() {
            return "json";
        }

        @Override
        PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            // No separator between records: the printer manages record boundaries itself.
            JsonGenerator generator = createJsonGenerator(writer, "");
            return new NeptuneStreamsJsonPropertyGraphPrinter(writer, generator);
        }

        @Override
        PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            // Streams output does not depend on a pre-computed schema.
            return createPrinter(writer, labelSchema, printerOptions);
        }

        @Override
        public String description() {
            return "JSON (Neptune Streams format)";
        }

        @Override
        public RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles) {
            return RewriteCommand.NULL_COMMAND;
        }
    },
    neptuneStreamsSimpleJson {
        @Override
        public String extension() {
            return "json";
        }

        @Override
        PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            JsonGenerator generator = createJsonGenerator(writer, "");
            return new NeptuneStreamsSimpleJsonPropertyGraphPrinter(writer, generator);
        }

        @Override
        PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
            return createPrinter(writer, labelSchema, printerOptions);
        }

        @Override
        public String description() {
            return "JSON (Neptune Streams simple format)";
        }

        @Override
        public RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles) {
            return RewriteCommand.NULL_COMMAND;
        }
    };

    /**
     * Creates a JSON generator over the writer, with {@code s} used as the separator
     * between top-level values. FLUSH_PASSED_TO_STREAM is disabled so that flushing the
     * generator does not flush the underlying writer.
     */
    private static JsonGenerator createJsonGenerator(OutputWriter writer, String s) throws IOException {
        JsonGenerator generator = new JsonFactory().createGenerator(writer.writer());
        generator.setPrettyPrinter(new MinimalPrettyPrinter(s));
        generator.disable(JsonGenerator.Feature.FLUSH_PASSED_TO_STREAM);
        return generator;
    }

    /**
     * Rewrite logic shared by the CSV formats (previously duplicated in both 'csv' and
     * 'csvNoHeaders'): merged output always requires a rewrite-and-merge step; otherwise
     * a rewrite is only needed when the schema was inferred during the export.
     */
    private static RewriteCommand createCsvRewriteCommand(PropertyGraphTargetConfig targetConfig,
                                                          ConcurrencyConfig concurrencyConfig,
                                                          boolean inferSchema,
                                                          FeatureToggles featureToggles) {
        if (targetConfig.mergeFiles()) {
            return new RewriteAndMergeCsv(targetConfig, concurrencyConfig, featureToggles);
        }
        if (inferSchema) {
            return new RewriteCsv(targetConfig, concurrencyConfig, featureToggles);
        }
        return RewriteCommand.NULL_COMMAND;
    }

    abstract PropertyGraphPrinter createPrinter(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException;

    abstract PropertyGraphPrinter createPrinterForInferredSchema(OutputWriter writer, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException;

    /** Human-readable description of the format. */
    public abstract String description();

    /** Creates the post-export rewrite step required by this format (may be a no-op). */
    public abstract RewriteCommand createRewriteCommand(PropertyGraphTargetConfig targetConfig, ConcurrencyConfig concurrencyConfig, boolean inferSchema, FeatureToggles featureToggles);

    /**
     * Replaces the extension of the given filename with {@code replacement}.
     */
    public String replaceExtension(String filename, String replacement) {
        return String.format("%s.%s", FilenameUtils.removeExtension(filename), replacement);
    }
}
| 4,275 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/CommaPrinter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
/**
 * Emits the commas between CSV fields: the first call to {@link #printComma()} after
 * construction or {@link #init()} prints nothing, every subsequent call prints a comma.
 */
class CommaPrinter {

    private final OutputWriter outputWriter;
    // True until the first field of the current row has been announced.
    private boolean firstField = true;

    CommaPrinter(OutputWriter outputWriter) {
        this.outputWriter = outputWriter;
    }

    /**
     * Prints a field separator, unless this is the first field of the row.
     */
    void printComma() {
        if (firstField) {
            firstField = false;
        } else {
            outputWriter.print(",");
        }
    }

    /**
     * Resets the printer at the start of a new row.
     */
    void init() {
        firstField = true;
    }
}
| 4,276 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/CsvPropertyGraphPrinter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import com.amazonaws.services.neptune.util.SemicolonUtils;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * {@link PropertyGraphPrinter} that writes nodes and edges as CSV rows whose columns are
 * driven by a {@link LabelSchema}. Comma placement is delegated to a {@link CommaPrinter}
 * so that rows never start with a separator.
 */
public class CsvPropertyGraphPrinter implements PropertyGraphPrinter {

    private final OutputWriter writer;
    // Column definitions for this label; may be updated if allowUpdateSchema is true.
    private final LabelSchema labelSchema;
    private final CsvPrinterOptions printerOptions;
    // When true, observed values may widen/update the property schemas.
    private final boolean allowUpdateSchema;
    // Tracks whether a separator is needed before the next field of the current row.
    private final CommaPrinter commaPrinter;

    public CsvPropertyGraphPrinter(OutputWriter writer,
                                   LabelSchema labelSchema,
                                   PrinterOptions printerOptions) {
        // By default the schema is treated as fixed.
        this(writer, labelSchema, printerOptions, false);
    }

    public CsvPropertyGraphPrinter(OutputWriter writer,
                                   LabelSchema labelSchema,
                                   PrinterOptions printerOptions,
                                   boolean allowUpdateSchema) {
        this.writer = writer;
        this.labelSchema = labelSchema;
        this.printerOptions = printerOptions.csv();
        this.commaPrinter = new CommaPrinter(writer);
        this.allowUpdateSchema = allowUpdateSchema;
    }

    @Override
    public String outputId() {
        return writer.outputId();
    }

    /**
     * Prints the fixed (token) header columns, e.g. id/label. Headers are only written
     * for new targets; no line separator is emitted yet — the remaining columns follow
     * on the same header line.
     */
    @Override
    public void printHeaderMandatoryColumns(String... columns) {
        if (printerOptions.includeHeaders() && writer.isNewTarget()) {
            TokenPrefix tokenPrefix = printerOptions.tokenPrefix();
            for (String column : columns) {
                commaPrinter.printComma();
                writer.print(tokenPrefix.format(column));
            }
        }
    }

    /**
     * Prints the property header columns (optionally with type definitions) and
     * terminates the header line.
     */
    @Override
    public void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns) {
        if (printerOptions.includeHeaders() && writer.isNewTarget()) {
            for (PropertySchema property : remainingColumns) {
                commaPrinter.printComma();
                if (printerOptions.includeTypeDefinitions()) {
                    writer.print(property.nameWithDataType(printerOptions.escapeCsvHeaders()));
                } else {
                    writer.print(property.nameWithoutDataType(printerOptions.escapeCsvHeaders()));
                }
            }
            writer.print(writer.lineSeparator());
        }
    }

    @Override
    public void printProperties(Map<?, ?> properties) {
        printProperties(properties, true);
    }

    /**
     * Prints one value per schema column, in schema order. Absent properties produce an
     * empty field (separator only).
     */
    @Override
    public void printProperties(Map<?, ?> properties, boolean applyFormatting) {
        for (PropertySchema propertySchema : labelSchema.propertySchemas()) {
            Object property = propertySchema.property();
            if (properties.containsKey(property)) {
                Object value = properties.get(property);
                // Record the observation (and possibly update the schema) before printing.
                PropertySchema.PropertyValueMetadata propertyValueMetadata = propertySchema.accept(value, allowUpdateSchema);
                labelSchema.recordObservation(propertySchema, value, propertyValueMetadata);
                printProperty(propertySchema, value, applyFormatting);
            } else {
                // Empty cell for a missing property.
                commaPrinter.printComma();
            }
        }
    }

    public void printProperty(PropertySchema schema, Object value) {
        printProperty(schema, value, true);
    }

    private void printProperty(PropertySchema schema, Object value, boolean applyFormatting) {
        DataType dataType = schema.dataType();
        commaPrinter.printComma();
        if (applyFormatting) {
            // Lists are joined using the configured multi-value separator.
            String formattedValue = isList(value) ?
                    formatList(value, dataType, printerOptions) :
                    dataType.format(value, printerOptions.escapeNewline());
            writer.print(formattedValue);
        } else {
            // Unformatted path (e.g. when rewriting already-formatted values).
            if (dataType == DataType.String) {
                if (isSingleValueColumnWithSemicolonSeparator(schema)) {
                    // Single-value string columns may contain escaped semicolons that
                    // must be unescaped when the multi-value separator is ';'.
                    writer.print(DataType.String.format(SemicolonUtils.unescape(value.toString()), printerOptions.escapeNewline()));
                } else {
                    writer.print(DataType.String.format(value, printerOptions.escapeNewline()));
                }
            } else {
                writer.print(String.valueOf(value));
            }
        }
    }

    private boolean isSingleValueColumnWithSemicolonSeparator(PropertySchema schema) {
        return !schema.isMultiValue() && printerOptions.isSemicolonSeparator();
    }

    @Override
    public void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException {
        // id/streamOperation are not part of the CSV row format; ignored.
        printProperties(properties);
    }

    @Override
    public void printEdge(String id, String label, String from, String to) throws IOException {
        printEdge(id, label, from, to, null, null);
    }

    /**
     * Prints the fixed edge columns: id, label, from, to, and (if supplied) the
     * from/to vertex label lists.
     */
    @Override
    public void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException {
        commaPrinter.printComma();
        writer.print(DataType.String.format(id, printerOptions.escapeNewline()));
        commaPrinter.printComma();
        writer.print(DataType.String.format(label, printerOptions.escapeNewline()));
        commaPrinter.printComma();
        writer.print(DataType.String.format(from, printerOptions.escapeNewline()));
        commaPrinter.printComma();
        writer.print(DataType.String.format(to, printerOptions.escapeNewline()));
        if (fromLabels != null) {
            commaPrinter.printComma();
            writer.print(DataType.String.formatList(fromLabels, printerOptions));
        }
        if (toLabels != null) {
            commaPrinter.printComma();
            writer.print(DataType.String.formatList(toLabels, printerOptions));
        }
    }

    /**
     * Prints the fixed node columns: id and the label list.
     */
    @Override
    public void printNode(String id, List<String> labels) {
        commaPrinter.printComma();
        writer.print(DataType.String.format(id, printerOptions.escapeNewline()));
        commaPrinter.printComma();
        writer.print(DataType.String.formatList(labels, printerOptions));
    }

    @Override
    public void printStartRow() {
        writer.startCommit();
        // Reset the separator state so the first field of the row gets no leading comma.
        commaPrinter.init();
    }

    @Override
    public void printEndRow() {
        writer.print(writer.lineSeparator());
        writer.endCommit();
    }

    private String formatList(Object value, DataType dataType, CsvPrinterOptions options) {
        List<?> values = (List<?>) value;
        return dataType.formatList(values, options);
    }

    private boolean isList(Object value) {
        return value instanceof List<?>;
    }

    @Override
    public void close() throws Exception {
        writer.close();
    }
}
| 4,277 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/RenameableFiles.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import java.io.File;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
/**
 * Collects files to be renamed (each to a new filename in its own directory) and
 * performs the renames in a single batch.
 */
class RenameableFiles {

    // Maps each file to its intended new filename.
    private final Map<File, String> entries = new HashMap<>();

    /**
     * Registers a file to be renamed to the given filename (in the same directory).
     */
    public void add(File file, String filename) {
        entries.put(file, filename);
    }

    /**
     * Renames all registered files.
     *
     * @return the files after renaming
     * @throws IllegalStateException if any file could not be renamed
     */
    public Collection<File> rename() {
        Collection<File> renamedFiles = new ArrayList<>();
        for (Map.Entry<File, String> entry : entries.entrySet()) {
            File file = entry.getKey();
            File renamedFile = new File(file.getParentFile(), entry.getValue());
            // File.renameTo() signals failure via its return value rather than an
            // exception; the original code ignored it, silently losing files.
            if (!file.renameTo(renamedFile)) {
                throw new IllegalStateException(
                        "Unable to rename file: " + file.getAbsolutePath() + " to " + renamedFile.getAbsolutePath());
            }
            renamedFiles.add(renamedFile);
        }
        return renamedFiles;
    }
}
| 4,278 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/Jsonizable.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.fasterxml.jackson.databind.JsonNode;
/**
 * A type that can render itself as a Jackson {@link JsonNode}, given a context argument
 * of type {@code T}.
 */
public interface Jsonizable<T> {
    /**
     * Creates a JSON representation.
     *
     * @param o context object used during serialization — presumably format/options
     *          state; confirm against implementors
     * @return the JSON representation
     */
    JsonNode toJson(T o);
}
| 4,279 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/RewriteCommand.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.schema.MasterLabelSchemas;
/**
 * A post-export step that may rewrite exported files and return the (possibly updated)
 * master label schemas.
 */
public interface RewriteCommand {
    /** No-op command: returns the supplied schemas unchanged. */
    RewriteCommand NULL_COMMAND = masterLabelSchemas -> masterLabelSchemas;

    /**
     * Executes the rewrite.
     *
     * @param masterLabelSchemas schemas describing the exported data
     * @return the schemas after rewriting (may be the same instance)
     */
    MasterLabelSchemas execute(MasterLabelSchemas masterLabelSchemas) throws Exception;
}
| 4,280 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/DeletableFile.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.Reader;
/**
 * Wraps a {@link File} that is deleted when this handle is closed, unless deletion has
 * been explicitly disabled via {@link #doNotDelete()}.
 */
class DeletableFile implements AutoCloseable {

    private final File file;
    // Whether close() should delete the underlying file.
    private boolean deleteOnClose = true;

    DeletableFile(File file) {
        this.file = file;
    }

    /**
     * Opens a fresh reader over the underlying file.
     */
    public Reader reader() throws FileNotFoundException {
        return new FileReader(file);
    }

    /**
     * Returns the file name (without any directory path).
     */
    public String name() {
        return file.getName();
    }

    /**
     * Prevents the underlying file from being deleted on close.
     */
    public void doNotDelete() {
        deleteOnClose = false;
    }

    @Override
    public void close() {
        if (!deleteOnClose || !file.exists()) {
            return;
        }
        if (!file.delete()) {
            throw new IllegalStateException("Unable to delete file: " + file.getAbsolutePath());
        }
    }

    @Override
    public String toString() {
        return file.getAbsolutePath();
    }
}
| 4,281 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/NeptuneStreamsJsonPropertyGraphPrinter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
 * {@link PropertyGraphPrinter} that emits one Neptune Streams change record per
 * property/label/edge, each with an eventId (commitNum/opNum), a data payload and an
 * "ADD" op. Commit numbers are drawn from a shared generator so records from concurrent
 * printers get distinct commit numbers; op numbers restart at 1 for each row.
 */
public class NeptuneStreamsJsonPropertyGraphPrinter implements PropertyGraphPrinter {

    // Shared across all instances so each row gets a globally unique commit number.
    private static final AtomicLong COMMIT_NUM_GENERATOR = new AtomicLong(1);

    private final OutputWriter writer;
    private final JsonGenerator generator;
    // Commit number of the row currently being printed.
    private long commitNum = 1;
    // Operation number within the current commit; incremented per record.
    private int opNum = 1;

    public NeptuneStreamsJsonPropertyGraphPrinter(OutputWriter writer, JsonGenerator generator) throws IOException {
        this.writer = writer;
        this.generator = generator;
    }

    @Override
    public String outputId() {
        return writer.outputId();
    }

    @Override
    public void printHeaderMandatoryColumns(String... columns) {
        // Do nothing
    }

    @Override
    public void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns) {
        // Do nothing
    }

    @Override
    public void printProperties(Map<?, ?> properties) throws IOException {
        // Streams records require an element id and operation type; see the
        // three-argument overload.
        throw new RuntimeException("Neptune Streams JSON is not supported for this command");
    }

    @Override
    public void printProperties(Map<?, ?> properties, boolean applyFormatting) throws IOException {
        printProperties(properties);
    }

    /**
     * Emits one change record per property value (multi-valued properties produce one
     * record per element). Data types are inferred from each value.
     */
    @Override
    public void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException {
        for (Map.Entry<?, ?> entry : properties.entrySet()) {
            String key = String.valueOf(entry.getKey());
            Object value = entry.getValue();
            if (isList(value)) {
                List<?> values = (List<?>) value;
                for (Object o : values) {
                    PropertySchema propertySchema = new PropertySchema(key);
                    propertySchema.accept(o, true);
                    printRecord(id, streamOperation, key, o, propertySchema.dataType());
                }
            } else {
                PropertySchema propertySchema = new PropertySchema(key);
                propertySchema.accept(value, true);
                printRecord(id, streamOperation, key, value, propertySchema.dataType());
            }
        }
    }

    @Override
    public void printEdge(String id, String label, String from, String to) throws IOException {
        printEdge(id, label, from, to, null, null);
    }

    @Override
    public void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException {
        // Edges are emitted as a single "e" record carrying the label, from and to.
        // Note: fromLabels/toLabels are not represented in the streams format.
        printRecord(id, "e", "label", label, DataType.String, from, to);
    }

    @Override
    public void printNode(String id, List<String> labels) throws IOException {
        // One vertex-label ("vl") record per label.
        for (String l : labels) {
            printRecord(id, "vl", "label", l, DataType.String);
        }
    }

    @Override
    public void printStartRow() throws IOException {
        // New row == new commit; operation numbering restarts.
        commitNum = COMMIT_NUM_GENERATOR.getAndIncrement();
        opNum = 1;
        writer.startCommit();
    }

    @Override
    public void printEndRow() throws IOException {
        generator.flush();
        writer.endCommit();
    }

    @Override
    public void close() throws Exception {
        generator.close();
        writer.close();
    }

    private void printRecord(String id, String streamOperation, String key, Object value, DataType dataType) throws IOException {
        printRecord(id, streamOperation, key, value, dataType, null, null);
    }

    /**
     * Writes a single change record:
     * {"eventId":{"commitNum":..,"opNum":..},
     *  "data":{"id":..,"type":..,"key":..,"value":{"value":..,"dataType":..}[,"from":..][,"to":..]},
     *  "op":"ADD"}
     */
    private void printRecord(String id, String streamOperation, String key, Object value, DataType dataType, String from, String to) throws IOException {
        writer.startOp();
        generator.writeStartObject();
        generator.writeObjectFieldStart("eventId");
        generator.writeNumberField("commitNum", commitNum);
        generator.writeNumberField("opNum", opNum++);
        generator.writeEndObject();
        generator.writeObjectFieldStart("data");
        generator.writeStringField("id", id);
        generator.writeStringField("type", streamOperation);
        generator.writeStringField("key", key);
        generator.writeObjectFieldStart("value");
        // Values are rendered as strings alongside their declared data type.
        dataType.printAsStringTo(generator, "value", value);
        generator.writeStringField("dataType", dataType.name());
        generator.writeEndObject();
        if (from != null) {
            generator.writeStringField("from", from);
        }
        if (to != null) {
            generator.writeStringField("to", to);
        }
        generator.writeEndObject();
        generator.writeStringField("op", "ADD");
        generator.writeEndObject();
        generator.flush();
        writer.endOp();
    }

    private boolean isList(Object value) {
        return value instanceof List<?>;
    }
}
| 4,282 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/CountingHandler.java | package com.amazonaws.services.neptune.propertygraph.io;
import java.io.IOException;
/**
 * Decorator for a {@link GraphElementHandler} that counts how many elements have been
 * handled successfully.
 */
class CountingHandler<T> implements GraphElementHandler<T> {

    private final GraphElementHandler<T> innerHandler;
    private long processedCount = 0;

    CountingHandler(GraphElementHandler<T> parent) {
        this.innerHandler = parent;
    }

    @Override
    public void handle(T input, boolean allowTokens) throws IOException {
        // Delegate first: an element is only counted once it has been handled without error.
        innerHandler.handle(input, allowTokens);
        processedCount++;
    }

    /**
     * Returns the number of elements handled so far.
     */
    long numberProcessed() {
        return processedCount;
    }

    @Override
    public void close() throws Exception {
        innerHandler.close();
    }
}
| 4,283 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/NodeWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
 * Writes node (vertex) results via a {@link PropertyGraphPrinter}, one row per node.
 */
public class NodeWriter implements LabelWriter<PGResult> {

    private final PropertyGraphPrinter printer;

    public NodeWriter(PropertyGraphPrinter propertyGraphPrinter) {
        this.printer = propertyGraphPrinter;
    }

    @Override
    public void handle(PGResult node, boolean allowTokens) throws IOException {
        String id = String.valueOf(node.getId());
        Map<?, Object> properties = node.getProperties();
        List<String> labels = Label.fixLabelsIssue(node.getLabel());

        printer.printStartRow();
        printer.printNode(id, labels);
        // "vp" is the stream operation string passed through to the printer —
        // presumably "vertex property"; confirm against the streams printer.
        printer.printProperties(id, "vp", properties);
        printer.printEndRow();
    }

    @Override
    public void close() throws Exception {
        printer.close();
    }

    @Override
    public String outputId() {
        return printer.outputId();
    }
}
| 4,284 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/CsvPrinterOptions.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
/**
 * Immutable options controlling CSV output formatting (multi-value separator,
 * header handling, newline escaping, token prefix). Create instances via
 * {@link #builder()}; use {@link #copy()} to derive a modified variant.
 */
public class CsvPrinterOptions {

    /**
     * @return a builder initialized with defaults: empty separator, all flags
     * false, and a default {@link TokenPrefix}
     */
    public static Builder builder(){
        return new Builder();
    }

    // Separator placed between multiple values of the same property.
    private final String multiValueSeparator;
    private final boolean includeTypeDefinitions;
    private final boolean escapeCsvHeaders;
    private final boolean includeHeaders;
    // Derived once from multiValueSeparator so consumers can branch cheaply.
    private final boolean isSemicolonSeparator;
    private final boolean escapeNewline;
    private final TokenPrefix tokenPrefix;

    private CsvPrinterOptions(String multiValueSeparator,
                              boolean includeTypeDefinitions,
                              boolean escapeCsvHeaders,
                              boolean includeHeaders,
                              boolean escapeNewline,
                              TokenPrefix tokenPrefix) {
        this.multiValueSeparator = multiValueSeparator;
        this.includeTypeDefinitions = includeTypeDefinitions;
        this.escapeCsvHeaders = escapeCsvHeaders;
        this.includeHeaders = includeHeaders;
        this.escapeNewline = escapeNewline;
        // Null-safe replacement for multiValueSeparator.equalsIgnoreCase(";"):
        // avoids an NPE if a caller explicitly sets a null separator, and case
        // sensitivity is irrelevant for ";".
        this.isSemicolonSeparator = ";".equals(multiValueSeparator);
        this.tokenPrefix = tokenPrefix;
    }

    /** @return separator placed between multiple values of a property */
    public String multiValueSeparator() {
        return multiValueSeparator;
    }

    /** @return whether type definitions are appended to column headers */
    public boolean includeTypeDefinitions() {
        return includeTypeDefinitions;
    }

    /** @return whether CSV header values are escaped */
    public boolean escapeCsvHeaders() {
        return escapeCsvHeaders;
    }

    /** @return whether a header row is written */
    public boolean includeHeaders() {
        return includeHeaders;
    }

    /** @return whether newline characters in values are escaped */
    public boolean escapeNewline() {
        return escapeNewline;
    }

    /** @return true when the multi-value separator is ";" */
    public boolean isSemicolonSeparator() {
        return isSemicolonSeparator;
    }

    /** @return prefix applied to token columns */
    public TokenPrefix tokenPrefix() {
        return tokenPrefix;
    }

    /**
     * @return a new builder pre-populated with this instance's settings
     * (isSemicolonSeparator is derived, so it is not copied explicitly)
     */
    public Builder copy(){
        return new Builder()
                .setMultiValueSeparator(multiValueSeparator)
                .setIncludeTypeDefinitions(includeTypeDefinitions)
                .setEscapeCsvHeaders(escapeCsvHeaders)
                .setIncludeHeaders(includeHeaders)
                .setEscapeNewline(escapeNewline)
                .setTokenPrefix(tokenPrefix);
    }

    /** Mutable builder for {@link CsvPrinterOptions}. */
    public static class Builder {
        private String multiValueSeparator = "";
        private boolean includeTypeDefinitions = false;
        private boolean escapeCsvHeaders = false;
        private boolean includeHeaders = false;
        private boolean escapeNewline = false;
        private TokenPrefix tokenPrefix = new TokenPrefix();

        public Builder setMultiValueSeparator(String multiValueSeparator) {
            this.multiValueSeparator = multiValueSeparator;
            return this;
        }

        public Builder setIncludeTypeDefinitions(boolean includeTypeDefinitions) {
            this.includeTypeDefinitions = includeTypeDefinitions;
            return this;
        }

        public Builder setEscapeCsvHeaders(boolean escapeCsvHeaders) {
            this.escapeCsvHeaders = escapeCsvHeaders;
            return this;
        }

        public Builder setIncludeHeaders(boolean includeHeaders) {
            this.includeHeaders = includeHeaders;
            return this;
        }

        public Builder setEscapeNewline(boolean escapeNewline) {
            this.escapeNewline = escapeNewline;
            return this;
        }

        public Builder setTokenPrefix(TokenPrefix tokenPrefix){
            this.tokenPrefix = tokenPrefix;
            return this;
        }

        public CsvPrinterOptions build(){
            return new CsvPrinterOptions(multiValueSeparator, includeTypeDefinitions, escapeCsvHeaders, includeHeaders, escapeNewline, tokenPrefix);
        }
    }
}
| 4,285 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/PrinterOptions.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
/**
 * Bundles CSV and JSON printer option sets so code paths that may emit either
 * format can carry a single configuration object. Missing option sets default
 * to builder defaults.
 */
public class PrinterOptions {
    // All-default options for both formats.
    public static final PrinterOptions NULL_OPTIONS = new PrinterOptions(
            CsvPrinterOptions.builder().build(),
            JsonPrinterOptions.builder().build());
    private final CsvPrinterOptions csvPrinterOptions;
    private final JsonPrinterOptions jsonPrinterOptions;
    public PrinterOptions(CsvPrinterOptions csvPrinterOptions) {
        this(csvPrinterOptions, JsonPrinterOptions.builder().build());
    }
    public PrinterOptions(JsonPrinterOptions jsonPrinterOptions) {
        this(CsvPrinterOptions.builder().build(), jsonPrinterOptions);
    }
    public PrinterOptions(CsvPrinterOptions csvPrinterOptions, JsonPrinterOptions jsonPrinterOptions) {
        this.csvPrinterOptions = csvPrinterOptions;
        this.jsonPrinterOptions = jsonPrinterOptions;
    }
    /** @return CSV-specific printer options */
    public CsvPrinterOptions csv() {
        return csvPrinterOptions;
    }
    /** @return JSON-specific printer options */
    public JsonPrinterOptions json() {
        return jsonPrinterOptions;
    }
}
| 4,286 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/QueriesWriterFactory.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import java.io.IOException;
import java.util.Map;
/**
 * WriterFactory for query-based exports, where results are handled as generic
 * maps rather than typed node/edge results.
 */
public class QueriesWriterFactory implements WriterFactory<Map<?, ?>> {
    @Override
    public PropertyGraphPrinter createPrinter(String name, LabelSchema labelSchema, PropertyGraphTargetConfig targetConfig) throws IOException {
        PropertyGraphPrinter propertyGraphPrinter = targetConfig.createPrinterForQueries(name, labelSchema);
        // Only the label schema's property columns are written as the header;
        // no mandatory token columns are printed for query results.
        propertyGraphPrinter.printHeaderRemainingColumns(labelSchema.propertySchemas());
        return propertyGraphPrinter;
    }
    @Override
    public LabelWriter<Map<?, ?>> createLabelWriter(PropertyGraphPrinter propertyGraphPrinter, Label label) {
        // The label parameter is unused: QueryWriter writes rows as-is.
        return new QueryWriter(propertyGraphPrinter);
    }
}
| 4,287 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/LabelWriter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
/**
 * A {@link GraphElementHandler} that writes elements of a single label to an
 * output target and can identify that target.
 *
 * @param <T> type of element handled by this writer
 */
public interface LabelWriter<T> extends GraphElementHandler<T> {
    /**
     * @return identifier of the output this writer writes to (used elsewhere in
     * this package as a file path)
     */
    String outputId();
}
| 4,288 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/QueryTask.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.propertygraph.AllLabels;
import com.amazonaws.services.neptune.propertygraph.EdgeLabelStrategy;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.LabelsFilter;
import com.amazonaws.services.neptune.propertygraph.NamedQuery;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.NodeLabelStrategy;
import com.amazonaws.services.neptune.propertygraph.io.result.PGEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.QueriesEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.QueriesNodeResult;
import com.amazonaws.services.neptune.propertygraph.schema.FileSpecificLabelSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.util.Activity;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.tinkerpop.gremlin.driver.ResultSet;
import org.apache.tinkerpop.gremlin.structure.Direction;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Callable that drains a shared queue of {@link NamedQuery} instances, executes
 * each query against the target graph, and writes the results to the export
 * target. Returns, per graph element type, the schemas of the files written.
 *
 * <p>Fixes applied in this revision:
 * <ul>
 *   <li>{@code QueriesResultWrapperHandler.close()} previously closed only the
 *       node handler, leaking the edge handler's writers (unflushed files);
 *       both handlers are now closed.</li>
 *   <li>{@code castToMap} checked for {@code Map} but cast to {@code HashMap},
 *       so a non-HashMap result would throw {@code ClassCastException} instead
 *       of the intended {@code IllegalStateException}; it now casts to
 *       {@code Map}.</li>
 * </ul>
 */
public class QueryTask implements Callable<Map<GraphElementType, FileSpecificLabelSchemas>> {

    private static final Logger logger = LoggerFactory.getLogger(QueryTask.class);

    // Shared work queue: multiple QueryTask instances may poll from the same queue.
    private final Queue<NamedQuery> queries;
    private final NeptuneGremlinClient.QueryClient queryClient;
    private final PropertyGraphTargetConfig targetConfig;
    // When true, each query runs twice: once to infer a schema, once to export.
    private final boolean twoPassAnalysis;
    private final Long timeoutMillis;
    private final Status status;
    private final AtomicInteger index;
    // When true, results are interpreted as node/edge elements rather than flat maps.
    private final boolean structuredOutput;
    private final LabelsFilter nodeLabelFilter;
    private final LabelsFilter edgeLabelFilter;

    public QueryTask(Queue<NamedQuery> queries,
                     NeptuneGremlinClient.QueryClient queryClient,
                     PropertyGraphTargetConfig targetConfig,
                     boolean twoPassAnalysis,
                     Long timeoutMillis,
                     Status status,
                     AtomicInteger index,
                     boolean structuredOutput,
                     LabelsFilter nodeLabelFilter,
                     LabelsFilter edgeLabelFilter) {
        this.queries = queries;
        this.queryClient = queryClient;
        this.targetConfig = targetConfig;
        this.twoPassAnalysis = twoPassAnalysis;
        this.timeoutMillis = timeoutMillis;
        this.status = status;
        this.index = index;
        this.structuredOutput = structuredOutput;
        this.nodeLabelFilter = nodeLabelFilter;
        this.edgeLabelFilter = edgeLabelFilter;
    }

    /**
     * Polls queries until the queue is drained (or the status object stops the
     * run), executing each and writing its results.
     *
     * @return schemas of the files written, keyed by graph element type
     */
    @Override
    public Map<GraphElementType, FileSpecificLabelSchemas> call() throws Exception {

        QueriesWriterFactory writerFactory = new QueriesWriterFactory();
        Map<Label, LabelWriter<Map<?, ?>>> labelWriters = new HashMap<>();

        Map<GraphElementType, FileSpecificLabelSchemas> fileSpecificLabelSchemasMap = new HashMap<>();
        fileSpecificLabelSchemasMap.put(GraphElementType.nodes, new FileSpecificLabelSchemas());
        fileSpecificLabelSchemasMap.put(GraphElementType.edges, new FileSpecificLabelSchemas());

        try {
            while (status.allowContinue()) {
                try {
                    NamedQuery namedQuery = queries.poll();
                    if (namedQuery != null) {
                        final GraphElementSchemas graphElementSchemas = new GraphElementSchemas();

                        if (twoPassAnalysis) {
                            Timer.timedActivity(String.format("generating schema for query [%s]", namedQuery.query()),
                                    (Activity.Runnable) () -> updateSchema(namedQuery, graphElementSchemas));
                        }

                        Timer.timedActivity(String.format("executing query [%s]", namedQuery.query()),
                                (CheckedActivity.Runnable) () ->
                                        executeQuery(namedQuery, writerFactory, labelWriters, graphElementSchemas, fileSpecificLabelSchemasMap));
                    } else {
                        // Queue drained: signal that this task can stop.
                        status.halt();
                    }
                } catch (IllegalStateException e) {
                    logger.warn("Unexpected result value. {}. Proceeding with next query.", e.getMessage());
                }
            }
        } finally {
            // Always close writers so output files are flushed, even on failure.
            for (LabelWriter<Map<?, ?>> labelWriter : labelWriters.values()) {
                try {
                    labelWriter.close();
                } catch (Exception e) {
                    logger.warn("Error closing label writer: {}.", e.getMessage());
                }
            }
        }

        return fileSpecificLabelSchemasMap;
    }

    /**
     * First pass (when two-pass analysis is enabled): runs the query and folds
     * every result row into the supplied schema collection without writing output.
     */
    private void updateSchema(NamedQuery namedQuery, GraphElementSchemas graphElementSchemas) {
        ResultSet firstPassResults = queryClient.submit(namedQuery.query(), timeoutMillis);
        firstPassResults.stream().
                map(r -> castToMap(r.getObject())).
                forEach(r -> {
                    graphElementSchemas.update(new Label(namedQuery.name()), r, true);
                });
    }

    /**
     * Runs the query and routes each result row to the appropriate handler:
     * structured node/edge export handlers, or a per-query-label map writer.
     */
    private void executeQuery(NamedQuery namedQuery,
                              QueriesWriterFactory writerFactory,
                              Map<Label, LabelWriter<Map<?, ?>>> labelWriters,
                              GraphElementSchemas graphElementSchemas,
                              Map<GraphElementType, FileSpecificLabelSchemasMap> fileSpecificLabelSchemasMap) {
        throw new UnsupportedOperationException();
    }

    private void executeQueryImpl(NamedQuery namedQuery,
                              QueriesWriterFactory writerFactory,
                              Map<Label, LabelWriter<Map<?, ?>>> labelWriters,
                              GraphElementSchemas graphElementSchemas,
                              Map<GraphElementType, FileSpecificLabelSchemas> fileSpecificLabelSchemasMap) {

        ResultSet results = queryClient.submit(namedQuery.query(), timeoutMillis);

        GraphElementHandler<Map<?, ?>> handler;

        if (structuredOutput) {
            // Structured output: split rows into node and edge results and feed
            // each through the standard per-label export pipeline.
            handler = new QueriesResultWrapperHandler(
                    new CountingHandler<QueriesNodeResult>(
                            new ExportPGTaskHandler<QueriesNodeResult>(
                                    fileSpecificLabelSchemasMap.get(GraphElementType.nodes),
                                    graphElementSchemas,
                                    targetConfig,
                                    (WriterFactory<QueriesNodeResult>) GraphElementType.nodes.writerFactory(),
                                    new LabelWriters<>(new AtomicInteger(), 0),
                                    null,
                                    status,
                                    index,
                                    nodeLabelFilter)
                    ),
                    new CountingHandler<QueriesEdgeResult>(
                            new ExportPGTaskHandler<QueriesEdgeResult>(
                                    fileSpecificLabelSchemasMap.get(GraphElementType.edges),
                                    graphElementSchemas,
                                    targetConfig,
                                    (WriterFactory<QueriesEdgeResult>) GraphElementType.edges.writerFactory(),
                                    new LabelWriters<>(new AtomicInteger(), 0),
                                    null,
                                    status,
                                    index,
                                    edgeLabelFilter)
                    )
            );
        } else {
            // Flat output: every row is written under the query's name as its label.
            ResultsHandler resultsHandler = new ResultsHandler(
                    new Label(namedQuery.name()),
                    labelWriters,
                    writerFactory,
                    graphElementSchemas);
            handler = new StatusHandler(resultsHandler, status);
        }

        results.stream().
                map(r -> castToMap(r.getObject())).
                forEach(r -> {
                    try {
                        handler.handle(r, true);
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                });
    }

    /**
     * Narrows an arbitrary query result object to a Map.
     *
     * @throws IllegalStateException if the result is not a Map
     */
    private Map<?, ?> castToMap(Object o) {
        if (Map.class.isAssignableFrom(o.getClass())) {
            // Cast to Map (not HashMap): the check above only guarantees Map.
            return (Map<?, ?>) o;
        }

        throw new IllegalStateException("Expected Map, found " + o.getClass().getSimpleName());
    }

    /**
     * Writes flat query result maps under a single label, lazily creating the
     * label writer (and its schema) on the first row.
     */
    private class ResultsHandler implements GraphElementHandler<Map<?, ?>> {

        private final Label label;
        private final Map<Label, LabelWriter<Map<?, ?>>> labelWriters;
        private final QueriesWriterFactory writerFactory;
        private final GraphElementSchemas graphElementSchemas;

        private ResultsHandler(Label label,
                               Map<Label, LabelWriter<Map<?, ?>>> labelWriters,
                               QueriesWriterFactory writerFactory,
                               GraphElementSchemas graphElementSchemas) {
            this.label = label;
            this.labelWriters = labelWriters;
            this.writerFactory = writerFactory;
            this.graphElementSchemas = graphElementSchemas;
        }

        private void createWriter(Map<?, ?> properties, boolean allowStructuralElements) {
            try {
                if (!graphElementSchemas.hasSchemaFor(label)) {
                    graphElementSchemas.update(label, properties, allowStructuralElements);
                }

                LabelSchema labelSchema = graphElementSchemas.getSchemaFor(label);

                PropertyGraphPrinter propertyGraphPrinter =
                        writerFactory.createPrinter(Directories.fileName(label.fullyQualifiedLabel(), index), labelSchema, targetConfig);

                labelWriters.put(label, writerFactory.createLabelWriter(propertyGraphPrinter, label));

            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }

        @Override
        public void handle(Map<?, ?> properties, boolean allowTokens) throws IOException {
            if (!labelWriters.containsKey(label)) {
                createWriter(properties, allowTokens);
            }
            labelWriters.get(label).handle(properties, allowTokens);
        }

        @Override
        public void close() throws Exception {
            // Writers are shared across queries; QueryTask.call() closes them.
        }
    }

    /** Decorator that bumps the status counter after each handled row. */
    private static class StatusHandler implements GraphElementHandler<Map<?, ?>> {

        private final GraphElementHandler<Map<?, ?>> parent;
        private final Status status;

        private StatusHandler(GraphElementHandler<Map<?, ?>> parent, Status status) {
            this.parent = parent;
            this.status = status;
        }

        @Override
        public void handle(Map<?, ?> input, boolean allowTokens) throws IOException {
            parent.handle(input, allowTokens);
            status.update();
        }

        @Override
        public void close() throws Exception {
            parent.close();
        }
    }

    /**
     * Routes each structured result row to either the node or the edge handler,
     * based on the presence of IN/OUT direction keys.
     */
    private static class QueriesResultWrapperHandler implements GraphElementHandler<Map<?, ?>> {

        private final GraphElementHandler<QueriesNodeResult> nodeParent;
        private final GraphElementHandler<QueriesEdgeResult> edgeParent;

        private QueriesResultWrapperHandler(GraphElementHandler<QueriesNodeResult> nodeParent, GraphElementHandler<QueriesEdgeResult> edgeParent) {
            this.nodeParent = nodeParent;
            this.edgeParent = edgeParent;
        }

        @Override
        public void handle(Map<?, ?> input, boolean allowTokens) throws IOException {
            if (isEdge(input)) {
                edgeParent.handle(getQueriesEdgeResult(input), allowTokens);
            } else {
                nodeParent.handle(getQueriesNodeResult(input), allowTokens);
            }
        }

        @Override
        public void close() throws Exception {
            try {
                nodeParent.close();
            } finally {
                // BUG FIX: previously only nodeParent was closed, leaking the
                // edge handler's writers. Close both, even if the first throws.
                edgeParent.close();
            }
        }

        private boolean isEdge(Map<?, ?> input) {
            return input.containsKey(Direction.IN) && input.containsKey(Direction.OUT);
        }

        private QueriesNodeResult getQueriesNodeResult(Map<?, ?> map) {
            return new QueriesNodeResult(map);
        }

        private QueriesEdgeResult getQueriesEdgeResult(Map<?, ?> map) {
            return new QueriesEdgeResult(map);
        }
    }
}
| 4,289 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/SerializationConfig.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import org.apache.tinkerpop.gremlin.driver.Cluster;
import org.apache.tinkerpop.gremlin.driver.MessageSerializer;
import org.apache.tinkerpop.gremlin.driver.ser.GraphBinaryMessageSerializerV1;
import org.apache.tinkerpop.gremlin.driver.ser.GraphSONMessageSerializerV3d0;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Captures Gremlin driver serialization settings (serializer name, max content
 * length, result batch size, optional JanusGraph serializer) and applies them
 * to a {@link Cluster.Builder}.
 */
public class SerializationConfig {

    private final String serializer;
    private final int maxContentLength;
    private final int batchSize;
    private final boolean useJanusSerializer;

    public SerializationConfig(String serializer, int maxContentLength, int batchSize, boolean useJanusSerializer) {
        this.serializer = serializer;
        this.maxContentLength = maxContentLength;
        this.batchSize = batchSize;
        this.useJanusSerializer = useJanusSerializer;
    }

    /**
     * Applies batch size, max content length and the serializer to the builder.
     *
     * @param builder cluster builder to configure
     * @return the same builder, configured
     */
    public Cluster.Builder apply(Cluster.Builder builder) {
        Cluster.Builder configured = builder
                .resultIterationBatchSize(batchSize)
                .maxContentLength(maxContentLength);

        if (!useJanusSerializer) {
            // Standard path: serializer resolved from its configured name.
            return configured.serializer(serializer);
        }

        // JanusGraph path: a GraphSON 3.0 serializer with the JanusGraph IO
        // registry, so JanusGraph-specific types round-trip correctly.
        Map<String, Object> serializerSettings = new HashMap<>();
        serializerSettings.put("ioRegistries",
                Collections.singletonList("org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry"));

        MessageSerializer graphSonSerializer = new GraphSONMessageSerializerV3d0();
        graphSonSerializer.configure(serializerSettings, null);

        return configured.serializer(graphSonSerializer);
    }
}
| 4,290 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/ExportPGTaskHandler.java | package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.propertygraph.GraphClient;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.LabelsFilter;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.FileSpecificLabelSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Streams property-graph results of a single element type (nodes or edges) to
 * per-label writers, creating each writer (and recording its output file's
 * schema) the first time its label is seen.
 *
 * @param <T> concrete result type handled by this task
 */
class ExportPGTaskHandler<T extends PGResult> implements GraphElementHandler<T> {
    private static final Logger logger = LoggerFactory.getLogger(ExportPGTaskHandler.class);
    // Records which output file holds which label schema (consumed downstream, e.g. by rewrite).
    private final FileSpecificLabelSchemas fileSpecificLabelSchemas;
    // Pre-computed schemas used when creating a printer for a label.
    private final GraphElementSchemas graphElementSchemas;
    private final PropertyGraphTargetConfig targetConfig;
    private final WriterFactory<T> writerFactory;
    // Cache of open writers keyed by label; closed en masse in close().
    private final LabelWriters<T> labelWriters;
    // May be null (QueryTask passes null); only used for stats updates.
    private final GraphClient<T> graphClient;
    private final Status status;
    // Used to generate distinct file names per task/file.
    private final AtomicInteger index;
    private final LabelsFilter labelsFilter;
    ExportPGTaskHandler(FileSpecificLabelSchemas fileSpecificLabelSchemas,
                        GraphElementSchemas graphElementSchemas,
                        PropertyGraphTargetConfig targetConfig,
                        WriterFactory<T> writerFactory,
                        LabelWriters<T> labelWriters,
                        GraphClient<T> graphClient,
                        Status status,
                        AtomicInteger index,
                        LabelsFilter labelsFilter) {
        this.fileSpecificLabelSchemas = fileSpecificLabelSchemas;
        this.graphElementSchemas = graphElementSchemas;
        this.targetConfig = targetConfig;
        this.writerFactory = writerFactory;
        this.labelWriters = labelWriters;
        this.graphClient = graphClient;
        this.status = status;
        this.index = index;
        this.labelsFilter = labelsFilter;
    }
    /**
     * Writes one result: resolves its label via the filter, lazily creates the
     * label's writer, updates stats (if a graph client is present), and delegates.
     */
    @Override
    public void handle(T input, boolean allowTokens) throws IOException {
        status.update();
        Label label = labelsFilter.getLabelFor(input);
        if (!labelWriters.containsKey(label)) {
            createWriterFor(label);
        }
        if(graphClient != null) {
            graphClient.updateStats(label);
        }
        labelWriters.get(label).handle(input, allowTokens);
    }
    @Override
    public void close() {
        try {
            // Closes every cached label writer, flushing their output files.
            labelWriters.close();
        } catch (Exception e) {
            logger.warn("Error closing label writer: {}.", e.getMessage());
        }
    }
    // Creates a writer for the label and records its output file's schema.
    private void createWriterFor(Label label) {
        try {
            LabelSchema labelSchema = graphElementSchemas.getSchemaFor(label);
            PropertyGraphPrinter propertyGraphPrinter = writerFactory.createPrinter(
                    Directories.fileName(label.fullyQualifiedLabel(), index),
                    labelSchema,
                    targetConfig);
            LabelWriter<T> labelWriter = writerFactory.createLabelWriter(propertyGraphPrinter, labelSchema.label());
            labelWriters.put(label, labelWriter);
            fileSpecificLabelSchemas.add(labelWriter.outputId(), targetConfig.format(), labelSchema);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
| 4,291 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/EdgeWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.LabelsFilter;
import com.amazonaws.services.neptune.propertygraph.io.result.PGEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import java.io.IOException;
import java.util.*;
/**
 * Writes edge results to a {@link PropertyGraphPrinter}, one output row per
 * edge. When the label carries from/to vertex labels, those are emitted too.
 */
public class EdgeWriter implements LabelWriter<PGResult> {

    private final PropertyGraphPrinter propertyGraphPrinter;
    private final boolean hasFromAndToLabels;

    public EdgeWriter(PropertyGraphPrinter propertyGraphPrinter, Label label) {
        this.propertyGraphPrinter = propertyGraphPrinter;
        this.hasFromAndToLabels = label.hasFromAndToLabels();
    }

    /**
     * Serializes a single edge as one row: row start, edge id/label/endpoints
     * (plus endpoint labels when available), properties (tagged "ep"), row end.
     *
     * @param edge        edge result to serialize
     * @param allowTokens accepted for the GraphElementHandler contract
     * @throws IOException if the underlying printer fails to write
     */
    @Override
    public void handle(PGResult edge, boolean allowTokens) throws IOException {
        String fromId = edge.getFrom();
        String toId = edge.getTo();
        Map<?, Object> edgeProperties = edge.getProperties();
        String edgeId = edge.getId();
        String edgeLabel = edge.getLabel().get(0);

        propertyGraphPrinter.printStartRow();

        if (hasFromAndToLabels) {
            // Temp fix for concatenated label issue
            List<String> fromLabels = Label.fixLabelsIssue(edge.getFromLabels());
            List<String> toLabels = Label.fixLabelsIssue(edge.getToLabels());
            propertyGraphPrinter.printEdge(edgeId, edgeLabel, fromId, toId, fromLabels, toLabels);
        } else {
            propertyGraphPrinter.printEdge(edgeId, edgeLabel, fromId, toId);
        }

        propertyGraphPrinter.printProperties(edgeId, "ep", edgeProperties);
        propertyGraphPrinter.printEndRow();
    }

    @Override
    public void close() throws Exception {
        propertyGraphPrinter.close();
    }

    @Override
    public String outputId() {
        return propertyGraphPrinter.outputId();
    }
}
| 4,292 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/PropertyGraphPrinter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * Writes property-graph elements (nodes and edges) row by row to an output
 * target. Callers bracket each element with {@link #printStartRow()} /
 * {@link #printEndRow()} and emit headers once before any rows.
 */
public interface PropertyGraphPrinter extends AutoCloseable {
    // Identifier of the output destination (used elsewhere in this package as a file path).
    String outputId();
    // Prints the fixed leading header columns (e.g. token columns).
    void printHeaderMandatoryColumns(String... columns);
    // Prints the per-property header columns derived from the schema.
    void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns);
    void printProperties(Map<?, ?> properties) throws IOException;
    // applyFormatting=false is used when copying already-formatted values (see RewriteCsv).
    void printProperties(Map<?, ?> properties, boolean applyFormatting) throws IOException;
    // streamOperation is "vp" for vertex properties and "ep" for edge properties
    // as passed by NodeWriter/EdgeWriter.
    void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException;
    void printEdge(String id, String label, String from, String to) throws IOException;
    // Variant that also emits the labels of the edge's endpoint vertices.
    void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException;
    void printNode(String id, List<String> labels) throws IOException;
    void printStartRow() throws IOException;
    void printEndRow() throws IOException;
}
| 4,293 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/RewriteCsv.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.schema.*;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;
import org.apache.commons.lang3.ArrayUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.Reader;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
/**
 * Rewrites the per-task CSV files produced during export so that every file
 * for a label conforms to that label's consolidated master schema (same
 * columns, same order). Files are rewritten concurrently, one task per
 * master label schema.
 */
public class RewriteCsv implements RewriteCommand {
    private static final Logger logger = LoggerFactory.getLogger(RewriteCsv.class);
    private final PropertyGraphTargetConfig targetConfig;
    private final ConcurrencyConfig concurrencyConfig;
    private final FeatureToggles featureToggles;
    public RewriteCsv(PropertyGraphTargetConfig targetConfig,
                      ConcurrencyConfig concurrencyConfig,
                      FeatureToggles featureToggles) {
        this.targetConfig = targetConfig;
        this.concurrencyConfig = concurrencyConfig;
        this.featureToggles = featureToggles;
    }
    /**
     * Rewrites all files for the given element type, timing the whole activity.
     *
     * @return the master schemas updated to reference the rewritten files
     */
    @Override
    public MasterLabelSchemas execute(MasterLabelSchemas masterLabelSchemas) throws Exception {
        GraphElementType graphElementType = masterLabelSchemas.graphElementType();
        System.err.println(String.format("Rewriting %s files...", graphElementType.name()));
        return Timer.timedActivity(String.format("rewriting %s files", graphElementType.name()),
                (CheckedActivity.Callable<MasterLabelSchemas>) () ->
                        rewriteFiles(masterLabelSchemas, graphElementType, targetConfig));
    }
    // Fans out one rewrite task per master label schema and collects the results.
    private MasterLabelSchemas rewriteFiles(MasterLabelSchemas masterLabelSchemas,
                                            GraphElementType graphElementType,
                                            PropertyGraphTargetConfig targetConfig) throws Exception {
        Map<Label, MasterLabelSchema> updatedSchemas = new HashMap<>();
        Collection<Future<MasterLabelSchema>> futures = new ArrayList<>();
        ExecutorService taskExecutor = Executors.newFixedThreadPool(concurrencyConfig.concurrency());
        for (MasterLabelSchema masterLabelSchema : masterLabelSchemas.schemas()) {
            futures.add(taskExecutor.submit(() -> rewrite(targetConfig, graphElementType, masterLabelSchema)));
        }
        taskExecutor.shutdown();
        try {
            // Wait indefinitely: rewrite time is bounded by file sizes, not a deadline.
            taskExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
        for (Future<MasterLabelSchema> future : futures) {
            if (future.isCancelled()) {
                throw new IllegalStateException("Unable to complete rewrite because at least one task was cancelled");
            }
            if (!future.isDone()) {
                throw new IllegalStateException("Unable to complete rewrite because at least one task has not completed");
            }
            MasterLabelSchema masterLabelSchema = future.get();
            updatedSchemas.put(masterLabelSchema.labelSchema().label(), masterLabelSchema);
        }
        return new MasterLabelSchemas(updatedSchemas, graphElementType);
    }
    /**
     * Rewrites every file for one label against the consolidated master schema.
     * Source files are deleted after rewriting unless the Keep_Rewritten_Files
     * feature toggle is set.
     */
    private MasterLabelSchema rewrite(PropertyGraphTargetConfig targetConfig,
                                      GraphElementType graphElementType,
                                      MasterLabelSchema masterLabelSchema) throws Exception {
        LabelSchema originalLabelSchema = masterLabelSchema.labelSchema();
        // Work on a copy so per-file stats can be recomputed from scratch.
        LabelSchema masterSchema = originalLabelSchema.createCopy();
        masterSchema.initStats();
        Collection<String> renamedFiles = new ArrayList<>();
        for (FileSpecificLabelSchema fileSpecificLabelSchema : masterLabelSchema.fileSpecificLabelSchemas()) {
            LabelSchema labelSchema = fileSpecificLabelSchema.labelSchema();
            Label label = labelSchema.label();
            File sourceCsvFile = new File(fileSpecificLabelSchema.outputId());
            if (!sourceCsvFile.exists()) {
                // A multi-label file may already have been rewritten (and deleted)
                // under one of its other labels.
                // NOTE(review): for a missing single-label file, execution falls
                // through to parsing, which will fail on the absent file —
                // confirm this fail-fast behavior is intended.
                if (label.labels().size() > 1) {
                    logger.warn("Skipping multi-label file {} because it has already been rewritten under another label", sourceCsvFile);
                    continue;
                }
            }
            // Reconstruct the source file's header: token columns, optional
            // from/to label columns, then the file-specific property columns.
            String[] additionalElementHeaders = label.hasFromAndToLabels() ?
                    new String[]{"~fromLabels", "~toLabels"} :
                    new String[]{};
            String[] filePropertyHeaders =
                    labelSchema.propertySchemas().stream()
                            .map(p -> p.property().toString())
                            .collect(Collectors.toList())
                            .toArray(new String[]{});
            String[] fileHeaders = ArrayUtils.addAll(
                    graphElementType.tokenNames().toArray(new String[]{}),
                    ArrayUtils.addAll(additionalElementHeaders, filePropertyHeaders));
            try (DeletableFile sourceFile = new DeletableFile(sourceCsvFile);
                 Reader in = sourceFile.reader();
                 PropertyGraphPrinter target = graphElementType.writerFactory().createPrinter(
                         targetConfig.format().replaceExtension(sourceCsvFile.getName(), "modified"),
                         masterSchema,
                         targetConfig.forFileConsolidation());
            ) {
                if (featureToggles.containsFeature(FeatureToggle.Keep_Rewritten_Files)){
                    sourceFile.doNotDelete();
                }
                renamedFiles.add(target.outputId());
                CSVFormat format = CSVFormat.RFC4180.withHeader(fileHeaders);
                Iterable<CSVRecord> records = format.parse(in);
                int recordCount = 0;
                for (CSVRecord record : records) {
                    target.printStartRow();
                    if (graphElementType == GraphElementType.nodes) {
                        // Multi-label values are semicolon-delimited in the source CSV.
                        target.printNode(record.get("~id"), Arrays.asList(record.get("~label").split(";")));
                    } else {
                        if (label.hasFromAndToLabels()) {
                            target.printEdge(
                                    record.get("~id"),
                                    record.get("~label"),
                                    record.get("~from"),
                                    record.get("~to"),
                                    Arrays.asList(record.get("~fromLabels").split(";")),
                                    Arrays.asList(record.get("~toLabels").split(";")));
                        } else {
                            target.printEdge(record.get("~id"), record.get("~label"), record.get("~from"), record.get("~to"));
                        }
                    }
                    // Values are already formatted; copy them through without re-formatting.
                    target.printProperties(record.toMap(), false);
                    target.printEndRow();
                    recordCount++;
                }
                logger.info("Original: {}, Rewritten: {}, RecordCount: {}", sourceFile, target.outputId(), recordCount);
            }
        }
        return new MasterLabelSchema(
                masterSchema,
                renamedFiles.stream().map(f -> new FileSpecificLabelSchema(f, targetConfig.format(), masterSchema)).collect(Collectors.toList()));
    }
}
| 4,294 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/EdgesWriterFactory.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.propertygraph.Label;
import com.amazonaws.services.neptune.propertygraph.io.result.PGEdgeResult;
import com.amazonaws.services.neptune.propertygraph.io.result.PGResult;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import java.io.IOException;
import java.util.Map;
/**
 * {@link WriterFactory} for edge results: creates printers whose mandatory header columns
 * describe an edge (id, label, from, to — plus the endpoint-label columns when the label
 * carries from/to vertex labels), and wraps printers in {@link EdgeWriter} instances.
 */
public class EdgesWriterFactory implements WriterFactory<PGResult> {

    @Override
    public PropertyGraphPrinter createPrinter(String name, LabelSchema labelSchema, PropertyGraphTargetConfig targetConfig) throws IOException {
        PropertyGraphPrinter printer = targetConfig.createPrinterForEdges(name, labelSchema);
        // Edge files optionally carry the labels of the endpoint vertices as two extra columns.
        boolean hasEndpointLabels = labelSchema.label().hasFromAndToLabels();
        if (hasEndpointLabels) {
            printer.printHeaderMandatoryColumns("id", "label", "from", "to", "fromLabels", "toLabels");
        } else {
            printer.printHeaderMandatoryColumns("id", "label", "from", "to");
        }
        printer.printHeaderRemainingColumns(labelSchema.propertySchemas());
        return printer;
    }

    @Override
    public LabelWriter<PGResult> createLabelWriter(PropertyGraphPrinter propertyGraphPrinter, Label label) {
        return new EdgeWriter(propertyGraphPrinter, label);
    }
}
| 4,295 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/QueryJob.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.propertygraph.AllLabels;
import com.amazonaws.services.neptune.propertygraph.EdgeLabelStrategy;
import com.amazonaws.services.neptune.propertygraph.LabelsFilter;
import com.amazonaws.services.neptune.propertygraph.NamedQuery;
import com.amazonaws.services.neptune.cluster.ConcurrencyConfig;
import com.amazonaws.services.neptune.propertygraph.NeptuneGremlinClient;
import com.amazonaws.services.neptune.propertygraph.NodeLabelStrategy;
import com.amazonaws.services.neptune.propertygraph.schema.ExportSpecification;
import com.amazonaws.services.neptune.propertygraph.schema.FileSpecificLabelSchemas;
import com.amazonaws.services.neptune.propertygraph.schema.GraphElementType;
import com.amazonaws.services.neptune.propertygraph.schema.MasterLabelSchemas;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Runs a set of named Gremlin queries concurrently and writes their results to the
 * configured target, then consolidates the per-file schemas produced by the worker
 * tasks and applies the target's rewrite command to each element type.
 */
public class QueryJob {

    private final Queue<NamedQuery> queries;
    private final NeptuneGremlinClient.QueryClient queryClient;
    private final ConcurrencyConfig concurrencyConfig;
    private final PropertyGraphTargetConfig targetConfig;
    private final boolean twoPassAnalysis;
    private final Long timeoutMillis;
    private final Collection<ExportSpecification> exportSpecifications;
    private final FeatureToggles featureToggles;
    private final boolean structuredOutput;

    public QueryJob(Collection<NamedQuery> queries,
                    NeptuneGremlinClient.QueryClient queryClient,
                    ConcurrencyConfig concurrencyConfig,
                    PropertyGraphTargetConfig targetConfig,
                    boolean twoPassAnalysis,
                    Long timeoutMillis,
                    Collection<ExportSpecification> exportSpecifications,
                    FeatureToggles featureToggles,
                    boolean structuredOutput){
        // Workers pull from a shared queue, so it must be thread-safe.
        this.queries = new ConcurrentLinkedQueue<>(queries);
        this.queryClient = queryClient;
        this.concurrencyConfig = concurrencyConfig;
        this.targetConfig = targetConfig;
        this.twoPassAnalysis = twoPassAnalysis;
        this.timeoutMillis = timeoutMillis;
        this.exportSpecifications = exportSpecifications;
        this.featureToggles = featureToggles;
        this.structuredOutput = structuredOutput;
    }

    /** Executes the export, timing the whole activity. */
    public void execute() throws Exception {
        Timer.timedActivity("exporting results from queries", (CheckedActivity.Runnable) this::export);
    }

    private void export() throws ExecutionException, InterruptedException {
        System.err.println("Writing query results to " + targetConfig.output().name() + " as " + targetConfig.format().description());

        Status status = new Status(StatusOutputFormat.Description, "query results");
        ExecutorService taskExecutor = Executors.newFixedThreadPool(concurrencyConfig.concurrency());

        Collection<Future<Map<GraphElementType, FileSpecificLabelSchemas>>> futures = new ArrayList<>();
        Collection<FileSpecificLabelSchemas> nodeSchemas = new ArrayList<>();
        Collection<FileSpecificLabelSchemas> edgeSchemas = new ArrayList<>();

        // Defaults used when no export specification supplies a filter for an element type.
        LabelsFilter nodeLabelFilter = new AllLabels(NodeLabelStrategy.nodeLabelsOnly);
        LabelsFilter edgeLabelFilter = new AllLabels(EdgeLabelStrategy.edgeLabelsOnly);
        for (ExportSpecification spec : exportSpecifications) {
            if (spec.getGraphElementType() == GraphElementType.nodes) {
                nodeLabelFilter = spec.getLabelsFilter();
            } else {
                edgeLabelFilter = spec.getLabelsFilter();
            }
        }

        // Shared counter so each worker writes to distinctly numbered files.
        AtomicInteger fileIndex = new AtomicInteger();
        int workerCount = concurrencyConfig.concurrency();
        for (int worker = 0; worker < workerCount; worker++) {
            futures.add(taskExecutor.submit(new QueryTask(
                    queries,
                    queryClient,
                    targetConfig,
                    twoPassAnalysis,
                    timeoutMillis,
                    status,
                    fileIndex,
                    structuredOutput,
                    nodeLabelFilter,
                    edgeLabelFilter)));
        }

        taskExecutor.shutdown();
        try {
            taskExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }

        // Collect the per-worker schemas; every task must have run to completion.
        for (Future<Map<GraphElementType, FileSpecificLabelSchemas>> future : futures) {
            if (future.isCancelled()) {
                throw new IllegalStateException("Unable to complete job because at least one task was cancelled");
            }
            if (!future.isDone()) {
                throw new IllegalStateException("Unable to complete job because at least one task has not completed");
            }
            Map<GraphElementType, FileSpecificLabelSchemas> taskResult = future.get();
            nodeSchemas.add(taskResult.get(GraphElementType.nodes));
            edgeSchemas.add(taskResult.get(GraphElementType.edges));
        }

        // Consolidate schemas per element type and rewrite the output files accordingly.
        RewriteCommand rewriteCommand = targetConfig.createRewriteCommand(concurrencyConfig, featureToggles);
        for (ExportSpecification spec : exportSpecifications) {
            boolean isNodes = spec.getGraphElementType().equals(GraphElementType.nodes);
            MasterLabelSchemas masterLabelSchemas =
                    spec.createMasterLabelSchemas(isNodes ? nodeSchemas : edgeSchemas);
            try {
                rewriteCommand.execute(masterLabelSchemas);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }
}
| 4,296 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/JsonPropertyGraphPrinter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.TokenPrefix;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.propertygraph.schema.LabelSchema;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
/**
 * {@link PropertyGraphPrinter} that emits nodes and edges as JSON objects, one object per
 * row, via a Jackson {@link JsonGenerator}. When {@code allowUpdateSchema} is true the
 * supplied {@link LabelSchema} is mutated as new properties and value types are observed.
 * Not thread-safe: the {@code isNullable} flag and the schema are mutated per row.
 */
public class JsonPropertyGraphPrinter implements PropertyGraphPrinter {

    private final OutputWriter writer;
    private final JsonGenerator generator;
    private final LabelSchema labelSchema;
    // When true, unknown properties are added to the schema and value types are widened.
    private final boolean allowUpdateSchema;
    private final PrinterOptions printerOptions;
    // False only for the very first row: properties discovered after the first row must be
    // nullable, because earlier rows did not contain them.
    private boolean isNullable = false;
    private final TokenPrefix tokenPrefix;

    public JsonPropertyGraphPrinter(OutputWriter writer, JsonGenerator generator, LabelSchema labelSchema, PrinterOptions printerOptions) throws IOException {
        this(writer, generator, labelSchema, printerOptions, false);
    }

    public JsonPropertyGraphPrinter(OutputWriter writer, JsonGenerator generator, LabelSchema labelSchema, PrinterOptions printerOptions, boolean allowUpdateSchema) throws IOException {
        this.writer = writer;
        this.generator = generator;
        this.labelSchema = labelSchema;
        this.allowUpdateSchema = allowUpdateSchema;
        this.printerOptions = printerOptions;
        this.tokenPrefix = printerOptions.json().tokenPrefix();
    }

    @Override
    public String outputId() {
        return writer.outputId();
    }

    @Override
    public void printHeaderMandatoryColumns(String... columns) {
        // Do nothing — JSON output has no header row.
    }

    @Override
    public void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns) {
        // Do nothing — JSON output has no header row.
    }

    @Override
    public void printProperties(Map<?, ?> properties) throws IOException {
        // print known properties
        for (PropertySchema propertySchema : labelSchema.propertySchemas()) {
            Object key = propertySchema.property();
            Object value = properties.get(key);
            if (properties.containsKey(key)) {
                // accept() may widen the schema's data type; record the observation for stats.
                PropertySchema.PropertyValueMetadata propertyValueMetadata = propertySchema.accept(value, allowUpdateSchema);
                labelSchema.recordObservation(propertySchema, value, propertyValueMetadata);
                printProperty(value, propertySchema);
            } else {
                // A known property absent from this row makes the column nullable.
                if (allowUpdateSchema) {
                    propertySchema.makeNullable();
                }
            }
        }

        // Print unknown properties
        if (allowUpdateSchema) {
            for (Map.Entry<?, ?> property : properties.entrySet()) {
                Object key = property.getKey();
                if (!labelSchema.containsProperty(key)) {
                    Object value = property.getValue();
                    PropertySchema propertySchema = new PropertySchema(key);
                    PropertySchema.PropertyValueMetadata propertyValueMetadata = propertySchema.accept(value, true);
                    // Discovered after the first row => earlier rows lacked it => nullable.
                    if (isNullable) {
                        propertySchema.makeNullable();
                    }
                    labelSchema.put(key, propertySchema);
                    labelSchema.recordObservation(propertySchema, value, propertyValueMetadata);
                    printProperty(value, propertySchema);
                }
            }
        }

        // From the second row onwards, any newly discovered property must be nullable.
        isNullable = true;
    }

    private void printProperty(Object value, PropertySchema propertySchema) throws IOException {
        DataType dataType = propertySchema.dataType();
        String formattedKey = propertySchema.nameWithoutDataType();
        boolean isMultiValue = propertySchema.isMultiValue();
        printProperty(value, dataType, formattedKey, isMultiValue);
    }

    /**
     * Writes a single property. Multi-value properties are always emitted as JSON arrays;
     * single-element lists collapse to a scalar unless strict cardinality is configured.
     */
    private void printProperty(Object value, DataType dataType, String formattedKey, boolean forceMultiValue) throws IOException {
        if (forceMultiValue) {
            List<?> values = isList(value) ? (List<?>) value : Collections.singletonList(value);
            generator.writeFieldName(formattedKey);
            generator.writeStartArray();
            for (Object v : values) {
                dataType.printTo(generator, v);
            }
            generator.writeEndArray();
        } else {
            if (isList(value)) {
                List<?> values = (List<?>) value;
                if (values.size() != 1 || printerOptions.json().strictCardinality()) {
                    generator.writeFieldName(formattedKey);
                    generator.writeStartArray();
                    for (Object v : values) {
                        dataType.printTo(generator, v);
                    }
                    generator.writeEndArray();
                } else {
                    // Single-element list collapses to a plain scalar field.
                    dataType.printTo(generator, formattedKey, values.get(0));
                }
            } else {
                dataType.printTo(generator, formattedKey, value);
            }
        }
    }

    @Override
    public void printProperties(Map<?, ?> properties, boolean applyFormatting) throws IOException {
        // applyFormatting is ignored for JSON output.
        printProperties(properties);
    }

    @Override
    public void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException {
        // id and streamOperation are ignored for JSON output.
        printProperties(properties);
    }

    @Override
    public void printEdge(String id, String label, String from, String to) throws IOException {
        printEdge(id, label, from, to, null, null);
    }

    @Override
    public void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException {
        generator.writeStringField( tokenPrefix.format("id"), id);
        generator.writeStringField(tokenPrefix.format("label"), label);
        generator.writeStringField(tokenPrefix.format("from"), from);
        generator.writeStringField(tokenPrefix.format("to"), to);
        // Endpoint labels, when present, are always arrays.
        if (fromLabels != null) {
            printProperty(fromLabels, DataType.String, tokenPrefix.format("fromLabels"), true);
        }
        if (toLabels != null) {
            printProperty(toLabels, DataType.String, tokenPrefix.format("toLabels"), true);
        }
    }

    @Override
    public void printNode(String id, List<String> labels) throws IOException {
        generator.writeStringField(tokenPrefix.format("id"), id);
        // Node labels are always emitted as an array.
        printProperty(labels, DataType.String, tokenPrefix.format("label"), true);
    }

    @Override
    public void printStartRow() throws IOException {
        writer.startCommit();
        generator.writeStartObject();
    }

    @Override
    public void printEndRow() throws IOException {
        generator.writeEndObject();
        generator.flush();
        writer.endCommit();
    }

    @Override
    public void close() throws Exception {
        generator.close();
        writer.close();
    }

    private boolean isList(Object value) {
        return value instanceof List<?>;
    }
}
| 4,297 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/QueryWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import java.io.IOException;
import java.util.Map;
public class QueryWriter implements LabelWriter<Map<?, ?>> {
private final PropertyGraphPrinter propertyGraphPrinter;
public QueryWriter(PropertyGraphPrinter propertyGraphPrinter) {
this.propertyGraphPrinter = propertyGraphPrinter;
}
@Override
public void handle(Map<?, ?> properties, boolean allowTokens) throws IOException {
propertyGraphPrinter.printStartRow();
propertyGraphPrinter.printProperties(properties);
propertyGraphPrinter.printEndRow();
}
@Override
public void close() throws Exception {
propertyGraphPrinter.close();
}
@Override
public String outputId() {
return propertyGraphPrinter.outputId();
}
} | 4,298 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/propertygraph/io/NeptuneStreamsSimpleJsonPropertyGraphPrinter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.propertygraph.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.propertygraph.schema.DataType;
import com.amazonaws.services.neptune.propertygraph.schema.PropertySchema;
import com.fasterxml.jackson.core.JsonGenerator;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
 * {@link PropertyGraphPrinter} that emits results as simplified Neptune Streams style JSON
 * records: one flat JSON object per property value (multi-valued properties produce one
 * record per element). Unused RDF-style fields (s, p, o, g, stmt) are emitted as empty
 * strings to keep a uniform record shape.
 */
public class NeptuneStreamsSimpleJsonPropertyGraphPrinter implements PropertyGraphPrinter {

    private final OutputWriter writer;
    private final JsonGenerator generator;

    public NeptuneStreamsSimpleJsonPropertyGraphPrinter(OutputWriter writer,
                                                        JsonGenerator generator) throws IOException {
        this.writer = writer;
        this.generator = generator;
    }

    @Override
    public String outputId() {
        return writer.outputId();
    }

    @Override
    public void printHeaderMandatoryColumns(String... columns) {
        // Do nothing — this format has no header.
    }

    @Override
    public void printHeaderRemainingColumns(Collection<PropertySchema> remainingColumns) {
        // Do nothing — this format has no header.
    }

    @Override
    public void printProperties(Map<?, ?> properties) throws IOException {
        throw new RuntimeException("Neptune Streams simple JSON is not supported for this command");
    }

    @Override
    public void printProperties(Map<?, ?> properties, boolean applyFormatting) throws IOException {
        // applyFormatting is ignored; delegates to the unsupported overload.
        printProperties(properties);
    }

    @Override
    public void printProperties(String id, String streamOperation, Map<?, ?> properties) throws IOException {
        for (Map.Entry<?, ?> entry : properties.entrySet()) {
            String key = String.valueOf(entry.getKey());
            Object value = entry.getValue();
            if (isList(value)) {
                // Multi-valued property: one record per element, each with its own inferred type.
                List<?> values = (List<?>) value;
                for (Object o : values) {
                    PropertySchema propertySchema = new PropertySchema(key);
                    propertySchema.accept(o, true);
                    printRecord(id, streamOperation, key, o, propertySchema.dataType());
                }
            } else {
                PropertySchema propertySchema = new PropertySchema(key);
                propertySchema.accept(value, true);
                printRecord(id, streamOperation, key, value, propertySchema.dataType());
            }
        }
    }

    @Override
    public void printEdge(String id, String label, String from, String to) throws IOException {
        printEdge(id, label, from, to, null, null);
    }

    @Override
    public void printEdge(String id, String label, String from, String to, Collection<String> fromLabels, Collection<String> toLabels) throws IOException {
        // Endpoint labels (fromLabels/toLabels) are not representable in this format and are dropped.
        printRecord(id, "e", "label", label, DataType.String, from, to);
    }

    @Override
    public void printNode(String id, List<String> labels) throws IOException {
        // One "vl" (vertex label) record per label.
        for (String l : labels) {
            printRecord(id, "vl", "label", l, DataType.String);
        }
    }

    @Override
    public void printStartRow() throws IOException {
        writer.startCommit();
    }

    @Override
    public void printEndRow() throws IOException {
        generator.flush();
        writer.endCommit();
    }

    @Override
    public void close() throws Exception {
        generator.close();
        writer.close();
    }

    private void printRecord(String id, String streamOperation, String key, Object value, DataType dataType) throws IOException {
        printRecord(id, streamOperation, key, value, dataType, null, null);
    }

    /**
     * Writes one flat JSON record. {@code from}/{@code to} are only meaningful for edge
     * records and default to empty strings when absent.
     */
    private void printRecord(String id, String streamOperation, String key, Object value, DataType dataType, String from, String to) throws IOException {
        writer.startOp();
        generator.writeStartObject();
        generator.writeStringField("id", id);
        generator.writeStringField("from", from != null ? from : "");
        generator.writeStringField("to", to != null ? to : "");
        generator.writeStringField("type", streamOperation);
        generator.writeStringField("key", key);
        // Values are always serialized as strings; the original type is carried in "dataType".
        dataType.printAsStringTo(generator, "value", value);
        generator.writeStringField("dataType", dataType.name());
        // RDF-style fields kept empty for a uniform record shape.
        generator.writeStringField("s", "");
        generator.writeStringField("p", "");
        generator.writeStringField("o", "");
        generator.writeStringField("g", "");
        generator.writeStringField("stmt", "");
        generator.writeEndObject();
        generator.writeRaw(writer.lineSeparator());
        generator.flush();
        writer.endOp();
    }

    private boolean isList(Object value) {
        return value instanceof List<?>;
    }
}
| 4,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.