index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/ParamConverter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Iterator;
/**
 * Converts JSON-style parameter maps into CLI arguments for the export commands.
 * Camel-case names become kebab-case options, array-valued parameters are
 * singularized and repeated once per element.
 */
public class ParamConverter {
private static final String REGEX = "([a-z])([A-Z]+)";
private static final String REPLACEMENT = "$1-$2";
/**
 * Converts a camelCase name to its kebab-case CLI form (e.g. "maxConcurrency" -> "max-concurrency").
 */
public static String toCliArg(String v) {
return v.replaceAll(REGEX, REPLACEMENT).toLowerCase();
}
/**
 * Returns the singular form of a plural name: "...ies" -> "...y", trailing "s" dropped,
 * anything else unchanged.
 */
public static String singularize(String v) {
if (v.endsWith("ies")) {
return v.substring(0, v.length() - 3) + "y";
}
if (v.endsWith("s")) {
return v.substring(0, v.length() - 1);
}
return v;
}
/**
 * Builds an {@link Args} command line from a JSON object of parameters.
 * Array values are emitted as repeated options under the singularized name.
 */
public static Args fromJson(String cmd, JsonNode json) {
Args args = new Args(cmd);
ObjectNode params = (ObjectNode) json;
for (Iterator<String> fieldNames = params.fieldNames(); fieldNames.hasNext(); ) {
String fieldName = fieldNames.next();
JsonNode valueNode = params.get(fieldName);
String argName = toCliArg(fieldName);
if (valueNode.isArray()) {
// Repeat the (singularized) option once per array element.
String singularArgName = singularize(argName);
for (JsonNode element : valueNode) {
addArg(singularArgName, element, args);
}
} else {
addArg(argName, valueNode, args);
}
}
return args;
}
/**
 * Appends one argument: booleans become flags (only when true), objects and
 * text values are single-quoted, everything else is rendered verbatim.
 */
private static void addArg(String argName, JsonNode argValue, Args args) {
// Prefix with "--" unless the caller already supplied a dash prefix.
String qualifiedName = argName.startsWith("-") ? argName : "--" + argName;
if (argValue.isBoolean()) {
if (argValue.asBoolean()) {
args.addFlag(qualifiedName);
}
} else if (argValue.isObject()) {
args.addOption(qualifiedName, String.format("'%s'", argValue.toPrettyString()));
} else if (argValue.isTextual()) {
args.addOption(qualifiedName, String.format("'%s'", argValue.textValue()));
} else {
args.addOption(qualifiedName, argValue.toString());
}
}
}
| 4,400 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/JobSize.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
/**
 * T-shirt sizing for an export job. Each size maps to the maximum concurrency
 * to use when running the export.
 */
public enum JobSize {
    small {
        @Override
        public int maxConcurrency() {
            return 8;
        }
    },
    medium {
        @Override
        public int maxConcurrency() {
            return 32;
        }
    },
    large {
        @Override
        public int maxConcurrency() {
            return 64;
        }
    },
    xlarge {
        @Override
        public int maxConcurrency() {
            return 96;
        }
    };

    /**
     * Parses a job size name case-insensitively. Unrecognized values deliberately
     * fall back to {@link #small} rather than failing.
     *
     * @param value job size name, e.g. "small", "XLarge"
     * @return the matching {@code JobSize}, or {@link #small} if not recognized
     */
    public static JobSize parse(String value) {
        try {
            // Locale.ROOT makes lower-casing locale-independent (e.g. the Turkish
            // dotted/dotless 'i' mapping would otherwise break valueOf lookups).
            return JobSize.valueOf(value.toLowerCase(java.util.Locale.ROOT));
        } catch (IllegalArgumentException e) {
            return small;
        }
    }

    /**
     * @return the maximum concurrency for a job of this size
     */
    public abstract int maxConcurrency();
}
| 4,401 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportRunner.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import com.amazonaws.services.neptune.NeptuneExportCli;
import com.amazonaws.services.neptune.NeptuneExportCommand;
import com.amazonaws.services.neptune.NeptuneExportEventHandlerHost;
import com.amazonaws.services.neptune.util.GitProperties;
import org.apache.commons.lang.StringUtils;
import static com.amazonaws.services.neptune.export.NeptuneExportService.MAX_FILE_DESCRIPTOR_COUNT;
/**
 * Entry point wrapper that parses neptune-export CLI arguments, wires up the
 * event handler and invocation settings on the resulting command, and runs it.
 */
public class NeptuneExportRunner {

    private final String[] args;
    private final NeptuneExportEventHandler eventHandler;
    private final boolean isCliInvocation;
    private final int maxFileDescriptorCount;

    /**
     * Creates a runner for a CLI invocation with a no-op event handler and the
     * default maximum file descriptor count.
     *
     * @param args raw command-line arguments
     */
    public NeptuneExportRunner(String[] args) {
        this(args, NeptuneExportEventHandler.NULL_EVENT_HANDLER, true, MAX_FILE_DESCRIPTOR_COUNT);
    }

    /**
     * @param args                   raw command-line arguments
     * @param eventHandler           handler notified of export lifecycle events
     * @param isCliInvocation        whether this run originates from the CLI
     * @param maxFileDescriptorCount upper bound on file descriptors the export may use
     */
    public NeptuneExportRunner(String[] args, NeptuneExportEventHandler eventHandler,
                               boolean isCliInvocation, int maxFileDescriptorCount) {
        this.args = args;
        this.eventHandler = eventHandler;
        this.isCliInvocation = isCliInvocation;
        this.maxFileDescriptorCount = maxFileDescriptorCount;
    }

    /**
     * Parses the arguments into a command and runs it. On any parse or runtime
     * failure, prints the exception message to stderr and exits with code -1.
     */
    public void run() {
        System.err.println(String.format("neptune-export.jar: %s", GitProperties.fromResource()));

        Args argsCollection = new Args(this.args);

        // Honour a --log-level override before any logging-dependent code runs.
        if (argsCollection.contains("--log-level")) {
            String logLevel = argsCollection.getFirstOptionValue("--log-level");
            if (StringUtils.isNotEmpty(logLevel)) {
                System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", logLevel);
            }
        }

        com.github.rvesse.airline.Cli<Runnable> cli = new com.github.rvesse.airline.Cli<>(NeptuneExportCli.class);

        try {
            Runnable cmd = cli.parse(this.args);

            if (cmd instanceof NeptuneExportEventHandlerHost) {
                ((NeptuneExportEventHandlerHost) cmd).setEventHandler(eventHandler);
            }

            // Previously two separate, identical NeptuneExportCommand checks set these
            // properties one at a time; merged into a single guarded block.
            if (cmd instanceof NeptuneExportCommand) {
                NeptuneExportCommand command = (NeptuneExportCommand) cmd;
                command.setIsCliInvocation(isCliInvocation);
                command.setMaxFileDescriptorCount(maxFileDescriptorCount);
            }

            cmd.run();
        } catch (Exception e) {
            // CLI surface: report the message only (no stack trace) and signal failure.
            System.err.println(e.getMessage());
            System.err.println();
            System.exit(-1);
        }
    }
}
| 4,402 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportServiceEventHandler.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
/**
 * Event handler contract for the export service, extending the base
 * {@link NeptuneExportEventHandler} with a pre-export callback.
 */
public interface NeptuneExportServiceEventHandler extends NeptuneExportEventHandler
{
// Invoked before the export starts, giving the handler a chance to inspect or
// adjust the command-line args and the S3 upload parameters.
void onBeforeExport(Args args, ExportToS3NeptuneExportEventHandler.S3UploadParams s3UploadParams);
}
| 4,403 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/CompletionFileWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import com.fasterxml.jackson.databind.node.ObjectNode;
/**
 * Callback for contributing additional entries to the export completion-file
 * JSON payload before it is written out.
 */
public interface CompletionFileWriter {
// Mutates the supplied payload in place with this writer's additions.
void updateCompletionFile(ObjectNode completionFilePayload);
}
| 4,404 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/ExportToS3NeptuneExportEventHandler.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import com.amazonaws.AmazonClientException;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListener;
import com.amazonaws.services.neptune.cluster.Cluster;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.propertygraph.ExportStats;
import com.amazonaws.services.neptune.propertygraph.schema.GraphSchema;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.S3ObjectInfo;
import com.amazonaws.services.neptune.util.Timer;
import com.amazonaws.services.neptune.util.TransferManagerWrapper;
import com.amazonaws.services.s3.Headers;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.ObjectTagging;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.SSEAlgorithm;
import com.amazonaws.services.s3.model.Tag;
import com.amazonaws.services.s3.transfer.*;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang.StringUtils;
import org.slf4j.LoggerFactory;
import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static com.amazonaws.services.neptune.export.NeptuneExportService.NEPTUNE_EXPORT_TAGS;
import static java.nio.charset.StandardCharsets.UTF_8;
/**
 * {@link NeptuneExportEventHandler} that uploads export results to Amazon S3:
 * data files and a completion file on success, and (optionally) the partial
 * output plus the GC log on failure. Objects are tagged with the standard
 * neptune-export tags and, when set, server-side encrypted with a KMS key.
 */
public class ExportToS3NeptuneExportEventHandler implements NeptuneExportEventHandler {
// Caller-supplied options controlling S3 layout and overwrite behaviour.
public static class S3UploadParams {
// When true, uploads go into a per-export subdirectory of the output S3 path.
private boolean createExportSubdirectory = true;
// When true, existing S3 content for the export's subdirectories is removed first.
private boolean overwriteExisting = false;
public boolean createExportSubdirectory() {
return createExportSubdirectory;
}
// Fluent setter; returns this for chaining.
public S3UploadParams setCreateExportSubdirectory(boolean createExportSubdirectory) {
this.createExportSubdirectory = createExportSubdirectory;
return this;
}
public boolean overwriteExisting() {
return overwriteExisting;
}
// Fluent setter; returns this for chaining.
public S3UploadParams setOverwriteExisting(boolean overwriteExisting) {
this.overwriteExisting = overwriteExisting;
return this;
}
@Override
public String toString() {
return "{" +
"createExportSubdirectory=" + createExportSubdirectory +
", overwriteExisting=" + overwriteExisting +
'}';
}
}
/**
 * Builds the tag set applied to every uploaded object: the standard
 * neptune-export tags plus, when profiles are in use, a colon-separated
 * "neptune-export:profiles" tag.
 */
public static ObjectTagging createObjectTags(Collection<String> profiles) {
List<Tag> tags = new ArrayList<>(NEPTUNE_EXPORT_TAGS);
if (!profiles.isEmpty()) {
String profilesTagValue = String.join(":", profiles);
tags.add(new Tag("neptune-export:profiles", profilesTagValue));
}
return new ObjectTagging(tags);
}
private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ExportToS3NeptuneExportEventHandler.class);
private final String localOutputPath;
private final String outputS3Path;
private final String s3Region;
private final String completionFileS3Path;
private final ObjectNode completionFilePayload;
private final boolean uploadToS3OnError;
private final S3UploadParams s3UploadParams;
private final Collection<String> profiles;
private final Collection<CompletionFileWriter> completionFileWriters;
// Holds the S3 location of the uploaded export once onExportComplete succeeds.
private final AtomicReference<S3ObjectInfo> result = new AtomicReference<>();
// Extracts 5xx status codes from AWS client exception messages to decide retryability.
private static final Pattern STATUS_CODE_5XX_PATTERN = Pattern.compile("Status Code: (5\\d+)");
private final String sseKmsKeyId;
private final AWSCredentialsProvider s3CredentialsProvider;
/**
 * @param localOutputPath        local directory containing the export output
 * @param outputS3Path           destination S3 path; empty disables all uploading
 * @param s3Region               region used for the S3 transfer manager
 * @param completionFileS3Path   S3 path for the completion file; empty disables it
 * @param completionFilePayload  base JSON payload for the completion file
 * @param uploadToS3OnError      whether to upload partial results on failure
 * @param s3UploadParams         layout/overwrite options
 * @param profiles               active export profiles, recorded as object tags
 * @param completionFileWriters  callbacks that enrich the completion payload
 * @param sseKmsKeyId            KMS key id for SSE-KMS; empty/null behaviour per S3ObjectInfo
 * @param s3CredentialsProvider  credentials used for the S3 uploads
 */
public ExportToS3NeptuneExportEventHandler(String localOutputPath,
String outputS3Path,
String s3Region,
String completionFileS3Path,
ObjectNode completionFilePayload,
boolean uploadToS3OnError,
S3UploadParams s3UploadParams,
Collection<String> profiles,
Collection<CompletionFileWriter> completionFileWriters,
String sseKmsKeyId,
AWSCredentialsProvider s3CredentialsProvider) {
this.localOutputPath = localOutputPath;
this.outputS3Path = outputS3Path;
this.s3Region = s3Region;
this.completionFileS3Path = completionFileS3Path;
this.completionFilePayload = completionFilePayload;
this.uploadToS3OnError = uploadToS3OnError;
this.s3UploadParams = s3UploadParams;
this.profiles = profiles;
this.completionFileWriters = completionFileWriters;
this.sseKmsKeyId = sseKmsKeyId;
this.s3CredentialsProvider = s3CredentialsProvider;
}
// Overload used when no graph schema is available; delegates with an empty schema.
@Override
public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster) throws Exception {
onExportComplete(directories, stats, cluster, new GraphSchema());
}
/**
 * On successful export: logs the total output size (best-effort), then — if an
 * output S3 path is configured — optionally clears existing S3 directories,
 * uploads the export files and the completion file, and records the final S3
 * location in {@link #result}.
 */
@Override
public void onExportComplete(Directories directories, ExportStats stats, Cluster cluster, GraphSchema graphSchema) throws Exception {
try {
long size = Files.walk(directories.rootDirectory()).mapToLong(p -> p.toFile().length()).sum();
logger.info("Total size of exported files: {}", FileUtils.byteCountToDisplaySize(size));
} catch (Exception e) {
// Ignore — size logging is best-effort and must not fail the export.
}
// No S3 destination configured: local export only.
if (StringUtils.isEmpty(outputS3Path)) {
return;
}
logger.info("S3 upload params: {}", s3UploadParams);
try (TransferManagerWrapper transferManager = new TransferManagerWrapper(s3Region, s3CredentialsProvider)) {
File outputDirectory = directories.rootDirectory().toFile();
S3ObjectInfo outputS3ObjectInfo = calculateOutputS3Path(outputDirectory);
Timer.timedActivity("uploading files to S3", (CheckedActivity.Runnable) () -> {
deleteS3Directories(directories, outputS3ObjectInfo);
uploadExportFilesToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo);
uploadCompletionFileToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo, stats, graphSchema);
});
result.set(outputS3ObjectInfo);
}
}
/**
 * @return the S3 location of the uploaded export, or null if no successful
 * upload has completed yet
 */
public S3ObjectInfo result() {
return result.get();
}
/**
 * On export failure: if enabled, uploads the partial local output (plus the GC
 * log) to a "/failed" S3 location suffixed with a random id so repeated
 * failures do not collide. All errors here are logged, never rethrown.
 */
@Override
public void onError() {
if (!uploadToS3OnError) {
return;
}
logger.warn("Uploading results of failed export to S3");
if (StringUtils.isEmpty(outputS3Path)) {
logger.warn("S3 output path is empty");
return;
}
try {
Path outputPath = Paths.get(localOutputPath);
long size = Files.walk(outputPath).mapToLong(p -> p.toFile().length()).sum();
logger.warn("Total size of failed export files: {}", FileUtils.byteCountToDisplaySize(size));
try (TransferManagerWrapper transferManager = new TransferManagerWrapper(s3Region, s3CredentialsProvider)) {
// Random suffix keeps each failed run's upload distinct.
String s3Suffix = UUID.randomUUID().toString().replace("-", "");
File outputDirectory = outputPath.toFile();
S3ObjectInfo outputS3ObjectInfo = calculateOutputS3Path(outputDirectory)
.replaceOrAppendKey("/tmp", "/failed")
.withNewKeySuffix(s3Suffix);
Timer.timedActivity("uploading failed export files to S3", (CheckedActivity.Runnable) () -> {
uploadExportFilesToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo);
uploadGcLogToS3(transferManager.get(), outputDirectory, outputS3ObjectInfo);
});
logger.warn("Failed export S3 location: {}", outputS3ObjectInfo.toString());
}
} catch (Exception e) {
logger.error("Failed to upload failed export files to S3", e);
}
}
/**
 * Uploads the JVM GC log (expected in the parent of the export directory, per
 * the "./../gc.log" path) alongside the failed export output, if it exists.
 */
private void uploadGcLogToS3(TransferManager transferManager,
File directory,
S3ObjectInfo outputS3ObjectInfo) throws IOException {
File gcLog = new File(directory, "./../gc.log");
if (!gcLog.exists()) {
logger.warn("Ignoring request to upload GC log to S3 because GC log does not exist");
return;
}
S3ObjectInfo gcLogS3ObjectInfo = outputS3ObjectInfo.withNewKeySuffix("gc.log");
try (InputStream inputStream = new FileInputStream(gcLog)) {
PutObjectRequest putObjectRequest = new PutObjectRequest(gcLogS3ObjectInfo.bucket(),
gcLogS3ObjectInfo.key(),
inputStream,
S3ObjectInfo.createObjectMetadata(gcLog.length(), sseKmsKeyId)).withTagging(createObjectTags(profiles));
Upload upload = transferManager.upload(putObjectRequest);
upload.waitForUploadResult();
} catch (InterruptedException e) {
// Restore the interrupt flag so callers can observe cancellation.
logger.warn(e.getMessage());
Thread.currentThread().interrupt();
}
}
// Resolves the destination S3 path, optionally nesting under a directory named
// after the local export directory.
private S3ObjectInfo calculateOutputS3Path(File outputDirectory) {
S3ObjectInfo outputBaseS3ObjectInfo = new S3ObjectInfo(outputS3Path);
if (s3UploadParams.createExportSubdirectory()) {
return outputBaseS3ObjectInfo.withNewKeySuffix(outputDirectory.getName());
} else {
return outputBaseS3ObjectInfo;
}
}
/**
 * Writes the completion file (base payload + export stats + contributions from
 * each {@link CompletionFileWriter}) locally, then uploads it to the configured
 * completion-file S3 path, substituting "_COMPLETION_ID_" in the key.
 */
private void uploadCompletionFileToS3(TransferManager transferManager,
File directory,
S3ObjectInfo outputS3ObjectInfo,
ExportStats stats,
GraphSchema graphSchema) throws IOException {
if (StringUtils.isEmpty(completionFileS3Path)) {
return;
}
if (directory == null || !directory.exists()) {
logger.warn("Ignoring request to upload completion file to S3 because directory from which to upload files does not exist");
return;
}
// Name the file after the export subdirectory, or a timestamp when uploads go
// directly into the output path.
String completionFilename = s3UploadParams.createExportSubdirectory() ?
directory.getName() :
String.valueOf(System.currentTimeMillis());
File completionFile = new File(localOutputPath, completionFilename + ".json");
ObjectNode neptuneExportNode = JsonNodeFactory.instance.objectNode();
completionFilePayload.set("neptuneExport", neptuneExportNode);
neptuneExportNode.put("outputS3Path", outputS3ObjectInfo.toString());
stats.addTo(neptuneExportNode, graphSchema);
// Let registered writers enrich the payload before it is serialized.
for (CompletionFileWriter completionFileWriter : completionFileWriters) {
completionFileWriter.updateCompletionFile(completionFilePayload);
}
try (Writer writer = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(completionFile), UTF_8))) {
ObjectWriter objectWriter = new ObjectMapper().writer().withDefaultPrettyPrinter();
writer.write(objectWriter.writeValueAsString(completionFilePayload));
}
S3ObjectInfo completionFileS3ObjectInfo =
new S3ObjectInfo(completionFileS3Path).replaceOrAppendKey(
"_COMPLETION_ID_",
FilenameUtils.getBaseName(completionFile.getName()),
completionFile.getName());
logger.info("Uploading completion file to {}", completionFileS3ObjectInfo.key());
try (InputStream inputStream = new FileInputStream(completionFile)) {
PutObjectRequest putObjectRequest = new PutObjectRequest(completionFileS3ObjectInfo.bucket(),
completionFileS3ObjectInfo.key(),
inputStream,
S3ObjectInfo.createObjectMetadata(completionFile.length(), sseKmsKeyId))
.withTagging(createObjectTags(profiles));
Upload upload = transferManager.upload(putObjectRequest);
upload.waitForUploadResult();
} catch (InterruptedException e) {
// Restore the interrupt flag so callers can observe cancellation.
logger.warn(e.getMessage());
Thread.currentThread().interrupt();
}
}
/**
 * Recursively uploads the export directory to S3. Retries up to 3 attempts
 * when the failure is retryable with a 5xx status code; otherwise rethrows as
 * a RuntimeException with the directory, destination, and reason.
 */
private void uploadExportFilesToS3(TransferManager transferManager, File directory, S3ObjectInfo outputS3ObjectInfo) {
if (directory == null || !directory.exists()) {
logger.warn("Ignoring request to upload files to S3 because upload directory from which to upload files does not exist");
return;
}
boolean allowRetry = true;
int retryCount = 0;
while (allowRetry){
try {
// Apply SSE-KMS metadata and the standard tags to every uploaded file.
ObjectMetadataProvider metadataProvider = (file, objectMetadata) -> {
S3ObjectInfo.createObjectMetadata(file.length(), sseKmsKeyId, objectMetadata);
};
ObjectTaggingProvider taggingProvider = uploadContext -> createObjectTags(profiles);
logger.info("Uploading export files to {}", outputS3ObjectInfo.toString());
MultipleFileUpload upload = transferManager.uploadDirectory(
outputS3ObjectInfo.bucket(),
outputS3ObjectInfo.key(),
directory,
true,
metadataProvider,
taggingProvider);
AmazonClientException amazonClientException = upload.waitForException();
if (amazonClientException != null){
String errorMessage = amazonClientException.getMessage();
Matcher exMsgStatusCodeMatcher = STATUS_CODE_5XX_PATTERN.matcher(errorMessage);
logger.error("Upload to S3 failed: {}", errorMessage);
// only retry if exception is retryable, the status code is 5xx, and we have retry counts left
if (amazonClientException.isRetryable() && exMsgStatusCodeMatcher.find() && retryCount <= 2) {
retryCount++;
logger.info("Retrying upload to S3 [RetryCount: {}]", retryCount);
} else {
allowRetry = false;
logger.warn("Cancelling upload to S3 [RetryCount: {}]", retryCount);
throw new RuntimeException(String.format("Upload to S3 failed [Directory: %s, S3 location: %s, Reason: %s, RetryCount: %s]", directory, outputS3ObjectInfo, errorMessage, retryCount));
}
} else {
// Upload succeeded; leave the retry loop.
allowRetry = false;
}
} catch (InterruptedException e) {
// Restore the interrupt flag so callers can observe cancellation.
logger.warn(e.getMessage());
Thread.currentThread().interrupt();
}
}
}
// NOTE(review): this computes the leaf S3 directory keys that would need
// clearing when overwriteExisting is set, but never issues any delete call —
// the leafS3Directories list is built and then discarded. Looks like dead or
// incomplete code; confirm whether deletion was intended here.
private void deleteS3Directories(Directories directories, S3ObjectInfo outputS3ObjectInfo) {
if (!s3UploadParams.overwriteExisting()) {
return;
}
List<S3ObjectInfo> leafS3Directories = new ArrayList<>();
Path rootDirectory = directories.rootDirectory();
for (Path subdirectory : directories.subdirectories()) {
String newKey = rootDirectory.relativize(subdirectory).toString();
leafS3Directories.add(outputS3ObjectInfo.withNewKeySuffix(newKey));
}
}
}
| 4,405 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/Args.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import org.codehaus.plexus.util.cli.CommandLineUtils;
import java.util.*;
/**
 * A mutable, ordered list of command-line tokens with helpers for querying and
 * editing options (name/value pairs) and flags (standalone tokens).
 */
public class Args {

    private final List<String> args = new ArrayList<>();

    /**
     * Creates an argument list from already-tokenized values.
     */
    public Args(String[] args) {
        this.args.addAll(Arrays.asList(args));
    }

    /**
     * Creates an argument list by tokenizing a full command-line string
     * (respecting quoting).
     *
     * @throws RuntimeException if the command line cannot be tokenized
     */
    public Args(String cmd) {
        String[] values;
        try {
            values = CommandLineUtils.translateCommandline(cmd);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        args.addAll(Arrays.asList(values));
    }

    /**
     * Removes every occurrence of each named option together with the value
     * that follows it.
     */
    public void removeOptions(String... options) {
        for (String option : options) {
            int index = args.indexOf(option);
            while (index >= 0) {
                // Fix: an option may appear as the final token with no value after it;
                // removing index + 1 unconditionally threw IndexOutOfBoundsException.
                if (index + 1 < args.size()) {
                    args.remove(index + 1);
                }
                args.remove(index);
                index = args.indexOf(option);
            }
        }
    }

    /**
     * Removes every occurrence of each flag token.
     */
    public void removeFlags(String... flags) {
        for (String flag : flags) {
            args.removeIf(arg -> arg.equals(flag));
        }
    }

    /**
     * Appends an option followed by its value.
     */
    public void addOption(String option, String value) {
        args.add(option);
        args.add(value);
    }

    /**
     * @return true if any token equals the given name
     */
    public boolean contains(String name) {
        return args.contains(name);
    }

    /**
     * Returns true if the named option appears immediately followed by the given
     * value. Note: the scan consumes the token after each match candidate, so
     * overlapping name/value sequences are deliberately not re-examined
     * (preserves long-standing behaviour).
     */
    public boolean contains(String name, String value) {
        Iterator<String> iterator = args.iterator();
        while (iterator.hasNext()) {
            String arg = iterator.next();
            if (arg.equals(name)) {
                if (iterator.hasNext() && iterator.next().equals(value)) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * @return the tokens as an array, in order
     */
    public String[] values() {
        return args.toArray(new String[]{});
    }

    @Override
    public String toString() {
        return String.join(" ", args);
    }

    /**
     * Appends a standalone flag token.
     */
    public void addFlag(String flag) {
        args.add(flag);
    }

    /**
     * Replaces every token equal to {@code original} with {@code replacement}.
     */
    public void replace(String original, String replacement) {
        args.replaceAll(s -> s.equals(original) ? replacement : s);
    }

    /**
     * @return true if any of the given values appears as a token
     */
    public boolean containsAny(String... values) {
        for (String value : values) {
            if (args.contains(value)) {
                return true;
            }
        }
        return false;
    }

    /**
     * Returns the value following the first occurrence of the named option, or
     * null if the option is absent or has no following token.
     */
    public String getFirstOptionValue(String name) {
        Iterator<String> iterator = args.iterator();
        while (iterator.hasNext()) {
            String arg = iterator.next();
            if (arg.equals(name)) {
                if (iterator.hasNext()) {
                    return iterator.next();
                }
            }
        }
        return null;
    }

    /**
     * Returns the values following every occurrence of the named option, in order
     * (empty if the option never appears with a value).
     */
    public Collection<String> getOptionValues(String name) {
        Collection<String> values = new ArrayList<>();
        Iterator<String> iterator = args.iterator();
        while (iterator.hasNext()) {
            String arg = iterator.next();
            if (arg.equals(name)) {
                if (iterator.hasNext()) {
                    values.add(iterator.next());
                }
            }
        }
        return values;
    }
}
| 4,406 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/FeatureToggle.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
/**
 * Named feature toggles that can be switched on for an export run.
 * NOTE(review): individual toggle semantics are inferred from the names only —
 * confirm against the code paths that check each toggle before documenting
 * them in detail.
 */
public enum FeatureToggle {
FilterByPropertyKeys,
ExportByIndividualLabels,
NeptuneML_V2,
Edge_Features,
Inject_Fault,
Simulate_Cloned_Cluster,
Keep_Rewritten_Files,
Infer_RDF_Prefixes,
}
| 4,407 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/export/NeptuneExportLambda.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.export;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.services.neptune.util.AWSCredentialsUtil;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang.StringUtils;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestStreamHandler;
import com.amazonaws.services.neptune.util.EnvironmentVariableUtils;
import com.amazonaws.services.neptune.util.S3ObjectInfo;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import static com.amazonaws.services.neptune.RunNeptuneExportSvc.DEFAULT_MAX_FILE_DESCRIPTOR_COUNT;
import static java.nio.charset.StandardCharsets.UTF_8;
public class NeptuneExportLambda implements RequestStreamHandler {
public static final String TEMP_PATH = "/tmp/neptune";
private final String localOutputPath;
private final boolean cleanOutputPath;
private final int maxFileDescriptorCount;
/**
 * Creates a handler using the default Lambda temp path (/tmp/neptune), with
 * output-path cleaning enabled and the default file descriptor limit.
 */
public NeptuneExportLambda() {
this(TEMP_PATH, true, DEFAULT_MAX_FILE_DESCRIPTOR_COUNT);
}
/**
 * @param localOutputPath        local directory where export files are written
 * @param cleanOutputPath        presumably whether to clear the local output path
 *                               before exporting — passed through to the export
 *                               service; confirm there
 * @param maxFileDescriptorCount upper bound on file descriptors for the export
 */
public NeptuneExportLambda(String localOutputPath, boolean cleanOutputPath, int maxFileDescriptorCount) {
this.localOutputPath = localOutputPath;
this.cleanOutputPath = cleanOutputPath;
this.maxFileDescriptorCount = maxFileDescriptorCount;
}
/**
 * Lambda entry point. Reads the export request as JSON from {@code inputStream},
 * resolves each setting from the request body with an environment-variable
 * fallback, runs the export, and writes the resulting S3 location to
 * {@code outputStream}. Exits the process with -1 if the export fails.
 */
@Override
public void handleRequest(InputStream inputStream, OutputStream outputStream, Context context) throws IOException {
// Adapt the Lambda context logger to the local Logger functional interface.
Logger logger = s -> context.getLogger().log(s);
ObjectMapper objectMapper = new ObjectMapper();
JsonNode json = objectMapper.readTree(IOUtils.toString(inputStream, UTF_8.name()));
// Each setting below prefers the JSON request field and falls back to an env var.
String cmd = json.has("command") ?
json.path("command").textValue() :
EnvironmentVariableUtils.getOptionalEnv("COMMAND", "export-pg");
ObjectNode params = json.has("params") ?
(ObjectNode) json.get("params") :
objectMapper.readTree("{}").deepCopy();
String outputS3Path = json.has("outputS3Path") ?
json.path("outputS3Path").textValue() :
EnvironmentVariableUtils.getOptionalEnv("OUTPUT_S3_PATH", "");
String sseKmsKeyId = json.has("sseKmsKeyId") ?
json.path("sseKmsKeyId").textValue() :
EnvironmentVariableUtils.getOptionalEnv("SSE_KMS_KEY_ID", "");
// toString() (not textValue()) so JSON booleans render as "true"/"false" for parseBoolean.
boolean createExportSubdirectory = Boolean.parseBoolean(
json.has("createExportSubdirectory") ?
json.path("createExportSubdirectory").toString() :
EnvironmentVariableUtils.getOptionalEnv("CREATE_EXPORT_SUBDIRECTORY", "true"));
boolean overwriteExisting = Boolean.parseBoolean(
json.has("overwriteExisting") ?
json.path("overwriteExisting").toString() :
EnvironmentVariableUtils.getOptionalEnv("OVERWRITE_EXISTING", "false"));
boolean uploadToS3OnError = Boolean.parseBoolean(
json.has("uploadToS3OnError") ?
json.path("uploadToS3OnError").toString() :
EnvironmentVariableUtils.getOptionalEnv("UPLOAD_TO_S3_ON_ERROR", "true"));
String configFileS3Path = json.has("configFileS3Path") ?
json.path("configFileS3Path").textValue() :
EnvironmentVariableUtils.getOptionalEnv("CONFIG_FILE_S3_PATH", "");
String queriesFileS3Path = json.has("queriesFileS3Path") ?
json.path("queriesFileS3Path").textValue() :
EnvironmentVariableUtils.getOptionalEnv("QUERIES_FILE_S3_PATH", "");
String completionFileS3Path = json.has("completionFileS3Path") ?
json.path("completionFileS3Path").textValue() :
EnvironmentVariableUtils.getOptionalEnv("COMPLETION_FILE_S3_PATH", "");
// Falls back to S3_REGION, then AWS_REGION, then empty.
String s3Region = json.has("s3Region") ?
json.path("s3Region").textValue() :
EnvironmentVariableUtils.getOptionalEnv("S3_REGION",
EnvironmentVariableUtils.getOptionalEnv("AWS_REGION", ""));
ObjectNode completionFilePayload = json.has("completionFilePayload") ?
json.path("completionFilePayload").deepCopy() :
objectMapper.readTree(
EnvironmentVariableUtils.getOptionalEnv(
"COMPLETION_FILE_PAYLOAD",
"{}")).
deepCopy();
ObjectNode additionalParams = json.has("additionalParams") ?
json.path("additionalParams").deepCopy() :
objectMapper.readTree("{}").deepCopy();
// -1 means "no jobSize supplied"; otherwise derive concurrency from the job size.
int maxConcurrency = json.has("jobSize") ?
JobSize.parse(json.path("jobSize").textValue()).maxConcurrency() :
-1;
// We are masking 3/4 of the KMS Key ID as it is potentially sensitive information.
String maskedKeyId = StringUtils.isBlank(sseKmsKeyId) ?
sseKmsKeyId :
sseKmsKeyId.substring(0, sseKmsKeyId.length()/4) +
sseKmsKeyId.substring(sseKmsKeyId.length()/4).replaceAll("\\w","*");
AWSCredentialsProvider s3CredentialsProvider = getS3CredentialsProvider(json, params, s3Region);
// Echo the resolved configuration (with the KMS key masked) for diagnostics.
logger.log("cmd : " + cmd);
logger.log("params : " + params.toPrettyString());
logger.log("outputS3Path : " + outputS3Path);
logger.log("createExportSubdirectory : " + createExportSubdirectory);
logger.log("overwriteExisting : " + overwriteExisting);
logger.log("uploadToS3OnError : " + uploadToS3OnError);
logger.log("configFileS3Path : " + configFileS3Path);
logger.log("queriesFileS3Path : " + queriesFileS3Path);
logger.log("completionFileS3Path : " + completionFileS3Path);
logger.log("s3Region : " + s3Region);
logger.log("sseKmsKeyId : " + maskedKeyId);
logger.log("completionFilePayload : " + completionFilePayload.toPrettyString());
logger.log("additionalParams : " + additionalParams.toPrettyString());
logger.log("maxFileDescriptorCount : " + maxFileDescriptorCount);
// A bare command name plus params is expanded into a full command line.
if (!cmd.contains(" ") && !params.isEmpty()) {
cmd = ParamConverter.fromJson(cmd, params).toString();
}
logger.log("revised cmd : " + cmd);
NeptuneExportService neptuneExportService = new NeptuneExportService(
cmd,
localOutputPath,
cleanOutputPath,
outputS3Path,
createExportSubdirectory,
overwriteExisting,
uploadToS3OnError,
configFileS3Path,
queriesFileS3Path,
completionFileS3Path,
completionFilePayload,
additionalParams,
maxConcurrency,
s3Region,
maxFileDescriptorCount,
sseKmsKeyId,
s3CredentialsProvider);
S3ObjectInfo outputS3ObjectInfo = neptuneExportService.execute();
// Local-only export: nothing to report back.
if (StringUtils.isEmpty(outputS3Path)) {
return;
}
if (outputS3ObjectInfo != null) {
try (Writer writer = new BufferedWriter(new OutputStreamWriter(outputStream, UTF_8))) {
writer.write(outputS3ObjectInfo.toString());
}
} else {
// A null result indicates the export failed; terminate the runtime with a non-zero code.
System.exit(-1);
}
}
private AWSCredentialsProvider getS3CredentialsProvider(JsonNode json, ObjectNode params, String region) {
String s3RoleArn = json.has("s3RoleArn") ?
json.path("s3RoleArn").textValue() :
EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_ARN", "");
String s3RoleSessionName = json.has("s3RoleSessionName") ?
json.path("s3RoleSessionName").textValue() :
EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_SESSION_NAME", "Neptune-Export");
String s3RoleExternalId = json.has("s3RoleExternalId") ?
json.path("s3RoleExternalId").textValue() :
EnvironmentVariableUtils.getOptionalEnv("S3_ROLE_EXTERNAL_ID", "");
String credentialsProfile = params.has("credentials-profile") ?
params.path("credentials-profile").textValue() :
EnvironmentVariableUtils.getOptionalEnv("CREDENTIALS_PROFILE", "");
String credentialsConfigFilePath = params.has("credentials-config-file") ?
params.path("credentials-config-file").textValue() :
EnvironmentVariableUtils.getOptionalEnv("CREDENTIALS_CONFIG_FILE", "");
AWSCredentialsProvider sourceCredentialsProvider = AWSCredentialsUtil.getProfileCredentialsProvider(credentialsProfile, credentialsConfigFilePath);
if (StringUtils.isEmpty(s3RoleArn)) {
return sourceCredentialsProvider;
}
return AWSCredentialsUtil.getSTSAssumeRoleCredentialsProvider(s3RoleArn, s3RoleSessionName, s3RoleExternalId, sourceCredentialsProvider, region);
}
}
| 4,408 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/TupleQueryHandler.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import org.apache.commons.lang.StringUtils;
import org.eclipse.rdf4j.model.*;
import org.eclipse.rdf4j.query.BindingSet;
import org.eclipse.rdf4j.query.QueryResultHandlerException;
import org.eclipse.rdf4j.query.TupleQueryResultHandler;
import org.eclipse.rdf4j.query.TupleQueryResultHandlerException;
import org.eclipse.rdf4j.repository.RepositoryConnection;
import org.eclipse.rdf4j.rio.RDFWriter;
import java.util.List;
/**
 * Streams SPARQL SELECT results into an {@link RDFWriter} as RDF statements.
 * <p>
 * Every solution must bind the variables {@code s}, {@code p}, {@code o} and
 * {@code g}; each binding set is converted back into a statement and handed to
 * the writer. A statement in Neptune's default named graph is written with a
 * null context.
 */
class TupleQueryHandler implements TupleQueryResultHandler {

    private static final String DEFAULT_NAMED_GRAPH =
            "http://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph";

    private final RDFWriter writer;
    private final ValueFactory factory;

    public TupleQueryHandler(RDFWriter writer, ValueFactory factory) {
        this.writer = writer;
        this.factory = factory;
    }

    @Override
    public void handleBoolean(boolean value) throws QueryResultHandlerException {
        // Boolean results are not emitted by the export queries — ignored.
    }

    @Override
    public void handleLinks(List<String> linkUrls) throws QueryResultHandlerException {
        // Links are not relevant to statement export — ignored.
    }

    @Override
    public void startQueryResult(List<String> bindingNames) throws TupleQueryResultHandlerException {
        writer.startRDF();
    }

    @Override
    public void endQueryResult() throws TupleQueryResultHandlerException {
        writer.endRDF();
    }

    @Override
    public void handleSolution(BindingSet bindingSet) throws TupleQueryResultHandlerException {
        Value subjectValue = bindingSet.getValue("s");
        Value predicateValue = bindingSet.getValue("p");
        Value objectValue = bindingSet.getValue("o");
        Value graphValue = bindingSet.getValue("g");

        if (subjectValue == null || predicateValue == null || objectValue == null || graphValue == null) {
            throw new IllegalArgumentException("SPARQL query must return results with s, p, o and g values. For example: SELECT * FROM NAMED <http://aws.amazon.com/neptune/vocab/v01/DefaultNamedGraph> WHERE { GRAPH ?g {?s a <http://kelvinlawrence.net/air-routes/class/Airport>. ?s ?p ?o}} LIMIT 10");
        }

        // Subjects may be IRIs or blank nodes; everything else is rejected by
        // the null check above or handled by the factory.
        Resource subject = subjectValue.isIRI()
                ? factory.createIRI(subjectValue.stringValue())
                : factory.createBNode(subjectValue.stringValue());

        writer.handleStatement(factory.createStatement(
                subject,
                factory.createIRI(predicateValue.stringValue()),
                objectValue,
                getNonDefaultNamedGraph(graphValue, factory)));
    }

    /**
     * Returns the graph IRI for a statement, or null when the value names
     * Neptune's default named graph (or is empty).
     */
    private IRI getNonDefaultNamedGraph(Value g, ValueFactory factory) {
        String graphUri = g.stringValue();
        if (StringUtils.isEmpty(graphUri) || graphUri.equalsIgnoreCase(DEFAULT_NAMED_GRAPH)) {
            return null;
        }
        return factory.createIRI(graphUri);
    }
}
| 4,409 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfEdgesJob.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
/**
 * Export job that extracts only the "edge" statements of an RDF graph:
 * triples whose object is a resource rather than a literal.
 */
public class ExportRdfEdgesJob implements ExportRdfJob {

    private final NeptuneSparqlClient client;
    private final RdfTargetConfig targetConfig;

    public ExportRdfEdgesJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) {
        this.client = client;
        this.targetConfig = targetConfig;
    }

    @Override
    public void execute() throws Exception {
        // Filtering out literal objects leaves only resource-to-resource
        // ("edge") statements.
        String query = "CONSTRUCT {\n" +
                " ?s ?p ?o \n" +
                "}\n" +
                "WHERE {\n" +
                " ?s ?p ?o . \n" +
                " FILTER(!isLiteral(?o))\n" +
                "}";
        Timer.timedActivity("exporting RDF edges as " + targetConfig.format().description(),
                (CheckedActivity.Runnable) () -> {
                    System.err.println("Creating edge statement files");
                    client.executeGraphQuery(query, targetConfig);
                });
    }
}
| 4,410 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/NeptuneSparqlClient.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.rdf.io.NeptuneExportSparqlRepository;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.amazonaws.services.neptune.util.EnvironmentVariableUtils;
import org.apache.http.client.HttpClient;
import org.eclipse.rdf4j.http.client.HttpClientSessionManager;
import org.eclipse.rdf4j.http.client.RDF4JProtocolSession;
import org.eclipse.rdf4j.http.client.SPARQLProtocolSession;
import org.eclipse.rdf4j.model.ValueFactory;
import org.eclipse.rdf4j.query.resultio.TupleQueryResultFormat;
import org.eclipse.rdf4j.repository.RepositoryConnection;
import org.eclipse.rdf4j.repository.base.AbstractRepository;
import org.eclipse.rdf4j.repository.sparql.SPARQLRepository;
import org.eclipse.rdf4j.rio.ParserConfig;
import org.eclipse.rdf4j.rio.RDFFormat;
import org.eclipse.rdf4j.rio.RDFWriter;
import org.eclipse.rdf4j.rio.helpers.BasicParserSettings;
import org.joda.time.DateTime;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import java.util.stream.Collectors;
/**
 * SPARQL client for Neptune that load-balances queries across one repository
 * per configured endpoint and streams results into an {@link RDFWriter}
 * produced by the target configuration.
 */
public class NeptuneSparqlClient implements AutoCloseable {

    // Tolerate mildly malformed IRIs in query results instead of failing the export.
    private static final ParserConfig PARSER_CONFIG = new ParserConfig().addNonFatalError(BasicParserSettings.VERIFY_URI_SYNTAX);

    /**
     * Creates a client with one initialized SPARQL repository per endpoint.
     * When IAM auth is enabled, requests are SigV4-signed using the region
     * from the mandatory SERVICE_REGION environment variable.
     */
    public static NeptuneSparqlClient create(ConnectionConfig config, FeatureToggles featureToggles) {

        String serviceRegion = config.useIamAuth() ? EnvironmentVariableUtils.getMandatoryEnv("SERVICE_REGION") : null;
        AWSCredentialsProvider credentialsProvider = config.useIamAuth() ? config.getCredentialsProvider() : null;

        return new NeptuneSparqlClient(
                config.endpoints().stream()
                        .map(e -> {
                                    try {
                                        return updateParser(new NeptuneExportSparqlRepository(
                                                sparqlEndpoint(e, config.port()),
                                                credentialsProvider,
                                                serviceRegion,
                                                config));
                                    } catch (NeptuneSigV4SignerException e1) {
                                        throw new RuntimeException(e1);
                                    }
                                }
                        )
                        .peek(AbstractRepository::init)
                        .collect(Collectors.toList()),
                featureToggles);
    }

    /**
     * Wraps the repository's session manager so every SPARQL session uses the
     * lenient parser config and JSON tuple results.
     */
    private static SPARQLRepository updateParser(SPARQLRepository repository) {
        HttpClientSessionManager sessionManager = repository.getHttpClientSessionManager();
        repository.setHttpClientSessionManager(new HttpClientSessionManager() {
            @Override
            public HttpClient getHttpClient() {
                return sessionManager.getHttpClient();
            }

            @Override
            public SPARQLProtocolSession createSPARQLProtocolSession(String s, String s1) {
                SPARQLProtocolSession session = sessionManager.createSPARQLProtocolSession(s, s1);
                session.setParserConfig(PARSER_CONFIG);
                session.setPreferredTupleQueryResultFormat(TupleQueryResultFormat.JSON);
                return session;
            }

            @Override
            public RDF4JProtocolSession createRDF4JProtocolSession(String s) {
                return sessionManager.createRDF4JProtocolSession(s);
            }

            @Override
            public void shutDown() {
                sessionManager.shutDown();
            }
        });
        return repository;
    }

    private static String sparqlEndpoint(String endpoint, int port) {
        return String.format("https://%s:%s", endpoint, port);
    }

    private final List<SPARQLRepository> repositories;
    private final Random random = new Random(DateTime.now().getMillis());
    private final FeatureToggles featureToggles;

    private NeptuneSparqlClient(List<SPARQLRepository> repositories, FeatureToggles featureToggles) {
        this.repositories = repositories;
        this.featureToggles = featureToggles;
    }

    /**
     * Runs a SPARQL SELECT query and streams the solutions into the target as
     * RDF statements (via {@link TupleQueryHandler}).
     *
     * @throws RuntimeException wrapping any query failure, enriched with
     *                          Neptune's trailing-header error message when available
     */
    public void executeTupleQuery(String sparql, RdfTargetConfig targetConfig) throws IOException {
        SPARQLRepository repository = chooseRepository();

        ValueFactory factory = repository.getValueFactory();

        try (RepositoryConnection connection = repository.getConnection();
             OutputWriter outputWriter = targetConfig.createOutputWriter()) {

            RDFWriter writer = targetConfig.createRDFWriter(outputWriter, featureToggles);
            connection.prepareTupleQuery(sparql).evaluate(new TupleQueryHandler(writer, factory));

        } catch (Exception e) {
            throw asRuntimeException(repository, e);
        }
    }

    /**
     * Runs a SPARQL CONSTRUCT/DESCRIBE query and streams the resulting
     * statements into the target (via {@link GraphQueryHandler}).
     *
     * @throws RuntimeException wrapping any query failure, enriched with
     *                          Neptune's trailing-header error message when available
     */
    public void executeGraphQuery(String sparql, RdfTargetConfig targetConfig) throws IOException {
        SPARQLRepository repository = chooseRepository();

        try (RepositoryConnection connection = repository.getConnection();
             OutputWriter outputWriter = targetConfig.createOutputWriter()) {

            RDFWriter writer = targetConfig.createRDFWriter(outputWriter, featureToggles);
            connection.prepareGraphQuery(sparql).evaluate(new GraphQueryHandler(writer));

        } catch (Exception e) {
            throw asRuntimeException(repository, e);
        }
    }

    // Shared wrapping logic for both query paths: prefer the detailed error
    // message Neptune sends in HTTP trailing headers when the repository
    // supports it.
    private static RuntimeException asRuntimeException(SPARQLRepository repository, Exception e) {
        if (repository instanceof NeptuneExportSparqlRepository) {
            return new RuntimeException(((NeptuneExportSparqlRepository) repository).getErrorMessageFromTrailers(), e);
        }
        return new RuntimeException(e);
    }

    // Spreads query load across endpoints by picking a repository at random.
    private SPARQLRepository chooseRepository() {
        return repositories.get(random.nextInt(repositories.size()));
    }

    @Override
    public void close() {
        repositories.forEach(AbstractRepository::shutDown);
    }
}
| 4,411 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/GraphQueryHandler.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.repository.RepositoryConnection;
import org.eclipse.rdf4j.rio.RDFHandler;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.RDFWriter;
/**
 * {@link RDFHandler} that forwards each statement from a SPARQL graph query
 * (CONSTRUCT/DESCRIBE) straight to an {@link RDFWriter}, ignoring namespaces
 * and comments.
 */
class GraphQueryHandler implements RDFHandler {
    private final RDFWriter writer;

    public GraphQueryHandler(RDFWriter writer) {
        this.writer = writer;
    }

    @Override
    public void startRDF() throws RDFHandlerException {
        writer.startRDF();
    }

    @Override
    public void endRDF() throws RDFHandlerException {
        writer.endRDF();
    }

    @Override
    public void handleNamespace(String s, String s1) throws RDFHandlerException {
        // Namespaces from the query result are intentionally not forwarded.
    }

    @Override
    public void handleStatement(Statement statement) throws RDFHandlerException {
        writer.handleStatement(statement);
    }

    @Override
    public void handleComment(String s) throws RDFHandlerException {
        // Comments are intentionally dropped.
    }
}
| 4,412 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfJob.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
/**
 * A unit of RDF export work (whole graph, edges only, or a user query).
 */
public interface ExportRdfJob {
    /**
     * Runs the export to completion.
     *
     * @throws Exception if the export fails for any reason
     */
    void execute() throws Exception;
}
| 4,413 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfFromQuery.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;

import java.util.Locale;
/**
 * Export job that runs a user-supplied SPARQL query, routing it to the graph
 * or tuple execution path depending on the query form.
 */
public class ExportRdfFromQuery implements ExportRdfJob {
    private final NeptuneSparqlClient client;
    private final RdfTargetConfig targetConfig;
    private final String query;

    public ExportRdfFromQuery(NeptuneSparqlClient client, RdfTargetConfig targetConfig, String query) {
        this.client = client;
        this.targetConfig = targetConfig;
        this.query = query;
    }

    @Override
    public void execute() throws Exception {
        Timer.timedActivity("exporting RDF from query as " + targetConfig.format().description(),
                (CheckedActivity.Runnable) () -> {
                    System.err.println("Creating edge statement files");
                    if (isGraphQuery(query)) {
                        client.executeGraphQuery(query, targetConfig);
                    } else {
                        client.executeTupleQuery(query, targetConfig);
                    }
                });
    }

    /**
     * Heuristically detects graph-form queries (CONSTRUCT/DESCRIBE). SPARQL
     * keywords are case-insensitive, so the previous case-sensitive
     * {@code contains} check mis-routed lowercase queries to the tuple path;
     * Locale.ROOT avoids locale-dependent casing surprises (e.g. Turkish 'i').
     */
    private static boolean isGraphQuery(String query) {
        String upper = query.toUpperCase(Locale.ROOT);
        return upper.contains("CONSTRUCT ") || upper.contains("DESCRIBE ");
    }
}
| 4,414 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/ExportRdfGraphJob.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import com.amazonaws.services.neptune.rdf.io.RdfTargetConfig;
import com.amazonaws.services.neptune.util.CheckedActivity;
import com.amazonaws.services.neptune.util.Timer;
/**
 * Export job that dumps every statement in every named graph using a
 * SPARQL SELECT over quads.
 */
public class ExportRdfGraphJob implements ExportRdfJob {

    private final NeptuneSparqlClient client;
    private final RdfTargetConfig targetConfig;

    public ExportRdfGraphJob(NeptuneSparqlClient client, RdfTargetConfig targetConfig) {
        this.client = client;
        this.targetConfig = targetConfig;
    }

    @Override
    public void execute() throws Exception {
        // Selecting ?s ?p ?o ?g lets the tuple handler rebuild full quads.
        String query = "SELECT * WHERE { GRAPH ?g { ?s ?p ?o } }";
        Timer.timedActivity("exporting RDF as " + targetConfig.format().description(),
                (CheckedActivity.Runnable) () -> {
                    System.err.println("Creating statement files");
                    client.executeTupleQuery(query, targetConfig);
                });
    }
}
| 4,415 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/Prefixes.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
import com.amazonaws.services.neptune.export.FeatureToggle;
import com.amazonaws.services.neptune.export.FeatureToggles;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.LineIterator;
import org.eclipse.rdf4j.rio.RDFWriter;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;
/**
 * Tracks namespace prefixes for Turtle output. A few well-known prefixes are
 * pre-registered; when the Infer_RDF_Prefixes feature toggle is on, new
 * prefixes are generated ("s0", "s1", ...) for each distinct
 * hash-terminated namespace seen in the data.
 */
public class Prefixes {

    private final Map<String, String> prefixes = new HashMap<>();
    // Number of pre-registered prefixes; generated names start counting after these.
    private final int offset;
    private final boolean inferPrefixes;

    public Prefixes(FeatureToggles featureToggles) {
        prefixes.put("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "rdf");
        //prefixes.put("http://www.w3.org/2000/01/rdf-schema#", "rdfs");
        prefixes.put("http://www.w3.org/2001/XMLSchema#", "xsd");
        offset = prefixes.size();
        inferPrefixes = featureToggles.containsFeature(FeatureToggle.Infer_RDF_Prefixes);
    }

    /**
     * Inspects a term's string form and, if it contains a '#'-terminated
     * namespace not seen before, registers a generated prefix for it and
     * emits the namespace via the writer.
     */
    public void parse(String s, RDFWriter writer) {
        if (!inferPrefixes) {
            return;
        }
        int hashIndex = s.indexOf("#");
        // Require the '#' to be neither the first nor the last character.
        if (hashIndex <= 0 || hashIndex >= (s.length() - 1)) {
            return;
        }
        String uri = s.substring(0, hashIndex + 1);
        if (prefixes.containsKey(uri)) {
            return;
        }
        String prefix = "s" + (prefixes.size() - offset);
        prefixes.put(uri, prefix);
        writer.handleNamespace(prefix, uri);
    }

    /**
     * Rewrites the file at filePath so that it begins with all collected
     * @prefix declarations, followed by the file's original content.
     * <p>
     * NOTE(review): reads and writes with the platform default charset
     * (FileWriter / FileUtils.lineIterator) — presumably the export files are
     * ASCII-safe Turtle; confirm before relying on non-ASCII content.
     */
    public void addTo(Path filePath) throws IOException {
        File source = filePath.toFile();
        LineIterator lines = FileUtils.lineIterator(source);
        File tempFile = File.createTempFile(source.getName(), ".tmp");
        BufferedWriter tempWriter = new BufferedWriter(new FileWriter(tempFile));
        try {
            tempWriter.write(allHeaders());
            while (lines.hasNext()) {
                tempWriter.write(lines.next());
                tempWriter.write(System.lineSeparator());
            }
        } finally {
            IOUtils.closeQuietly(tempWriter);
            LineIterator.closeQuietly(lines);
        }
        // Replace the original file with the prefixed copy.
        FileUtils.deleteQuietly(source);
        FileUtils.moveFile(tempFile, source);
    }

    // Renders all registered prefixes as Turtle @prefix lines, followed by a
    // blank separator line.
    private String allHeaders() {
        StringBuilder header = new StringBuilder();
        for (Map.Entry<String, String> entry : prefixes.entrySet()) {
            header.append("@prefix ")
                    .append(entry.getValue())
                    .append(": <")
                    .append(entry.getKey())
                    .append("> .")
                    .append(System.lineSeparator());
        }
        header.append(System.lineSeparator());
        return header.toString();
    }
}
| 4,416 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/RdfExportScope.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf;
/**
 * The scope of an RDF export. Lowercase names are kept as-is —
 * presumably these map directly to CLI argument values; confirm before renaming.
 */
public enum RdfExportScope {
    graph,  // all statements in all named graphs
    edges,  // only statements whose object is a resource (see ExportRdfEdgesJob)
    query   // results of a user-supplied SPARQL query
}
| 4,417 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneStreamsSimpleJsonNQuadsWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.util.NotImplementedException;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.eclipse.rdf4j.common.text.ASCIIUtil;
import org.eclipse.rdf4j.common.text.StringUtil;
import org.eclipse.rdf4j.model.*;
import org.eclipse.rdf4j.rio.*;
import org.eclipse.rdf4j.rio.helpers.NTriplesUtil;
import org.eclipse.rdf4j.rio.nquads.NQuadsWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Collection;
/**
 * Writes RDF statements as Neptune-Streams-style JSON records, one object per
 * line. The per-term fields (id/from/to/type/key/value/dataType/s/p/o/g) are
 * intentionally emitted as empty strings; the full statement is carried in
 * the "stmt" field as a single serialized N-Quads line, with "op" fixed to
 * "ADD".
 */
public class NeptuneStreamsSimpleJsonNQuadsWriter implements RDFWriter {

    // Strips the single trailing newline that NQuadsWriter appends after a statement.
    private static final String REGEX_LAST_NEWLINE = String.format("%s$", System.lineSeparator());

    private final JsonGenerator generator;
    private final Status status = new Status(StatusOutputFormat.Description, "records");
    private final OutputWriter outputWriter;

    public NeptuneStreamsSimpleJsonNQuadsWriter(OutputWriter outputWriter) {
        this.outputWriter = outputWriter;
        try {
            this.generator = new JsonFactory().createGenerator(outputWriter.writer());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public RDFFormat getRDFFormat() {
        return RDFFormat.NQUADS;
    }

    @Override
    public RDFWriter setWriterConfig(WriterConfig writerConfig) {
        throw new NotImplementedException();
    }

    @Override
    public WriterConfig getWriterConfig() {
        throw new NotImplementedException();
    }

    @Override
    public Collection<RioSetting<?>> getSupportedSettings() {
        throw new NotImplementedException();
    }

    @Override
    public <T> RDFWriter set(RioSetting<T> rioSetting, T t) {
        throw new NotImplementedException();
    }

    @Override
    public void startRDF() throws RDFHandlerException {
        // Do nothing — records are self-contained JSON lines.
    }

    @Override
    public void endRDF() throws RDFHandlerException {
        // Do nothing
    }

    @Override
    public void handleNamespace(String s, String s1) throws RDFHandlerException {
        // Do nothing — namespaces are not represented in the stream format.
    }

    /**
     * Emits one JSON record for the statement. Placeholder fields are kept
     * (as empty strings) for schema compatibility; only "stmt" and "op"
     * carry data.
     */
    @Override
    public void handleStatement(Statement statement) throws RDFHandlerException {
        try {
            outputWriter.startCommit();
            generator.writeStartObject();

            generator.writeStringField("id", "");
            generator.writeStringField("from", "");
            generator.writeStringField("to", "");
            generator.writeStringField("type", "");
            generator.writeStringField("key", "");
            generator.writeStringField("value", "");
            generator.writeStringField("dataType", "");
            generator.writeStringField("s", "");
            generator.writeStringField("p", "");
            generator.writeStringField("o", "");
            generator.writeStringField("g", "");

            // Serialize the whole statement as one N-Quads line into "stmt".
            generator.writeFieldName("stmt");
            StringWriter stringWriter = new StringWriter();
            NQuadsWriter nQuadsWriter = new NQuadsWriter(stringWriter);
            nQuadsWriter.startRDF();
            nQuadsWriter.handleStatement(statement);
            nQuadsWriter.endRDF();
            generator.writeString(stringWriter.toString().replaceAll(REGEX_LAST_NEWLINE, ""));

            generator.writeStringField("op", "ADD");

            generator.writeEndObject();
            generator.writeRaw(outputWriter.lineSeparator());
            generator.flush();
            outputWriter.endCommit();
            status.update();
        } catch (IOException e) {
            throw new RDFHandlerException(e);
        }
    }

    @Override
    public void handleComment(String s) throws RDFHandlerException {
        // Do nothing — comments are not represented in the stream format.
    }
}
| 4,418 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedTurtleWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.rdf.Prefixes;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.turtle.TurtleWriter;
import java.io.IOException;
/**
 * A {@link TurtleWriter} that additionally: (1) feeds every term of each
 * statement to a {@link Prefixes} tracker so namespace prefixes can be
 * inferred and emitted, (2) brackets each write with the output writer's
 * startCommit/endCommit protocol, and (3) counts written statements via a
 * {@link Status} progress reporter.
 */
public class EnhancedTurtleWriter extends TurtleWriter {

    private final OutputWriter writer;
    private final Prefixes prefixes;
    private final Status status = new Status(StatusOutputFormat.Description, "statements");

    public EnhancedTurtleWriter(OutputWriter writer, Prefixes prefixes) {
        super(writer.writer());
        this.writer = writer;
        this.prefixes = prefixes;
    }

    @Override
    public void handleStatement(Statement statement) throws RDFHandlerException {
        // Register namespaces for all terms BEFORE serializing the statement,
        // so any newly inferred prefix is written ahead of its first use.
        prefixes.parse(statement.getSubject().stringValue(), this);
        prefixes.parse(statement.getPredicate().toString(), this);
        prefixes.parse(statement.getObject().stringValue(), this);

        Resource context = statement.getContext();
        if (context != null){
            prefixes.parse(context.stringValue(), this);
        }

        writer.startCommit();
        super.handleStatement(statement);
        writer.endCommit();

        status.update();
    }

    @Override
    protected void writeNamespace(String prefix, String name)
            throws IOException {
        // Namespace declarations also go through the commit protocol so they
        // are flushed consistently with statements.
        writer.startCommit();
        super.writeNamespace(prefix, name);
        writer.endCommit();
    }
}
| 4,419 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneExportSparqlRepository.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.neptune.auth.NeptuneApacheHttpSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4Signer;
import com.amazonaws.neptune.auth.NeptuneSigV4SignerException;
import com.amazonaws.services.neptune.cluster.ConnectionConfig;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpException;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.HttpResponseInterceptor;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.conn.EofSensorInputStream;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.io.ChunkedInputStream;
import org.apache.http.protocol.HttpContext;
import org.eclipse.rdf4j.http.client.util.HttpClientBuilders;
import org.eclipse.rdf4j.repository.sparql.SPARQLRepository;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.Method;
import java.net.URLDecoder;
import java.util.HashMap;
import java.util.Map;
/**
 * A {@link SPARQLRepository} specialized for exporting from Amazon Neptune.
 * <p>
 * Adds two behaviors on top of the base repository: (1) optional SigV4 request signing when
 * IAM auth is enabled on the cluster, and (2) capture of the raw response stream so that
 * error messages Neptune sends in HTTP trailing headers can be surfaced via
 * {@link #getErrorMessageFromTrailers()}.
 */
public class NeptuneExportSparqlRepository extends SPARQLRepository {
    private final String regionName;
    private final AWSCredentialsProvider awsCredentialsProvider;
    private final ConnectionConfig config;
    private NeptuneSigV4Signer<HttpUriRequest> v4Signer;
    // Context of the most recent HTTP exchange; used to dig out trailing headers after a failure.
    private HttpContext lastContext;

    /**
     * @param endpointUrl            base cluster endpoint; "/sparql" is appended automatically
     * @param awsCredentialsProvider credentials used for SigV4 signing (only when IAM auth is on)
     * @param regionName             AWS region used for SigV4 signing
     * @param config                 connection settings (SSL, IAM auth); must be non-null
     * @throws NeptuneSigV4SignerException if the SigV4 signer cannot be initialized
     */
    public NeptuneExportSparqlRepository(String endpointUrl, AWSCredentialsProvider awsCredentialsProvider, String regionName, ConnectionConfig config) throws NeptuneSigV4SignerException {
        super(getSparqlEndpoint(endpointUrl));
        if (config == null) {
            throw new IllegalArgumentException("ConnectionConfig is required to be non-null");
        }
        this.config = config;
        this.awsCredentialsProvider = awsCredentialsProvider;
        this.regionName = regionName;
        this.initAuthenticatingHttpClient();
        Map<String, String> additionalHeaders = new HashMap<>();
        additionalHeaders.put("te", "trailers"); //Asks Neptune to send trailing headers which may contain error messages
        this.setAdditionalHttpHeaders(additionalHeaders);
    }

    /**
     * Builds the underlying HttpClient: trust-all SSL when SSL is enabled, a response
     * interceptor that stashes the raw entity stream for trailer extraction, and — when IAM
     * auth is enabled — a request interceptor that SigV4-signs every outgoing request.
     */
    protected void initAuthenticatingHttpClient() throws NeptuneSigV4SignerException {
        HttpClientBuilder httpClientBuilder = config.useSsl() ?
                HttpClientBuilders.getSSLTrustAllHttpClientBuilder() :
                HttpClientBuilder.create();
        // Capture every response's context and raw entity stream so trailers can be read later.
        httpClientBuilder.addInterceptorLast((HttpResponseInterceptor) (response, context) -> {
            lastContext = context;
            HttpEntity entity = response.getEntity();
            if (entity != null) {
                context.setAttribute("raw-response-inputstream", entity.getContent());
            }
        });
        if (config.useIamAuth()) {
            v4Signer = new NeptuneApacheHttpSigV4Signer(regionName, awsCredentialsProvider);
            // Sign each request last, after all other interceptors have shaped the headers.
            HttpClient v4SigningClient = httpClientBuilder.addInterceptorLast((HttpRequestInterceptor) (req, ctx) -> {
                if (req instanceof HttpUriRequest) {
                    HttpUriRequest httpUriReq = (HttpUriRequest) req;
                    try {
                        v4Signer.signRequest(httpUriReq);
                    } catch (NeptuneSigV4SignerException var5) {
                        throw new HttpException("Problem signing the request: ", var5);
                    }
                } else {
                    throw new HttpException("Not an HttpUriRequest");
                }
            }).build();
            setHttpClient(v4SigningClient);
        } else {
            setHttpClient(httpClientBuilder.build());
        }
    }

    // Neptune serves SPARQL at <endpoint>/sparql.
    private static String getSparqlEndpoint(String endpointUrl) {
        return endpointUrl + "/sparql";
    }

    /**
     * Attempts to extract error messages from trailing headers from the most recent response received by 'repository'.
     * If no trailers are found an empty String is returned.
     */
    public String getErrorMessageFromTrailers() {
        if (this.lastContext == null) {
            return "";
        }
        InputStream responseInStream = (InputStream) this.lastContext.getAttribute("raw-response-inputstream");
        ChunkedInputStream chunkedInStream;
        if (responseInStream instanceof ChunkedInputStream) {
            chunkedInStream = (ChunkedInputStream) responseInStream;
        }
        else if (responseInStream instanceof EofSensorInputStream) {
            // HTTPClient 4.5.13 provides no methods for accessing trailers from a wrapped stream requiring the use of
            // reflection to break encapsulation. This bug is being tracked in https://issues.apache.org/jira/browse/HTTPCLIENT-2263.
            try {
                Method getWrappedStream = EofSensorInputStream.class.getDeclaredMethod("getWrappedStream");
                getWrappedStream.setAccessible(true);
                chunkedInStream = (ChunkedInputStream) getWrappedStream.invoke(responseInStream);
                getWrappedStream.setAccessible(false);
            } catch (Exception e) {
                // Best-effort: if reflection fails (e.g. under a security manager), report no trailers.
                return "";
            }
        }
        else {
            return "";
        }
        Header[] trailers = chunkedInStream.getFooters();
        StringBuilder messageBuilder = new StringBuilder();
        for (Header trailer : trailers) {
            try {
                // Trailer values are URL-encoded by Neptune; decode for readability.
                messageBuilder.append(URLDecoder.decode(trailer.toString(), "UTF-8"));
            } catch (UnsupportedEncodingException e) {
                // UTF-8 is always available in practice; fall back to the raw header text.
                messageBuilder.append(trailer);
            }
            messageBuilder.append('\n');
        }
        return messageBuilder.toString();
    }

    // Visible for testing: lets tests inject a context carrying a canned response stream.
    protected void setLastContext(HttpContext context) {
        this.lastContext = context;
    }
}
| 4,420 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedNTriplesWriter.java | /*
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.rdf.Prefixes;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.ntriples.NTriplesWriter;
/**
 * N-Triples writer that registers namespace prefixes for every statement component,
 * wraps each write in an {@code OutputWriter} commit, and tracks progress via {@code Status}.
 */
public class EnhancedNTriplesWriter extends NTriplesWriter {

    private final OutputWriter outputWriter;
    private final Prefixes prefixes;
    private final Status status = new Status(StatusOutputFormat.Description, "statements");

    public EnhancedNTriplesWriter(OutputWriter writer, Prefixes prefixes) {
        super(writer.writer());
        this.outputWriter = writer;
        this.prefixes = prefixes;
    }

    @Override
    public void handleStatement(Statement statement) throws RDFHandlerException {
        registerPrefixes(statement);
        outputWriter.startCommit();
        super.handleStatement(statement);
        outputWriter.endCommit();
        status.update();
    }

    @Override
    public void handleNamespace(String prefix, String name) {
        // Namespace declarations are committed individually, like statements.
        outputWriter.startCommit();
        super.handleNamespace(prefix, name);
        outputWriter.endCommit();
    }

    // Register prefixes for subject, predicate, object and (optional) context with this writer.
    private void registerPrefixes(Statement statement) {
        prefixes.parse(statement.getSubject().stringValue(), this);
        prefixes.parse(statement.getPredicate().toString(), this);
        prefixes.parse(statement.getObject().stringValue(), this);
        Resource context = statement.getContext();
        if (null != context) {
            prefixes.parse(context.stringValue(), this);
        }
    }
}
| 4,421 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/NeptuneStreamsJsonNQuadsWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.util.NotImplementedException;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.*;
import org.eclipse.rdf4j.rio.nquads.NQuadsWriter;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Collection;
public class NeptuneStreamsJsonNQuadsWriter implements RDFWriter {
private static final String REGEX_LAST_NEWLINE = String.format("%s$", System.lineSeparator());
private final JsonGenerator generator;
private final Status status = new Status(StatusOutputFormat.Description,"statements");
private final OutputWriter outputWriter;
public NeptuneStreamsJsonNQuadsWriter(OutputWriter outputWriter) {
this.outputWriter = outputWriter;
try {
this.generator = new JsonFactory().createGenerator(outputWriter.writer());
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public RDFFormat getRDFFormat() {
return RDFFormat.NQUADS;
}
@Override
public RDFWriter setWriterConfig(WriterConfig writerConfig) {
throw new NotImplementedException();
}
@Override
public WriterConfig getWriterConfig() {
throw new NotImplementedException();
}
@Override
public Collection<RioSetting<?>> getSupportedSettings() {
throw new NotImplementedException();
}
@Override
public <T> RDFWriter set(RioSetting<T> rioSetting, T t) {
throw new NotImplementedException();
}
@Override
public void startRDF() throws RDFHandlerException {
// Do nothing
}
@Override
public void endRDF() throws RDFHandlerException {
// Do nothing
}
@Override
public void handleNamespace(String s, String s1) throws RDFHandlerException {
// Do nothing
}
@Override
public void handleStatement(Statement statement) throws RDFHandlerException {
try {
outputWriter.startCommit();
generator.writeStartObject();
generator.writeObjectFieldStart("eventId");
generator.writeNumberField("commitNum", -1);
generator.writeNumberField("opNum", 0);
generator.writeEndObject();
generator.writeObjectFieldStart("data");
generator.writeFieldName("stmt");
StringWriter stringWriter = new StringWriter();
NQuadsWriter nQuadsWriter = new NQuadsWriter(stringWriter);
nQuadsWriter.startRDF();
nQuadsWriter.handleStatement(statement);
nQuadsWriter.endRDF();
generator.writeString(stringWriter.toString().replaceAll(REGEX_LAST_NEWLINE, ""));
generator.writeEndObject();
generator.writeStringField("op", "ADD");
generator.writeEndObject();
generator.flush();
outputWriter.endCommit();
status.update();
} catch (IOException e) {
throw new RDFHandlerException(e);
}
}
@Override
public void handleComment(String s) throws RDFHandlerException {
// Do nothing
}
}
| 4,422 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/RdfTargetConfig.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.export.FeatureToggles;
import com.amazonaws.services.neptune.io.Directories;
import com.amazonaws.services.neptune.io.KinesisConfig;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Target;
import com.amazonaws.services.neptune.rdf.Prefixes;
import org.eclipse.rdf4j.rio.RDFWriter;
import java.io.IOException;
/**
 * Bundles the settings needed to write an RDF export: where files go ({@code Directories}),
 * which output target to use ({@code Target}), optional Kinesis settings, and the RDF
 * serialization format.
 */
public class RdfTargetConfig {

    private final Directories directories;
    private final KinesisConfig kinesisConfig;
    private final Target output;
    private final RdfExportFormat format;

    public RdfTargetConfig(Directories directories, KinesisConfig kinesisConfig, Target output, RdfExportFormat format) {
        this.directories = directories;
        this.kinesisConfig = kinesisConfig;
        this.output = output;
        this.format = format;
    }

    /**
     * Opens an {@code OutputWriter} on the configured target; the statements file path is
     * resolved lazily, only if the target actually writes to a file.
     */
    public OutputWriter createOutputWriter() throws IOException {
        return output.createOutputWriter(() -> directories.createStatementsFilePath("statements", format), kinesisConfig);
    }

    /**
     * Creates the format-specific {@code RDFWriter} over the supplied output writer.
     */
    public RDFWriter createRDFWriter(OutputWriter outputWriter, FeatureToggles featureToggles) {
        Prefixes rdfPrefixes = new Prefixes(featureToggles);
        return format.createWriter(outputWriter, rdfPrefixes);
    }

    public RdfExportFormat format() {
        return format;
    }
}
| 4,423 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/RdfExportFormat.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.FileExtension;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.rdf.Prefixes;
import org.eclipse.rdf4j.rio.RDFWriter;
/**
 * The RDF serialization formats supported by the exporter. Each constant knows its file
 * extension, a human-readable description, and how to construct the matching
 * {@link RDFWriter} over an {@link OutputWriter}.
 */
public enum RdfExportFormat implements FileExtension {
    turtle {
        @Override
        RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) {
            return new EnhancedTurtleWriter(writer, prefixes);
        }

        @Override
        public String extension() {
            return "ttl";
        }

        @Override
        public String description() {
            return "Turtle";
        }
    },
    nquads {
        @Override
        RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) {
            return new EnhancedNQuadsWriter(writer, prefixes);
        }

        @Override
        public String extension() {
            return "nq";
        }

        @Override
        public String description() {
            return "NQUADS";
        }
    },
    ntriples {
        @Override
        RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) {
            return new EnhancedNTriplesWriter(writer, prefixes);
        }

        @Override
        public String extension() {
            return "nt";
        }

        @Override
        public String description() {
            return "NTRIPLES";
        }
    },
    neptuneStreamsJson {
        // Streams formats serialize raw N-Quads, so the prefixes argument is unused here.
        @Override
        RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) {
            return new NeptuneStreamsJsonNQuadsWriter(writer);
        }

        @Override
        public String extension() {
            return "json";
        }

        @Override
        public String description() {
            return "JSON (Neptune Streams format)";
        }
    },
    neptuneStreamsSimpleJson {
        // Streams formats serialize raw N-Quads, so the prefixes argument is unused here.
        @Override
        RDFWriter createWriter(OutputWriter writer, Prefixes prefixes) {
            return new NeptuneStreamsSimpleJsonNQuadsWriter(writer);
        }

        @Override
        public String extension() {
            return "json";
        }

        @Override
        public String description() {
            return "JSON (Neptune Streams simple format)";
        }
    };

    // Package-private: writer construction is routed through RdfTargetConfig.
    abstract RDFWriter createWriter(OutputWriter writer, Prefixes prefixes);

    public abstract String description();
}
| 4,424 |
0 | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf | Create_ds/neptune-export/src/main/java/com/amazonaws/services/neptune/rdf/io/EnhancedNQuadsWriter.java | /*
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
*/
package com.amazonaws.services.neptune.rdf.io;
import com.amazonaws.services.neptune.io.OutputWriter;
import com.amazonaws.services.neptune.io.Status;
import com.amazonaws.services.neptune.io.StatusOutputFormat;
import com.amazonaws.services.neptune.rdf.Prefixes;
import org.eclipse.rdf4j.model.Resource;
import org.eclipse.rdf4j.model.Statement;
import org.eclipse.rdf4j.rio.RDFHandlerException;
import org.eclipse.rdf4j.rio.nquads.NQuadsWriter;
/**
 * N-Quads writer that registers namespace prefixes for every statement component,
 * wraps each write in an {@code OutputWriter} commit, and tracks progress via {@code Status}.
 */
public class EnhancedNQuadsWriter extends NQuadsWriter {

    private final OutputWriter outputWriter;
    private final Prefixes prefixes;
    private final Status status = new Status(StatusOutputFormat.Description, "statements");

    public EnhancedNQuadsWriter(OutputWriter writer, Prefixes prefixes) {
        super(writer.writer());
        this.outputWriter = writer;
        this.prefixes = prefixes;
    }

    @Override
    public void handleStatement(Statement statement) throws RDFHandlerException {
        registerPrefixes(statement);
        outputWriter.startCommit();
        super.handleStatement(statement);
        outputWriter.endCommit();
        status.update();
    }

    @Override
    public void handleNamespace(String prefix, String name) {
        // Namespace declarations are committed individually, like statements.
        outputWriter.startCommit();
        super.handleNamespace(prefix, name);
        outputWriter.endCommit();
    }

    // Register prefixes for subject, predicate, object and (optional) context with this writer.
    private void registerPrefixes(Statement statement) {
        prefixes.parse(statement.getSubject().stringValue(), this);
        prefixes.parse(statement.getPredicate().toString(), this);
        prefixes.parse(statement.getObject().stringValue(), this);
        Resource context = statement.getContext();
        if (null != context) {
            prefixes.parse(context.stringValue(), this);
        }
    }
}
| 4,425 |
0 | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/avro/hadoop | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/avro/hadoop/file/AvroBtreeFile.java | package com.paypal.dione.avro.hadoop.file;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.file.SeekableByteArrayInput;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.mapred.FsInput;
import org.apache.avro.specific.SpecificData;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.stream.Collectors;
/**
* A BtreeKeyValueFile is a b-tree indexed Avro container file of KeyValue records.
* <p>
* it is basically a copy from org.apache.avro.hadoop.file.SortedKeyValueFile with changes so every block in the avro
* file is a b-tree node, and every row has one additional "long" field that points to this record's child node if
* if is not a leaf record
*/
public class AvroBtreeFile {
public static final String DATA_SIZE_KEY = "data_bytes";
public static final String METADATA_COL_NAME = "metadata";
public static final String KEY_VALUE_HEADER_NAME = "btree.spec.kv";
private static final Logger logger = LoggerFactory.getLogger(AvroBtreeFile.class);
// Schema of Long that can be null
public static Schema metadataSchema = SchemaBuilder.unionOf().nullType().and().longType().endUnion();
/**
 * Reads an AvroBtreeFile. Supports two access patterns: a keyed b-tree lookup
 * ({@link #get(GenericRecord)}) that minimizes seeks, and a full in-order scan
 * ({@link #getIterator()}).
 * <p>
 * Each Avro block in the file is one b-tree node. A record's {@code metadata} field, when
 * non-null, points at its child node; stored offsets are measured from the END of the data
 * section (blocks were written buffered and then emitted in reverse — see
 * {@code getRealOffset}), relative to the position after the file header.
 */
public static class Reader implements Closeable {
    // Total number of data bytes (from the DATA_SIZE_KEY file metadata); used to translate
    // reversed child offsets into forward seek positions.
    private final Long dataSize;
    // Sync position immediately after the file header; all node offsets are relative to it.
    private final long fileHeaderEnd;
    private final DataFileReader<GenericRecord> mFileReader;
    private final Schema mKeySchema;
    private final Schema mValueSchema;

    public Schema getKeySchema() {
        return mKeySchema;
    }

    public Schema getValueSchema() {
        return mValueSchema;
    }

    /**
     * A class to encapsulate the options of a Reader.
     */
    public static class Options {
        private Configuration mConf;
        private Path mPath;

        public Options withConfiguration(Configuration conf) {
            mConf = conf;
            return this;
        }

        public Configuration getConfiguration() {
            return mConf;
        }

        public Options withPath(Path path) {
            mPath = path;
            return this;
        }

        public Path getPath() {
            return mPath;
        }
    }

    /**
     * Opens the file and recovers the key/value schemas from the
     * {@code KEY_VALUE_HEADER_NAME} metadata entry ("key1,key2|val1,val2" format).
     */
    public Reader(Options options) throws IOException {
        // Open the data file.
        Path dataFilePath = options.getPath();
        logger.debug("Loading the data file " + dataFilePath);
        DatumReader<GenericRecord> datumReader = GenericData.get().createDatumReader(null);
        mFileReader = new DataFileReader<>(new FsInput(dataFilePath, options.getConfiguration()), datumReader);
        String[] split = mFileReader.getMetaString(KEY_VALUE_HEADER_NAME).split("\\|");
        mKeySchema = projectSchema(mFileReader.getSchema(), split[0].split(","));
        mValueSchema = projectSchema(mFileReader.getSchema(), split[1].split(","));
        fileHeaderEnd = mFileReader.previousSync();
        dataSize = mFileReader.getMetaLong(DATA_SIZE_KEY);
    }

    // TODO: do we need this sync?
    public void sync(Long syncPosition) throws IOException {
        mFileReader.sync(syncPosition);
    }

    /**
     * This is the main motivation function of this class.
     * Given a key, run on the records in a "b-tree" manner - to fetch the correct value, if exists,
     * with minimal number of hops between different position in the file.
     * idea is that randomly seeking to a specific position is much more expensive than reading many records
     * sequentially.
     * <p>
     * Returns a lazy iterator over the values for {@code key}; duplicate keys are supported —
     * after a match the search continues (descending further if needed) for more matches.
     */
    public Iterator<GenericRecord> get(GenericRecord key) {
        logger.debug("searching for key: {}", key);
        return new Iterator<GenericRecord>() {
            // Current node's offset (absolute, once fileHeaderEnd is added in init()).
            long curOffset;
            // Most recently examined record; its child pointer is followed when we overshoot.
            GenericRecord lastRecord = null;
            // Records consumed from the current node so far.
            int counter;
            // Number of records in the current node; -1 until known (after the first next()).
            long blockCount;
            private RecordProjection projection = new RecordProjection(mKeySchema, mValueSchema);
            // Eagerly resolve the first match, starting at the root node (offset 0).
            GenericRecord nxt = getNextFromOffset(0);

            @Override
            public boolean hasNext() {
                return nxt!=null;
            }

            @Override
            public GenericRecord next() {
                if (!hasNext()) {
                    throw new NoSuchElementException();
                }
                GenericRecord ret = nxt;
                nxt = getNext();
                return ret;
            }

            // Seek to a node (offset relative to the header end) and resume the search there.
            private GenericRecord getNextFromOffset(long offset) {
                curOffset = offset;
                init();
                return getNext();
            }

            private void init() {
                curOffset += fileHeaderEnd;
                logger.debug("seeking to position: " + curOffset);
                counter = 0;
                blockCount = -1;
                lastRecord = null;
                try {
                    mFileReader.seek(curOffset);
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
                // hasNext() forces the reader to load the block at the new position.
                mFileReader.hasNext();
            }

            // Scan the current node; on overshoot descend into the previous record's child,
            // on node exhaustion descend into the last record's child, otherwise report a miss.
            private GenericRecord getNext() {
                while (mFileReader.hasNext() && (counter < blockCount || blockCount < 0)) {
                    GenericRecord record = mFileReader.next();
                    // Block count only becomes known after the first next() on a block.
                    if (blockCount < 0) blockCount = mFileReader.getBlockCount();
                    counter += 1;
                    int comparison = GenericData.get().compare(projection.getKey(record), key, mKeySchema);
                    logger.debug("comparison was: {} with: {} and {}", comparison, projection.getKey(record), key);
                    if (0 == comparison) {
                        // We've found it!
                        logger.debug("Found record for key {}", key);
                        lastRecord = record;
                        return projection.getValue(record);
                    } else if (comparison > 0) {
                        // We've passed it.
                        if (lastRecord == null || projection.getMetadata(lastRecord) == null) {
                            logger.debug("key does not appear in the file: {}", key);
                            curOffset -= fileHeaderEnd;
                            return null;
                        } else {
                            return getNextFromOffset(getRealOffset(lastRecord));
                        }
                    }
                    lastRecord = record;
                }
                // Node exhausted without overshoot: key may live in the rightmost child.
                if (lastRecord != null && projection.getMetadata(lastRecord) != null) {
                    return getNextFromOffset(getRealOffset(lastRecord));
                }
                logger.debug("reached end of road. key does not appear in the file: {}", key);
                return null;
            }
        };
    }

    // Convert a record's stored child offset (measured backwards from the end of the data
    // section, because nodes are written in reverse) into a forward offset from the header end.
    private Long getRealOffset(GenericRecord record) {
        Long offset = dataSize;
        Long reversedOffset = (Long) record.get(METADATA_COL_NAME);
        if (reversedOffset != null)
            offset -= reversedOffset;
        return offset;
    }

    /**
     * this iterator runs on the records in sorted order, and not in the "b-tree" order the records are
     * saved in the file
     */
    public Iterator<GenericRecord> getIterator() {
        return new Iterator<GenericRecord>() {
            private final RecordProjection projection = new RecordProjection(mKeySchema, mValueSchema);
            // Depth-first in-order traversal state; starts at the root node (offset 0).
            private Node next = new Node(0);

            @Override
            public boolean hasNext() {
                return next != null;
            }

            @Override
            public GenericRecord next() {
                if(!hasNext()) throw new NoSuchElementException();
                GenericRecord ret = next.getCurGenericRecord();
                if (next.curHasChild()) {
                    // Visit the child subtree before advancing past this record.
                    next = next.getChildNode();
                } else {
                    next.curRecord++;
                    // in case we got to the last record
                    while (next.curRecord == next.records.size()) {
                        next = next.parent;
                        if (next == null)
                            return ret;
                        next.curRecord++;
                    }
                }
                return ret;
            }

            // One fully-materialized b-tree node plus a cursor and a back-pointer to its parent.
            class Node {
                Node(long offset) {
                    try {
                        mFileReader.seek(fileHeaderEnd + offset);
                        GenericRecord firstRecord = mFileReader.next();
                        // we only know the block count after the first next()
                        int blockCount = (int) mFileReader.getBlockCount();
                        records = new ArrayList<>(blockCount);
                        records.add(firstRecord);
                        for (int i=1; i<blockCount; i++) {
                            records.add(mFileReader.next());
                        }
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }

                boolean curHasChild() {
                    return projection.getMetadata(records.get(curRecord)) != null;
                }

                Node getChildNode() {
                    Node childNode = new Node(getRealOffset(records.get(curRecord)));
                    childNode.parent = this;
                    return childNode;
                }

                GenericRecord getCurGenericRecord() {
                    return records.get(curRecord);
                }

                int curRecord = 0;
                final List<GenericRecord> records;
                Node parent;
            }
        };
    }

    @Override
    public void close() throws IOException {
        mFileReader.close();
    }
}
/**
* Writes a SortedKeyValueFile.
*/
public static class Writer implements Closeable {
private final Schema mKeySchema;
private final Schema mValueSchema;
private final Schema mRecordSchema;
private GenericData model;
private final BufferedWriter bufferedWriter;
private FileSystem fileSystem;
private final Path filename;
private final int mInterval;
private final int mHeight;
private GenericRecord mPreviousKey;
private Node curNode = new Node();
private final Node root = curNode;
private class Node {
List<GenericRecord> records;
Node prev;
int height;
public Node() {
records = new ArrayList<>(mInterval);
}
public Node(Node prevNode) {
records = new ArrayList<>(mInterval);
prev = prevNode;
height = prevNode.height + 1;
}
public GenericRecord getCurRecord() {
return records.get(records.size() - 1);
}
public void addRecord(GenericRecord record) throws IOException {
records.add(record);
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
for (GenericRecord md : records) {
sb.append("\n\t\t");
sb.append(" data: " + md.toString());
}
return "Node{" +
"records=" + sb +
", height=" + height +
'}';
}
}
/**
* A class to encapsulate the various options of a Writer.
*/
public static class Options {
private Schema mKeySchema;
private Schema mValueSchema;
private Configuration mConf;
private Path mPath;
private int mInterval = 128;
private int mHeight = 2;
private int initialCapacityMB = 20;
private GenericData model = SpecificData.get();
private CodecFactory codec = CodecFactory.nullCodec();
public Options withKeySchema(Schema keySchema) {
mKeySchema = keySchema;
return this;
}
public Schema getKeySchema() {
return mKeySchema;
}
public Options withValueSchema(Schema valueSchema) {
mValueSchema = valueSchema;
return this;
}
public Schema getValueSchema() {
return mValueSchema;
}
public Options withConfiguration(Configuration conf) {
mConf = conf;
return this;
}
public Configuration getConfiguration() {
return mConf;
}
public Options withPath(Path path) {
mPath = path;
return this;
}
public Path getPath() {
return mPath;
}
public Options withInterval(int interval) {
mInterval = interval;
return this;
}
public int getInterval() {
return mInterval;
}
public Options withHeight(int height) {
mHeight = height;
return this;
}
public int getHeight() {
return mHeight;
}
public Options withDataModel(GenericData model) {
this.model = model;
return this;
}
public GenericData getDataModel() {
return model;
}
public Options withCodec(String codec) {
this.codec = CodecFactory.fromString(codec);
return this;
}
public Options withCodec(CodecFactory codec) {
this.codec = codec;
return this;
}
public Options withInitialBufferSizeMB(int mb) {
this.initialCapacityMB = mb;
return this;
}
public CodecFactory getCodec() {
return this.codec;
}
}
/**
* Creates a writer for a new file.
*
* @param options The options.
* @throws IOException If there is an error.
*/
public Writer(Options options) throws IOException {
this.model = options.getDataModel();
if (null == options.getConfiguration()) {
throw new IllegalArgumentException("Configuration may not be null");
}
fileSystem = options.getPath().getFileSystem(options.getConfiguration());
filename = options.getPath();
// Save the key and value schemas.
mKeySchema = options.getKeySchema();
if (null == mKeySchema) {
throw new IllegalArgumentException("Key schema may not be null");
}
mValueSchema = options.getValueSchema();
if (null == mValueSchema) {
throw new IllegalArgumentException("Value schema may not be null");
}
mInterval = options.getInterval();
mHeight = options.getHeight() - 1;
if (mHeight < 0)
throw new RuntimeException("Height must be positive, given: " + options.getHeight());
// Create the parent directory.
if (!fileSystem.mkdirs(options.getPath().getParent())) {
throw new IOException(
"Unable to create directory: " + options.getPath().getParent());
}
logger.debug("Created directory " + options.getPath());
// Open a writer for the data file.
Path dataFilePath = options.getPath();
logger.debug("Creating writer for avro data file: " + dataFilePath);
List<Schema.Field> schemaFields = new ArrayList<>();
mRecordSchema = createSchema(mKeySchema, mValueSchema);
String keys = String.join(",", mKeySchema.getFields().stream().map(Schema.Field::name).toArray(String[]::new));
String values = String.join(",", mValueSchema.getFields().stream().map(Schema.Field::name).toArray(String[]::new));
String keyValueFields = keys + "|" + values;
bufferedWriter = new BufferedWriter(options, mRecordSchema, keyValueFields);
}
/**
* TODO: add doc
*/
public void append(GenericRecord key, GenericRecord value) throws IOException {
// Make sure the keys are inserted in sorted order.
if (null != mPreviousKey && model.compare(key, mPreviousKey, mKeySchema) < 0) {
throw new IllegalArgumentException("Records must be inserted in sorted key order."
+ " Attempted to insert key " + key + " after " + mPreviousKey + ".");
}
mPreviousKey = model.deepCopy(mKeySchema, key);
// Construct the data record.
GenericData.Record dataRecord = new GenericData.Record(mRecordSchema);
key.getSchema().getFields().stream().map(Schema.Field::name).forEach(f -> {
dataRecord.put(f, key.get(f));
});
value.getSchema().getFields().stream().map(Schema.Field::name).forEach(f -> {
dataRecord.put(f, value.get(f));
});
if (curNode.height == 0 || curNode.records.size() < mInterval) {
curNode.addRecord(dataRecord);
if (curNode.height < mHeight) {
curNode = new Node(curNode);
}
} else {
while (curNode.records.size() == mInterval && curNode.height > 0) {
flush();
}
curNode.addRecord(dataRecord);
curNode = new Node(curNode);
}
}
/**
* {@inheritDoc}
*/
@Override
public void close() throws IOException {
while (curNode != root) {
flush();
}
flush();
bufferedWriter.reverseAndClose(fileSystem.create(filename));
}
private void flush() throws IOException {
int numRecordsWriting = curNode.records.size();
logger.debug("writing {} records in height {}, records: {}", numRecordsWriting, curNode.height, curNode.records);
for (GenericRecord record : curNode.records) {
bufferedWriter.append(record);
}
// the reader will see the blocks backwards, so need to take the sync marker AFTER the block is written:
long position = bufferedWriter.sync();
curNode = curNode.prev;
if (curNode != null) {
if (numRecordsWriting > 0)
curNode.getCurRecord().put(METADATA_COL_NAME, position);
}
}
}
/**
 * Buffers Avro blocks in an in-memory Avro file, and then writes them backwards
 * (last block first) to the real output file in {@link #reverseAndClose}. This
 * lets the b-tree writer emit parent nodes after their children while readers
 * still find parents first.
 */
public static class BufferedWriter {
private final Schema schema;
// In-memory Avro file contents; sized up-front from options.initialCapacityMB.
private final ByteArrayOutputStream recordsBuffer;
private final DataFileWriter<GenericRecord> memoryWriter;
private final Writer.Options options;
// Sync-marker positions of every block, in write order; used to iterate blocks backwards.
private final LinkedList<Long> syncs;
// Serialized key/value field names, stored in the output file's metadata.
private final String keyValueFields;
// Byte offset where the Avro header ends; block positions are reported relative to it.
private final long headerPosition;
public BufferedWriter(Writer.Options options, Schema schema, String keyValueFields) throws IOException {
// the actual byte buffer to write to:
ByteArrayOutputStream recordsBuffer = new ByteArrayOutputStream(options.initialCapacityMB << 20);
// the file writer:
DataFileWriter<GenericRecord> inMemoryWriter = createMemoryFileWriter(options, schema, recordsBuffer);
this.schema = schema;
this.recordsBuffer = recordsBuffer;
this.memoryWriter = inMemoryWriter.setSyncInterval(1 << 20);
this.headerPosition = inMemoryWriter.sync();
this.options = options;
syncs = new LinkedList<>();
syncs.add(memoryWriter.sync());
this.keyValueFields = keyValueFields;
}
/** Builds the DataFileWriter that serializes records into the in-memory buffer. */
private DataFileWriter<GenericRecord> createMemoryFileWriter(Writer.Options options, Schema schema, ByteArrayOutputStream recordsBuffer) throws IOException {
GenericData model = options.model;
DatumWriter<GenericRecord> datumWriter = model.createDatumWriter(schema);
DataFileWriter<GenericRecord> inMemoryWriter =
new DataFileWriter<>(datumWriter)
.setSyncInterval(1 << 20)
.setCodec(options.getCodec())
.create(schema, recordsBuffer);
return inMemoryWriter;
}
/**
 * Forces a block boundary and returns the new block's position relative to the
 * end of the Avro header.
 */
public Long sync() throws IOException {
long sync = memoryWriter.sync();
// NOTE: syncs.getLast() is unboxed here, so this is a primitive comparison,
// guarding against recording the same boundary twice.
if (syncs.getLast() != sync)
// saving the syncs just to be able to efficiently read the blocks backwards.
syncs.add(sync);
return sync - headerPosition;
}
/** Appends a single record to the current in-memory block. */
public void append(GenericRecord record) throws IOException {
memoryWriter.append(record);
}
/**
 * Writes all buffered blocks to {@code output} in reverse order and closes the
 * in-memory writer. Total data size and the key/value field names are stored in
 * the output file's metadata.
 */
public void reverseAndClose(FSDataOutputStream output) throws IOException {
sync();
memoryWriter.close();
try (DataFileWriter fileWriter = new DataFileWriter<>(options.getDataModel().createDatumWriter(schema))) {
// create seekable file reader from the in memory file:
DatumReader datumReader = options.model.createDatumReader(schema);
byte[] rawAvroFileData = recordsBuffer.toByteArray();
SeekableByteArrayInput input = new SeekableByteArrayInput(rawAvroFileData);
DataFileReader inMemoryReader = (DataFileReader) DataFileReader.openReader(input, datumReader);
// create (real) file writer:
long dataSize = rawAvroFileData.length - headerPosition;
fileWriter
.setMeta(DATA_SIZE_KEY, dataSize) // put data size in metadata:
.setMeta(KEY_VALUE_HEADER_NAME, keyValueFields) // put key/value field names in metadata:
.setCodec(options.getCodec())
.setSyncInterval(1 << 20)
.create(schema, output);
// read blocks backwards, and append to the real file:
ByteBuffer emptyBuffer = ByteBuffer.allocate(0);
Iterator<Long> reversedBlocks = syncs.descendingIterator();
reversedBlocks.next(); // last sync points to end of file, skip it
while (reversedBlocks.hasNext()) {
Long sync = reversedBlocks.next();
inMemoryReader.seek(sync);
inMemoryReader.hasNext(); // important! forces the reader to load the next block
long count = inMemoryReader.getBlockCount();
ByteBuffer block = inMemoryReader.nextBlock();
assert (count > 0);
fileWriter.appendEncoded(block);
// appendEncoded ^ only increments block count by 1, so manually increase it to the real value:
while (--count > 0) fileWriter.appendEncoded(emptyBuffer);
fileWriter.sync();
}
}
}
}
/**
 * Builds the combined record schema used by the b-tree file: all key fields,
 * followed by all value fields, followed by the metadata (child-pointer) column.
 *
 * @param key schema contributing the leading key fields
 * @param value schema contributing the value fields
 * @return a new record schema named {@code keyValueSchema}
 */
public static Schema createSchema(Schema key, Schema value) {
    List<Schema.Field> fields = new ArrayList<>();
    addFromSchema(fields, key);
    addFromSchema(fields, value);
    // The pointer column is sorted ascending so block offsets line up with key order.
    Schema.Field pointerField = new Schema.Field(
            METADATA_COL_NAME, metadataSchema, metadataSchema.getDoc(), null, Schema.Field.Order.ASCENDING);
    fields.add(pointerField);
    Schema combined = Schema.createRecord("keyValueSchema", "doc", "na", false);
    combined.setFields(fields);
    return combined;
}
/**
 * Copies every field of {@code srcSchema} into {@code schemaFields}. Fields are
 * cloned because Avro does not allow reusing a Field object across schemas.
 */
private static void addFromSchema(List<Schema.Field> schemaFields, Schema srcSchema) {
    for (Schema.Field field : srcSchema.getFields()) {
        schemaFields.add(new Schema.Field(
                field.name(), field.schema(), field.doc(), field.defaultVal(), field.order()));
    }
}
/**
 * Creates a record schema containing only the named fields, cloned from
 * {@code schema}, preserving the order given in {@code fields}.
 *
 * @throws RuntimeException if {@code fields} is empty or names a field that is
 *         not present in {@code schema}
 */
private static Schema projectSchema(Schema schema, String[] fields) {
    if (fields.length == 0)
        throw new RuntimeException("attempt to create empty schema");
    Map<String, Schema.Field> byName = new HashMap<>();
    for (Schema.Field field : schema.getFields()) {
        byName.put(field.name(), field);
    }
    List<Schema.Field> projected = new ArrayList<>();
    for (String name : fields) {
        Schema.Field field = byName.get(name);
        if (field != null) {
            projected.add(cloneField(field));
        }
    }
    // A missing field leaves the list short — fail loudly instead of silently dropping it.
    if (projected.size() != fields.length)
        throw new RuntimeException("fields are not subset of the schema");
    return Schema.createRecord(projected);
}
/** Returns a fresh copy of {@code f}; Avro Field objects cannot be shared between schemas. */
private static Schema.Field cloneField(Schema.Field f) {
    Schema.Field copy = new Schema.Field(f.name(), f.schema(), f.doc(), f.defaultVal(), f.order());
    return copy;
}
}
| 4,426 |
0 | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/avro/hadoop | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/avro/hadoop/file/RecordProjection.java | package com.paypal.dione.avro.hadoop.file;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;
import static com.paypal.dione.avro.hadoop.file.AvroBtreeFile.METADATA_COL_NAME;
/**
 * Splits a combined b-tree record back into its key part, value part, and the
 * child-pointer metadata column ({@code METADATA_COL_NAME}).
 */
public class RecordProjection {
    private final Schema keySchema;
    private final Schema valueSchema;

    /**
     * @param keySchema schema describing the key fields of a combined record
     * @param valueSchema schema describing the value fields of a combined record
     */
    public RecordProjection(Schema keySchema, Schema valueSchema) {
        this.keySchema = keySchema;
        this.valueSchema = valueSchema;
    }

    /** Returns a new record containing only the key fields of {@code record}. */
    public GenericRecord getKey(GenericRecord record) {
        return projectSchema(record, keySchema);
    }

    /** Returns a new record containing only the value fields of {@code record}. */
    public GenericRecord getValue(GenericRecord record) {
        // Fixed: removed a pointless local alias of valueSchema.
        return projectSchema(record, valueSchema);
    }

    /**
     * Returns the child-pointer column of {@code record}, or null if unset.
     */
    public Long getMetadata(GenericRecord record) {
        // Fixed: removed a dead GenericData.Record allocation that was never used.
        return (Long) record.get(METADATA_COL_NAME);
    }

    /** Copies the fields named by {@code schema} out of {@code record} into a new record. */
    private GenericRecord projectSchema(GenericRecord record, Schema schema) {
        GenericData.Record res = new GenericData.Record(schema);
        schema.getFields().stream().map(Schema.Field::name).forEach(f -> {
            res.put(f, record.get(f));
        });
        return res;
    }
}
| 4,427 |
0 | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/hdfs | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/hdfs/index/HdfsIndexContants.java | package com.paypal.dione.hdfs.index;
/**
 * Column-name constants for the HDFS index records.
 * NOTE(review): class name is missing an 's' ("Contants"); left as-is because
 * renaming would break external references.
 */
public class HdfsIndexContants {
// Utility holder: prevent instantiation.
private HdfsIndexContants() {}
// Name of the data file a row was read from.
public static final String FILE_NAME_COLUMN = "data_filename";
// Byte/block offset of the row inside the data file.
public static final String OFFSET_COLUMN = "data_offset";
// Position of the row within the block at OFFSET_COLUMN.
public static final String SUB_OFFSET_COLUMN = "data_sub_offset";
// Size of the referenced data, in bytes.
public static final String SIZE_COLUMN = "data_size";
}
| 4,428 |
0 | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/hdfs/index | Create_ds/dione/dione-hadoop/src/main/java/com/paypal/dione/hdfs/index/parquet/MyInternalParquetRecordReader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.paypal.dione.hdfs.index.parquet;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.filter2.compat.FilterCompat;
import org.apache.parquet.filter2.compat.FilterCompat.Filter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.UnmaterializableRecordCounter;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.hadoop.metadata.FileMetaData;
import org.apache.parquet.hadoop.util.counters.BenchmarkCounter;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.ParquetDecodingException;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.io.api.RecordMaterializer.RecordMaterializationException;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.*;
import static java.lang.String.format;
import static org.apache.parquet.Preconditions.checkNotNull;
import static org.apache.parquet.hadoop.ParquetInputFormat.RECORD_FILTERING_ENABLED;
import static org.apache.parquet.hadoop.ParquetInputFormat.STRICT_TYPE_CHECKING;
/**
 * Copied from parquet.hadoop.InternalParquetRecordReader in order to access a few internal members, like currentBlock,
 * currentInBlock, etc. Also adds row-group skipping helpers ({@link #skipRowGroup},
 * {@link #skipToEndOfBlock}) and a schema-projection workaround in {@link #initialize}.
 * Keep changes minimal so diffs against upstream stay reviewable.
 */
public class MyInternalParquetRecordReader<T> {
private static final Logger LOG = LoggerFactory.getLogger(MyInternalParquetRecordReader.class);
private ColumnIOFactory columnIOFactory = null;
private final Filter filter;
private boolean filterRecords = true;
private MessageType requestedSchema;
private MessageType fileSchema;
private int columnCount;
private final ReadSupport<T> readSupport;
private RecordMaterializer<T> recordConverter;
private T currentValue;
// Total record count in the file.
private long total;
// Number of row groups in the file (long to match reader API, despite the name).
private long rowGroupSize;
// Records consumed so far across all row groups.
private long current = 0;
// Index of the row group currently being read; -1 before the first checkRead().
private int currentBlock = -1;
// Index of the current record within the current row group; -1 at a group boundary.
private int currentInBlock = -1;
private ParquetFileReader reader;
private org.apache.parquet.io.RecordReader<T> recordReader;
private boolean strictTypeChecking;
// Timing counters used for the throughput log lines in checkRead().
private long totalTimeSpentReadingBytes;
private long totalTimeSpentProcessingRecords;
private long startedAssemblingCurrentBlockAt;
// Records available up to and including the row group loaded so far.
private long totalCountLoadedSoFar = 0;
private UnmaterializableRecordCounter unmaterializableRecordCounter;
/**
 * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
 * @param filter for filtering individual records
 */
public MyInternalParquetRecordReader(ReadSupport<T> readSupport, Filter filter) {
this.readSupport = readSupport;
this.filter = checkNotNull(filter, "filter");
}
/**
 * @param readSupport Object which helps reads files of the given type, e.g. Thrift, Avro.
 */
public MyInternalParquetRecordReader(ReadSupport<T> readSupport) {
this(readSupport, FilterCompat.NOOP);
}
/**
 * Loads the next row group into memory when the current one is exhausted,
 * rebuilding the record reader and updating the block counters.
 *
 * @throws IOException if there is no further row group or reading it fails
 */
private void checkRead() throws IOException {
if (current == totalCountLoadedSoFar) {
if (current != 0) {
totalTimeSpentProcessingRecords += (System.currentTimeMillis() - startedAssemblingCurrentBlockAt);
if (LOG.isInfoEnabled()) {
LOG.info("Assembled and processed " + totalCountLoadedSoFar + " records from " + columnCount + " columns in " + totalTimeSpentProcessingRecords + " ms: "+((float)totalCountLoadedSoFar / totalTimeSpentProcessingRecords) + " rec/ms, " + ((float)totalCountLoadedSoFar * columnCount / totalTimeSpentProcessingRecords) + " cell/ms");
final long totalTime = totalTimeSpentProcessingRecords + totalTimeSpentReadingBytes;
if (totalTime != 0) {
final long percentReading = 100 * totalTimeSpentReadingBytes / totalTime;
final long percentProcessing = 100 * totalTimeSpentProcessingRecords / totalTime;
LOG.info("time spent so far " + percentReading + "% reading ("+totalTimeSpentReadingBytes+" ms) and " + percentProcessing + "% processing ("+totalTimeSpentProcessingRecords+" ms)");
}
}
}
LOG.info("at row " + current + ". reading next block");
long t0 = System.currentTimeMillis();
PageReadStore pages = reader.readNextRowGroup();
if (pages == null) {
throw new IOException("expecting more rows but reached last block. Read " + current + " out of " + total);
}
long timeSpentReading = System.currentTimeMillis() - t0;
totalTimeSpentReadingBytes += timeSpentReading;
BenchmarkCounter.incrementTime(timeSpentReading);
if (LOG.isInfoEnabled()) LOG.info("block read in memory in {} ms. row count = {}", timeSpentReading, pages.getRowCount());
LOG.debug("initializing Record assembly with requested schema {}", requestedSchema);
MessageColumnIO columnIO = columnIOFactory.getColumnIO(requestedSchema, fileSchema, strictTypeChecking);
recordReader = columnIO.getRecordReader(pages, recordConverter,
filterRecords ? filter : FilterCompat.NOOP);
startedAssemblingCurrentBlockAt = System.currentTimeMillis();
totalCountLoadedSoFar += pages.getRowCount();
++ currentBlock;
currentInBlock=-1;
}
}
/** Closes the underlying file reader, if open. */
public void close() throws IOException {
if (reader != null) {
reader.close();
}
}
/** Keys are unused for this reader; always returns null. */
public Void getCurrentKey() throws IOException, InterruptedException {
return null;
}
/** Returns the record produced by the last successful {@link #nextKeyValue()}. */
public T getCurrentValue() {
return currentValue;
}
/** Returns the index of the row group currently being read. */
public int getCurrentBlock() {
return currentBlock;
}
/** Returns the index of the current record within the current row group. */
public int getCurrentInBlock() {
return currentInBlock;
}
/**
 * Skips the remainder of the current row group and the next one entirely.
 * Returns true when there was nothing left to skip or the skip succeeded.
 */
public boolean skipRowGroup() {
if(currentBlock + 1 >= rowGroupSize) {
LOG.info("Already reached the last row group {}, ignored.", currentBlock);
current = totalCountLoadedSoFar;
return true;
}
LOG.info("skipping block {}", ++currentBlock);
current = totalCountLoadedSoFar;
return reader.skipNextRowGroup();
}
/**
 * Marks the rest of the current row group as consumed; the next
 * {@link #nextKeyValue()} will load the following row group.
 */
public void skipToEndOfBlock() {
LOG.info("skipping to end of block {}", currentBlock);
current = totalCountLoadedSoFar;
}
/** Fraction of total records consumed so far, in [0, 1]. */
public float getProgress() throws IOException, InterruptedException {
return (float) current / total;
}
/**
 * Prepares this reader against an already-open {@link ParquetFileReader}.
 *
 * @param projectedFieldNames if non-null, the requested schema is rebuilt as a
 *        projection of the file schema restricted to these field names (see
 *        workaround note in the body); if null, the ReadSupport's schema is used
 */
public void initialize(ParquetFileReader reader, Configuration configuration, Set<String> projectedFieldNames)
throws IOException {
// initialize a ReadContext for this file
this.reader = reader;
FileMetaData parquetFileMetadata = reader.getFooter().getFileMetaData();
this.fileSchema = parquetFileMetadata.getSchema();
Map<String, String> fileMetadata = parquetFileMetadata.getKeyValueMetaData();
ReadSupport.ReadContext readContext = readSupport.init(new InitContext(
configuration, toSetMultiMap(fileMetadata), fileSchema));
this.columnIOFactory = new ColumnIOFactory(parquetFileMetadata.getCreatedBy());
this.requestedSchema = readContext.getRequestedSchema();
this.columnCount = requestedSchema.getPaths().size();
this.recordConverter = readSupport.prepareForRead(
configuration, fileMetadata, fileSchema, readContext);
this.strictTypeChecking = configuration.getBoolean(STRICT_TYPE_CHECKING, true);
this.total = reader.getRecordCount();
this.rowGroupSize = reader.getRowGroups().size();
this.unmaterializableRecordCounter = new UnmaterializableRecordCounter(configuration, total);
// Record filtering defaults to OFF here (upstream defaults to on).
this.filterRecords = configuration.getBoolean(
RECORD_FILTERING_ENABLED, false);
// this is a workaround for an apparent bug in Spark's avro schema generation for complex fields (e.g Map)
// we replace the `requestedSchema` with a projection of the fileSchema
if (projectedFieldNames!=null) {
List<Type> projectedFields = new ArrayList<>();
for (Type field : fileSchema.getFields()) {
if (projectedFieldNames.contains(field.getName()))
projectedFields.add(field);
}
this.requestedSchema = new MessageType(fileSchema.getName(), projectedFields);
LOG.debug("initialized requested schema {}", this.requestedSchema);
}
reader.setRequestedSchema(requestedSchema);
LOG.info("RecordReader initialized will read a total of {} records with {} row groups.", total, rowGroupSize);
}
/**
 * Advances to the next materializable, unfiltered record.
 *
 * @return true if a record was read into {@link #getCurrentValue()}, false at end of file
 * @throws IOException wrapped as ParquetDecodingException on decode failures
 */
public boolean nextKeyValue() throws IOException {
boolean recordFound = false;
while (!recordFound) {
// no more records left
if (current >= total) { return false; }
try {
checkRead();
current ++;
currentInBlock++;
try {
currentValue = recordReader.read();
} catch (RecordMaterializationException e) {
// this might throw, but it's fatal if it does.
unmaterializableRecordCounter.incErrors(e);
LOG.debug("skipping a corrupt record");
continue;
}
if (recordReader.shouldSkipCurrentRecord()) {
// this record is being filtered via the filter2 package
LOG.debug("skipping record");
continue;
}
if (currentValue == null) {
// only happens with FilteredRecordReader at end of block
current = totalCountLoadedSoFar;
LOG.debug("filtered record reader reached end of block");
continue;
}
recordFound = true;
LOG.debug("read value[offset: {}, sub_offset {}, current {}]: {}", currentBlock, currentInBlock, current, currentValue.toString());
} catch (RuntimeException e) {
throw new ParquetDecodingException(format("Can not read value at %d in block %d in file %s", current, currentBlock, reader.getPath()), e);
}
}
return true;
}
/** Wraps each map value in an immutable singleton set, as InitContext expects a multimap. */
private static <K, V> Map<K, Set<V>> toSetMultiMap(Map<K, V> map) {
Map<K, Set<V>> setMultiMap = new HashMap<K, Set<V>>();
for (Map.Entry<K, V> entry : map.entrySet()) {
Set<V> set = new HashSet<V>();
set.add(entry.getValue());
setMultiMap.put(entry.getKey(), Collections.unmodifiableSet(set));
}
return Collections.unmodifiableMap(setMultiMap);
}
}
| 4,429 |
0 | Create_ds/dione/dione-spark/src/main/java/com/paypal | Create_ds/dione/dione-spark/src/main/java/com/paypal/dione/DummyDoc.java | package com.paypal.dione;
/**
 * Sonatype enforces javadoc artifact to get created, so we need a Java file in the project.
 * Intentionally empty; do not add functionality here.
 */
public class DummyDoc {
}
| 4,430 |
0 | Create_ds/cordova-plugin-media/src | Create_ds/cordova-plugin-media/src/android/FileHelper.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.media;
import android.net.Uri;
/** Small URI helpers shared by the media plugin. */
public class FileHelper {
    /**
     * Removes the "file://" prefix from the given URI string, if applicable.
     * Strings without that prefix are returned unchanged.
     *
     * @param uriString the URI string to operate on
     * @return a filesystem path without the "file://" prefix
     */
    public static String stripFileProtocol(String uriString) {
        boolean isFileUri = uriString.startsWith("file://");
        return isFileUri ? Uri.parse(uriString).getPath() : uriString;
    }
}
| 4,431 |
0 | Create_ds/cordova-plugin-media/src | Create_ds/cordova-plugin-media/src/android/AudioPlayer.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.media;
import android.content.Context;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.media.MediaPlayer.OnCompletionListener;
import android.media.MediaPlayer.OnErrorListener;
import android.media.MediaPlayer.OnPreparedListener;
import android.media.MediaRecorder;
import android.os.Environment;
import android.os.Build;
import org.apache.cordova.LOG;
import org.json.JSONException;
import org.json.JSONObject;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.IOException;
import java.util.LinkedList;
/**
* This class implements the audio playback and recording capabilities used by Cordova.
* It is called by the AudioHandler Cordova class.
* Only one file can be played or recorded per class instance.
*
* Local audio files must reside in one of two places:
* android_asset: file name must start with /android_asset/sound.mp3
* sdcard: file name is just sound.mp3
*/
public class AudioPlayer implements OnCompletionListener, OnPreparedListener, OnErrorListener {
// AudioPlayer modes: a player instance is either playing, recording, or idle.
public enum MODE { NONE, PLAY, RECORD };
// AudioPlayer states; ordinal values are sent to JavaScript via MEDIA_STATE updates.
public enum STATE { MEDIA_NONE,
MEDIA_STARTING,
MEDIA_RUNNING,
MEDIA_PAUSED,
MEDIA_STOPPED,
MEDIA_LOADING
};
private static final String LOG_TAG = "AudioPlayer";
// AudioPlayer message ids (presumably mirror the constants in the JS Media module — confirm against Media.js)
private static int MEDIA_STATE = 1;
private static int MEDIA_DURATION = 2;
private static int MEDIA_POSITION = 3;
private static int MEDIA_ERROR = 9;
// Media error codes
private static int MEDIA_ERR_NONE_ACTIVE = 0;
private static int MEDIA_ERR_ABORTED = 1;
// private static int MEDIA_ERR_NETWORK = 2;
// private static int MEDIA_ERR_DECODE = 3;
// private static int MEDIA_ERR_NONE_SUPPORTED = 4;
private AudioHandler handler; // The AudioHandler object
private Context context; // The Application Context object
private String id; // The id of this player (used to identify Media object in JavaScript)
private MODE mode = MODE.NONE; // Playback or Recording mode
private STATE state = STATE.MEDIA_NONE; // State of recording or playback
private String audioFile = null; // File name to play or record to
private float duration = -1; // Duration of audio, in seconds; -1 until known
private MediaRecorder recorder = null; // Audio recording object
private LinkedList<String> tempFiles = null; // Temporary recording file names (one per pause/resume segment)
private String tempFile = null; // Temp file of the segment currently being recorded
private MediaPlayer player = null; // Audio player object
private boolean prepareOnly = true; // playback after file prepare flag
private int seekOnPrepared = 0; // seek to this location once media is prepared
private float setRateOnPrepared = -1; // playback rate requested before prepare completed; < 0 means none
/**
 * Creates a player bound to one JavaScript Media object.
 *
 * @param handler the owning AudioHandler plugin instance
 * @param id identifier used to route status callbacks to the JS Media object
 * @param file audio file to play or record to (may be set later)
 */
public AudioPlayer(AudioHandler handler, String id, String file) {
    this.handler = handler;
    this.context = handler.getApplicationContext();
    this.id = id;
    this.audioFile = file;
    this.tempFiles = new LinkedList<>();
}
/**
 * Resolves a writable path for the given file name, or generates a timestamped
 * temporary 3gp file name when none is provided. External app storage is
 * preferred; the cache directory is used when external storage is unmounted.
 *
 * @param fileName the audio file name, or null/empty for a temporary name
 * @return absolute path for the audio file
 */
private String createAudioFilePath(String fileName) {
    File dir;
    if (Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED)) {
        dir = context.getExternalFilesDir(null);
    } else {
        dir = context.getCacheDir();
    }
    String name = fileName;
    if (name == null || name.isEmpty()) {
        name = String.format("tmprecording-%d.3gp", System.currentTimeMillis());
    }
    return dir.getAbsolutePath() + File.separator + name;
}
/**
 * Releases the MediaPlayer and MediaRecorder, stopping any playback or
 * recording in progress first. Safe to call when neither exists.
 */
public void destroy() {
    if (this.player != null) {
        boolean playbackActive =
                this.state == STATE.MEDIA_RUNNING || this.state == STATE.MEDIA_PAUSED;
        if (playbackActive) {
            this.player.stop();
            this.setState(STATE.MEDIA_STOPPED);
        }
        this.player.release();
        this.player = null;
    }
    if (this.recorder != null) {
        if (this.state != STATE.MEDIA_STOPPED) {
            // Finalize and persist whatever has been recorded so far.
            this.stopRecording(true);
        }
        this.recorder.release();
        this.recorder = null;
    }
}
/**
 * Start recording the specified file.
 * Recording actually writes to a fresh temp file; segments are merged into
 * {@code file} when recording stops (see moveFile/stopRecording).
 *
 * @param file The name of the file
 */
public void startRecording(String file) {
String errorMessage;
switch (this.mode) {
// Playback and recording are mutually exclusive on one instance.
case PLAY:
errorMessage = "AudioPlayer Error: Can't record in play mode.";
sendErrorStatus(MEDIA_ERR_ABORTED, errorMessage);
break;
// Idle: configure the recorder and start a new temp-file segment.
case NONE:
this.audioFile = file;
this.recorder = new MediaRecorder();
this.recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
this.recorder.setOutputFormat(MediaRecorder.OutputFormat.AAC_ADTS); // RAW_AMR);
this.recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC); //AMR_NB);
this.recorder.setAudioEncodingBitRate(96000);
this.recorder.setAudioSamplingRate(44100);
// Record into a temp file; it is moved/merged to audioFile on stop.
this.tempFile = createAudioFilePath(null);
this.recorder.setOutputFile(this.tempFile);
try {
this.recorder.prepare();
this.recorder.start();
this.setState(STATE.MEDIA_RUNNING);
return;
} catch (IllegalStateException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
// prepare()/start() failed — report a generic abort to JavaScript.
sendErrorStatus(MEDIA_ERR_ABORTED, null);
break;
// Already recording: reject the request (last case, no break needed).
case RECORD:
errorMessage = "AudioPlayer Error: Already recording.";
sendErrorStatus(MEDIA_ERR_ABORTED, errorMessage);
}
}
/**
 * Moves the temporary recording segment(s) to the requested file name.
 * A single segment is renamed (or byte-copied if rename fails, e.g. across
 * filesystems); multiple segments — produced by pause/resume — are
 * concatenated, skipping the per-segment header after the first (see copy()).
 * Temp files are deleted after a successful copy. Errors are logged, not thrown.
 *
 * @param file destination file name; relative names are resolved via createAudioFilePath()
 */
public void moveFile(String file) {
/* this is a hack to save the file as the specified name */
if (!file.startsWith("/")) {
file = createAudioFilePath(file);
}
int size = this.tempFiles.size();
LOG.d(LOG_TAG, "size = " + size);
// only one file so just copy it
if (size == 1) {
String logMsg = "renaming " + this.tempFile + " to " + file;
LOG.d(LOG_TAG, logMsg);
File f = new File(this.tempFile);
// renameTo can fail (e.g. destination on a different filesystem); fall back to a manual copy.
if (!f.renameTo(new File(file))) {
FileOutputStream outputStream = null;
File outputFile = null;
try {
outputFile = new File(file);
outputStream = new FileOutputStream(outputFile);
FileInputStream inputStream = null;
File inputFile = null;
try {
inputFile = new File(this.tempFile);
LOG.d(LOG_TAG, "INPUT FILE LENGTH: " + String.valueOf(inputFile.length()) );
inputStream = new FileInputStream(inputFile);
copy(inputStream, outputStream, false);
} catch (Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
} finally {
if (inputStream != null) try {
inputStream.close();
// Temp segment no longer needed once copied.
inputFile.delete();
inputFile = null;
} catch (Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
if (outputStream != null) try {
outputStream.close();
LOG.d(LOG_TAG, "OUTPUT FILE LENGTH: " + String.valueOf(outputFile.length()) );
} catch (Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
}
}
}
}
// more than one file so the user must have pause recording. We'll need to concat files.
else {
FileOutputStream outputStream = null;
try {
outputStream = new FileOutputStream(new File(file));
FileInputStream inputStream = null;
File inputFile = null;
for (int i = 0; i < size; i++) {
try {
inputFile = new File(this.tempFiles.get(i));
inputStream = new FileInputStream(inputFile);
// Skip the header of every segment after the first so the result plays as one stream.
copy(inputStream, outputStream, (i>0));
} catch(Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
} finally {
if (inputStream != null) try {
inputStream.close();
inputFile.delete();
inputFile = null;
} catch (Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
}
}
}
} catch(Exception e) {
e.printStackTrace();
} finally {
if (outputStream != null) try {
outputStream.close();
} catch (Exception e) {
LOG.e(LOG_TAG, e.getLocalizedMessage(), e);
}
}
}
}
/**
 * Copies all remaining bytes from {@code from} to {@code to}. When
 * {@code skipHeader} is true, the first 6 bytes of the source are skipped
 * (presumably the per-segment AAC/ADTS framing header so concatenated
 * recordings play as one stream — TODO confirm header length).
 * Neither stream is closed by this method.
 *
 * @param from source stream
 * @param to destination stream
 * @param skipHeader true to discard the first 6 bytes of the source
 * @return number of bytes copied (excluding any skipped header bytes)
 * @throws IOException if reading or writing fails
 */
private static long copy(InputStream from, OutputStream to, boolean skipHeader)
        throws IOException {
    byte[] buf = new byte[8096];
    long total = 0;
    if (skipHeader) {
        // Fixed: InputStream.skip() may skip fewer bytes than requested, so the
        // old single call could leave header bytes in the stream. Loop until
        // the full header is consumed or EOF is reached.
        long remaining = 6;
        while (remaining > 0) {
            long skipped = from.skip(remaining);
            if (skipped <= 0) {
                // skip() made no progress; consume one byte directly or stop at EOF.
                if (from.read() == -1) {
                    break;
                }
                skipped = 1;
            }
            remaining -= skipped;
        }
    }
    while (true) {
        int r = from.read(buf);
        if (r == -1) {
            break;
        }
        to.write(buf, 0, r);
        total += r;
    }
    return total;
}
/**
 * Stop/Pause recording and save to the file specified when recording started.
 * The just-recorded temp segment is remembered in tempFiles; on a full stop the
 * segments are merged into audioFile via moveFile().
 *
 * @param stop true to stop and persist the recording, false to pause only
 */
public void stopRecording(boolean stop) {
if (this.recorder != null) {
try{
if (this.state == STATE.MEDIA_RUNNING) {
this.recorder.stop();
}
// reset() returns the recorder to its idle state so it can be reused on resume.
this.recorder.reset();
// Track this segment exactly once for later concatenation.
if (!this.tempFiles.contains(this.tempFile)) {
this.tempFiles.add(this.tempFile);
}
if (stop) {
LOG.d(LOG_TAG, "stopping recording");
this.setState(STATE.MEDIA_STOPPED);
this.moveFile(this.audioFile);
} else {
LOG.d(LOG_TAG, "pause recording");
this.setState(STATE.MEDIA_PAUSED);
}
}
catch (Exception e) {
e.printStackTrace();
}
}
}
/**
 * Resumes a paused recording by starting a new temp-file segment for the same
 * target file; segments are merged when recording finally stops.
 */
public void resumeRecording() {
    this.startRecording(this.audioFile);
}
//==========================================================================
// Playback
//==========================================================================
/**
 * Starts or resumes playback of the given audio file. If the player is not yet
 * prepared, playback is deferred and will begin from onPrepared() instead.
 *
 * @param file the name of the audio file
 */
public void startPlaying(String file) {
    boolean ready = this.readyPlayer(file) && this.player != null;
    if (!ready) {
        // Not prepared yet: flag that playback (not just preparation) was requested.
        this.prepareOnly = false;
        return;
    }
    this.player.start();
    this.setState(STATE.MEDIA_RUNNING);
    this.seekOnPrepared = 0; // insures this is always reset
}
/**
 * Seeks to a new position in the track, or remembers the position to apply once
 * the player finishes preparing.
 *
 * @param milliseconds target position in milliseconds
 */
public void seekToPlaying(int milliseconds) {
    if (!this.readyPlayer(this.audioFile)) {
        // Defer: onPrepared() will replay this seek.
        this.seekOnPrepared = milliseconds;
        return;
    }
    if (milliseconds > 0) {
        this.player.seekTo(milliseconds);
    }
    LOG.d(LOG_TAG, "Send a onStatus update for the new seek");
    sendStatusChange(MEDIA_POSITION, null, (milliseconds / 1000.0f), null);
}
/**
 * Pauses playback. Reports MEDIA_ERR_NONE_ACTIVE to JavaScript if nothing is
 * currently playing.
 */
public void pausePlaying() {
    boolean playing = this.state == STATE.MEDIA_RUNNING && this.player != null;
    if (!playing) {
        String errorMessage = "AudioPlayer Error: pausePlaying() called during invalid state: " + this.state.ordinal();
        sendErrorStatus(MEDIA_ERR_NONE_ACTIVE, errorMessage);
        return;
    }
    this.player.pause();
    this.setState(STATE.MEDIA_PAUSED);
}
/**
 * Stops playback and rewinds to the beginning. Reports MEDIA_ERR_NONE_ACTIVE to
 * JavaScript if the player is neither running nor paused.
 */
public void stopPlaying() {
    boolean active = (this.state == STATE.MEDIA_RUNNING) || (this.state == STATE.MEDIA_PAUSED);
    if (!active) {
        String errorMessage = "AudioPlayer Error: stopPlaying() called during invalid state: " + this.state.ordinal();
        sendErrorStatus(MEDIA_ERR_NONE_ACTIVE, errorMessage);
        return;
    }
    // Pause + rewind rather than MediaPlayer.stop(), so the player stays prepared.
    this.player.pause();
    this.player.seekTo(0);
    LOG.d(LOG_TAG, "stopPlaying is calling stopped");
    this.setState(STATE.MEDIA_STOPPED);
}
/**
 * Resumes playback of the current audio file; equivalent to startPlaying() on
 * the file this player was created with.
 */
public void resumePlaying() {
    startPlaying(this.audioFile);
}
/**
 * MediaPlayer callback fired when playback reaches the end of the source;
 * transitions this player to MEDIA_STOPPED.
 *
 * @param mp the MediaPlayer that reached the end of the file
 */
public void onCompletion(MediaPlayer mp) {
    LOG.d(LOG_TAG, "on completion is calling stopped");
    this.setState(STATE.MEDIA_STOPPED);
}
/**
 * Returns the current playback position and pushes a MEDIA_POSITION status
 * update (in seconds) to JavaScript as a side effect.
 *
 * @return position in milliseconds, or -1 when neither playing nor paused
 */
public long getCurrentPosition() {
    boolean active = (this.state == STATE.MEDIA_RUNNING) || (this.state == STATE.MEDIA_PAUSED);
    if (!active) {
        return -1;
    }
    int position = this.player.getCurrentPosition();
    sendStatusChange(MEDIA_POSITION, null, (position / 1000.0f), null);
    return position;
}
/**
 * Determines whether the given source is a remote stream rather than a local
 * file, by looking for an http/https/rtsp scheme anywhere in the string.
 *
 * @param file the file name or URL
 * @return true for streaming sources, false for local files
 */
public boolean isStreaming(String file) {
    return file.contains("http://")
            || file.contains("https://")
            || file.contains("rtsp://");
}
/**
 * Returns the duration of the audio file in milliseconds-derived units as
 * cached by onPrepared().
 *
 * @param file the name of the audio file
 * @return the cached duration; -1 if it can't be determined yet,
 *         -2 if not allowed (a recording is in progress)
 */
public float getDuration(String file) {
    // A live recording has no queryable duration.
    if (this.recorder != null) {
        return -2;
    }
    // Player already loaded: duration was cached when the media was prepared.
    if (this.player != null) {
        return this.duration;
    }
    // No player yet: kick off preparation. Only a local file will have a
    // duration by the time this returns; streams report it asynchronously.
    this.prepareOnly = true;
    this.startPlaying(file);
    return this.duration;
}
/**
 * MediaPlayer callback fired when the media source is ready for playback.
 * Applies any seek/playback-rate requests that arrived while preparing, starts
 * playback unless only preparation was requested, caches the duration, and
 * notifies JavaScript of the duration.
 *
 * @param player The MediaPlayer that is ready for playback
 */
public void onPrepared(MediaPlayer player) {
// Listen for playback completion
this.player.setOnCompletionListener(this);
// seek to any location received while not prepared
this.seekToPlaying(this.seekOnPrepared);
// apply any playback rate received while not prepared
if (setRateOnPrepared >= 0)
this.player.setPlaybackParams (this.player.getPlaybackParams().setSpeed(setRateOnPrepared));
// If start playing after prepared
if (!this.prepareOnly) {
this.player.start();
this.setState(STATE.MEDIA_RUNNING);
this.seekOnPrepared = 0; //reset only when played
} else {
this.setState(STATE.MEDIA_STARTING);
}
// Save off duration
this.duration = getDurationInSeconds();
// reset prepare only flag
this.prepareOnly = true;
// Send status notification to JavaScript
sendStatusChange(MEDIA_DURATION, null, this.duration, null);
}
/**
 * Converts the player's duration from milliseconds (Android's unit) to the
 * seconds expected by the JavaScript side.
 *
 * @return length of the clip in seconds
 */
private float getDurationInSeconds() {
    int millis = this.player.getDuration();
    return millis / 1000.0f;
}
/**
 * Callback invoked when an asynchronous MediaPlayer operation fails
 * (synchronous errors throw at method call time instead).
 *
 * @param player the MediaPlayer the error pertains to
 * @param arg1   error type (MEDIA_ERROR_UNKNOWN, MEDIA_ERROR_SERVER_DIED)
 * @param arg2   an extra code, specific to the error
 */
public boolean onError(MediaPlayer player, int arg1, int arg2) {
    String errorMessage = String.format("AudioPlayer.onError(%d, %d)", arg1, arg2);
    // Bypass setState() on purpose: we do not want a success callback here.
    this.state = STATE.MEDIA_STOPPED;
    this.destroy();
    // Report the failure to JavaScript.
    sendErrorStatus(arg1, errorMessage);
    return false;
}
/**
 * Transition to a new state, notifying JavaScript when it actually changes.
 *
 * @param state the new state
 */
private void setState(STATE state) {
    boolean changed = this.state != state;
    this.state = state;
    if (changed) {
        sendStatusChange(MEDIA_STATE, null, (float) state.ordinal(), null);
    }
}
/**
 * Set the mode. Mode changes are intentionally not reported to JavaScript:
 * mode is not part of the plugin's expected external behavior.
 *
 * @param mode the new mode
 */
private void setMode(MODE mode) {
    // Kept as a setter for symmetry with setState(); no notification emitted.
    this.mode = mode;
}
/**
 * Get the current audio state.
 *
 * @return ordinal of the current STATE value
 */
public int getState() {
    STATE current = this.state;
    return current.ordinal();
}
/**
 * Set the playback volume.
 *
 * @param volume volume level, 0.0f - 1.0f
 */
public void setVolume(float volume) {
    if (this.player == null) {
        // No player yet: report the error instead of throwing.
        sendErrorStatus(MEDIA_ERR_NONE_ACTIVE,
                "AudioPlayer Error: Cannot set volume until the audio file is initialized.");
        return;
    }
    this.player.setVolume(volume, volume);
}
/**
 * Attempt to put the player into play mode.
 *
 * @return true if in play mode, false otherwise (e.g. while recording)
 */
private boolean playMode() {
    if (this.mode == MODE.RECORD) {
        // Cannot play while a recording is in progress.
        sendErrorStatus(MEDIA_ERR_ABORTED, "AudioPlayer Error: Can't play in record mode.");
        return false; // player is not ready
    }
    if (this.mode == MODE.NONE) {
        this.setMode(MODE.PLAY);
    }
    // Mode is PLAY (either already, or just set).
    return true;
}
/**
 * Attempts to initialize the media player for playback.
 *
 * Drives the player state machine: creates and/or loads the player as
 * needed and reports whether playback can start immediately.
 *
 * @param file the file to play
 * @return false if player not ready, reports if in wrong mode or state
 */
private boolean readyPlayer(String file) {
    if (playMode()) {
        switch (this.state) {
            case MEDIA_NONE:
                // First use: create the player and kick off loading.
                if (this.player == null) {
                    this.player = new MediaPlayer();
                    this.player.setOnErrorListener(this);
                }
                try {
                    this.loadAudioFile(file);
                } catch (Exception e) {
                    sendErrorStatus(MEDIA_ERR_ABORTED, e.getMessage());
                }
                return false;
            case MEDIA_LOADING:
                // cordova js is not aware of MEDIA_LOADING, so we send MEDIA_STARTING instead
                LOG.d(LOG_TAG, "AudioPlayer Loading: startPlaying() called during media preparation: " + STATE.MEDIA_STARTING.ordinal());
                this.prepareOnly = false;
                return false;
            case MEDIA_STARTING:
            case MEDIA_RUNNING:
            case MEDIA_PAUSED:
                // Already prepared (or preparing): ready to play.
                return true;
            case MEDIA_STOPPED:
                // if we are readying the same file
                if (file!=null && this.audioFile.compareTo(file) == 0) {
                    // maybe it was recording? (recording tears the player down)
                    if (player == null) {
                        this.player = new MediaPlayer();
                        this.player.setOnErrorListener(this);
                        this.prepareOnly = false;
                        try {
                            this.loadAudioFile(file);
                        } catch (Exception e) {
                            sendErrorStatus(MEDIA_ERR_ABORTED, e.getMessage());
                        }
                        return false;//we´re not ready yet
                    }
                    else {
                        // reset the audio file back to the beginning
                        player.seekTo(0);
                        player.pause();
                        return true;
                    }
                } else {
                    // different file: reset the player and reload
                    this.player.reset();
                    try {
                        this.loadAudioFile(file);
                    } catch (Exception e) {
                        sendErrorStatus(MEDIA_ERR_ABORTED, e.getMessage());
                    }
                    // if we had to prepare the file, we won't be in the correct state for playback
                    return false;
                }
            default:
                String errorMessage = "AudioPlayer Error: startPlaying() called during invalid state: " + this.state;
                sendErrorStatus(MEDIA_ERR_ABORTED, errorMessage);
        }
    }
    return false;
}
/**
 * Load an audio file into the MediaPlayer.
 *
 * Streaming sources are prepared asynchronously; local sources (assets,
 * filesystem paths, or resolved media paths) are prepared synchronously
 * and the duration is captured immediately.
 *
 * @param file file name, asset path ("/android_asset/..."), or stream URI
 * @throws IOException              if the source cannot be opened or prepared
 * @throws IllegalStateException    if the player is in an invalid state
 * @throws SecurityException        if the source cannot be accessed
 * @throws IllegalArgumentException if the source is invalid
 */
private void loadAudioFile(String file) throws IllegalArgumentException, SecurityException, IllegalStateException, IOException {
    if (this.isStreaming(file)) {
        this.player.setDataSource(file);
        this.player.setAudioStreamType(AudioManager.STREAM_MUSIC);
        // If it's a streaming file, play mode is implied.
        this.setMode(MODE.PLAY);
        this.setState(STATE.MEDIA_STARTING);
        this.player.setOnPreparedListener(this);
        // Streaming: prepare asynchronously so we don't block on the network.
        this.player.prepareAsync();
    }
    else {
        if (file.startsWith("/android_asset/")) {
            // Strip the "/android_asset/" prefix to get the asset name.
            String f = file.substring(15);
            android.content.res.AssetFileDescriptor fd = this.handler.cordova.getActivity().getAssets().openFd(f);
            try {
                this.player.setDataSource(fd.getFileDescriptor(), fd.getStartOffset(), fd.getLength());
            } finally {
                // FIX: the descriptor was previously leaked. Per MediaPlayer
                // docs it is safe to close it once setDataSource returns.
                fd.close();
            }
        }
        else {
            File fp = new File(file);
            if (fp.exists()) {
                // FIX: try-with-resources ensures the stream is closed even
                // if setDataSource throws (it previously leaked on error).
                try (FileInputStream fileInputStream = new FileInputStream(file)) {
                    this.player.setDataSource(fileInputStream.getFD());
                }
            }
            else {
                this.player.setDataSource(createAudioFilePath(file));
            }
        }
        this.setState(STATE.MEDIA_STARTING);
        this.player.setOnPreparedListener(this);
        this.player.prepare();
        // Local files are prepared synchronously, so the duration is known now.
        this.duration = getDurationInSeconds();
    }
}
// Convenience wrapper: report an error to JavaScript with a code but no
// numeric value payload.
private void sendErrorStatus(int errorCode, String errorMessage) {
    sendStatusChange(MEDIA_ERROR, errorCode, null, errorMessage);
}
/**
 * Send a status event to JavaScript.
 *
 * Exactly one of {@code additionalCode} (an error code, wrapped in a
 * {"code": ..., "message": ...} object) or {@code value} (a plain numeric
 * payload) may be supplied; supplying both is a programming error.
 *
 * @param messageType    one of the MEDIA_* message type constants
 * @param additionalCode optional error code, mutually exclusive with value
 * @param value          optional numeric value, mutually exclusive with additionalCode
 * @param errorMessage   optional human-readable message; logged, and attached
 *                       to the error object when additionalCode is present
 */
private void sendStatusChange(int messageType, Integer additionalCode, Float value, String errorMessage) {
    if (additionalCode != null && value != null) {
        throw new IllegalArgumentException("Only one of additionalCode or value can be specified, not both");
    }
    if (errorMessage != null) {
        LOG.d(LOG_TAG, errorMessage);
    }
    // NOTE: this JSON shape is part of the JS bridge contract — keep it stable.
    JSONObject statusDetails = new JSONObject();
    try {
        statusDetails.put("id", this.id);
        statusDetails.put("msgType", messageType);
        if (additionalCode != null) {
            JSONObject code = new JSONObject();
            code.put("code", additionalCode.intValue());
            if (errorMessage != null) {
                code.put("message", errorMessage);
            }
            statusDetails.put("value", code);
        }
        else if (value != null) {
            statusDetails.put("value", value.floatValue());
        }
    } catch (JSONException e) {
        LOG.e(LOG_TAG, "Failed to create status details", e);
    }
    this.handler.sendEventMessage("status", statusDetails);
}
/**
 * Get the current amplitude of an in-progress recording.
 *
 * @return normalized amplitude, or 0 if not recording
 */
public float getCurrentAmplitude() {
    if (this.recorder == null) {
        return 0;
    }
    try {
        if (this.state == STATE.MEDIA_RUNNING) {
            // Normalize against the approximate 16-bit PCM maximum.
            return (float) this.recorder.getMaxAmplitude() / 32762;
        }
    } catch (Exception e) {
        // Best-effort: amplitude polling must never crash the plugin.
        e.printStackTrace();
    }
    return 0;
}
/**
 * Set the playback rate for the player (ignored on API &lt; 23).
 *
 * If the player has not been created yet, the rate is stashed in
 * setRateOnPrepared and applied later in onPrepared().
 *
 * @param rate the playback speed multiplier (1.0f = normal speed)
 */
public void setRate(float rate) {
    if (Build.VERSION.SDK_INT < Build.VERSION_CODES.M) {
        LOG.d(LOG_TAG, "AudioPlayer Warning: Request to set playback rate not supported on current OS version");
        return;
    }
    if (this.player != null) {
        try {
            boolean wasPlaying = this.player.isPlaying();
            this.player.setPlaybackParams(this.player.getPlaybackParams().setSpeed(rate));
            // setPlaybackParams() implicitly starts playback; undo that if we
            // were paused before the rate change.
            if (!wasPlaying && this.player.isPlaying()) {
                this.player.pause();
            }
        } catch(Exception e) {
            // Best-effort: an unsupported rate must not crash the plugin.
            e.printStackTrace();
        }
    } else {
        // Defer until the player is prepared.
        setRateOnPrepared = rate;
    }
}
}
| 4,432 |
0 | Create_ds/cordova-plugin-media/src | Create_ds/cordova-plugin-media/src/android/AudioHandler.java | /*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package org.apache.cordova.media;
import org.apache.cordova.CallbackContext;
import org.apache.cordova.CordovaPlugin;
import org.apache.cordova.CordovaResourceApi;
import org.apache.cordova.PermissionHelper;
import android.Manifest;
import android.content.Context;
import android.content.pm.PackageManager;
import android.media.AudioManager;
import android.media.AudioManager.OnAudioFocusChangeListener;
import android.net.Uri;
import android.os.Build;
import java.util.ArrayList;
import org.apache.cordova.LOG;
import org.apache.cordova.PluginResult;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.util.HashMap;
/**
* This class called by CordovaActivity to play and record audio.
* The file can be local or over a network using http.
*
* Audio formats supported (tested):
* .mp3, .wav
*
* Local audio files must reside in one of two places:
* android_asset: file name must start with /android_asset/sound.mp3
* sdcard: file name is just sound.mp3
*/
public class AudioHandler extends CordovaPlugin {
public static String TAG = "AudioHandler";
HashMap<String, AudioPlayer> players; // Audio player object
ArrayList<AudioPlayer> pausedForPhone; // Audio players that were paused when phone call came in
ArrayList<AudioPlayer> pausedForFocus; // Audio players that were paused when focus was lost
private int origVolumeStream = -1;
private CallbackContext messageChannel;
// Permission Request Codes
public static int RECORD_AUDIO = 0;
public static int WRITE_EXTERNAL_STORAGE = 1;
public static final int PERMISSION_DENIED_ERROR = 20;
private String recordId;
private String fileUriStr;
/**
 * Creates the handler with empty player and paused-player bookkeeping.
 */
public AudioHandler() {
    this.players = new HashMap<>();
    this.pausedForPhone = new ArrayList<>();
    this.pausedForFocus = new ArrayList<>();
}
// Convenience accessor for the host application's Context.
public Context getApplicationContext() {
    return cordova.getActivity().getApplicationContext();
}
/**
 * Executes the request and returns PluginResult.
 *
 * @param action          the action to execute
 * @param args            JSONArray of arguments for the plugin
 * @param callbackContext the callback context used when calling back into JavaScript
 * @return true if the action was recognized and handled, false otherwise
 * @throws JSONException if a required argument is missing or of the wrong type
 */
public boolean execute(String action, JSONArray args, CallbackContext callbackContext) throws JSONException {
    CordovaResourceApi resourceApi = webView.getResourceApi();
    PluginResult.Status status = PluginResult.Status.OK;
    String result = "";
    if (action.equals("startRecordingAudio")) {
        recordId = args.getString(0);
        String target = args.getString(1);
        try {
            Uri targetUri = resourceApi.remapUri(Uri.parse(target));
            fileUriStr = targetUri.toString();
        } catch (IllegalArgumentException e) {
            // Not a remappable URI; use the raw target as-is.
            fileUriStr = target;
        }
        // Recording starts only after the permission flow completes.
        promptForRecord();
    }
    else if (action.equals("stopRecordingAudio")) {
        this.stopRecordingAudio(args.getString(0), true);
    }
    else if (action.equals("pauseRecordingAudio")) {
        this.stopRecordingAudio(args.getString(0), false);
    }
    else if (action.equals("resumeRecordingAudio")) {
        this.resumeRecordingAudio(args.getString(0));
    }
    else if (action.equals("startPlayingAudio")) {
        String target = args.getString(1);
        // FIX: local renamed so it no longer shadows the recording field fileUriStr.
        String playUriStr;
        try {
            Uri targetUri = resourceApi.remapUri(Uri.parse(target));
            playUriStr = targetUri.toString();
        } catch (IllegalArgumentException e) {
            playUriStr = target;
        }
        this.startPlayingAudio(args.getString(0), FileHelper.stripFileProtocol(playUriStr));
    }
    else if (action.equals("seekToAudio")) {
        this.seekToAudio(args.getString(0), args.getInt(1));
    }
    else if (action.equals("pausePlayingAudio")) {
        this.pausePlayingAudio(args.getString(0));
    }
    else if (action.equals("stopPlayingAudio")) {
        this.stopPlayingAudio(args.getString(0));
    } else if (action.equals("setVolume")) {
        try {
            this.setVolume(args.getString(0), Float.parseFloat(args.getString(1)));
        } catch (NumberFormatException nfe) {
            // no-op: ignore malformed volume values
        }
    } else if (action.equals("getCurrentPositionAudio")) {
        float f = this.getCurrentPositionAudio(args.getString(0));
        callbackContext.sendPluginResult(new PluginResult(status, f));
        return true;
    }
    else if (action.equals("getDurationAudio")) {
        float f = this.getDurationAudio(args.getString(0), args.getString(1));
        callbackContext.sendPluginResult(new PluginResult(status, f));
        return true;
    }
    else if (action.equals("create")) {
        String id = args.getString(0);
        String src = FileHelper.stripFileProtocol(args.getString(1));
        getOrCreatePlayer(id, src);
    }
    else if (action.equals("release")) {
        boolean b = this.release(args.getString(0));
        callbackContext.sendPluginResult(new PluginResult(status, b));
        return true;
    }
    else if (action.equals("messageChannel")) {
        // Persistent channel used by sendEventMessage() for status events.
        messageChannel = callbackContext;
        return true;
    } else if (action.equals("getCurrentAmplitudeAudio")) {
        float f = this.getCurrentAmplitudeAudio(args.getString(0));
        callbackContext.sendPluginResult(new PluginResult(status, f));
        return true;
    }
    else if (action.equals("setRate")) {
        try {
            // FIX: guard the parse like setVolume does, so a malformed rate
            // cannot crash the bridge with an uncaught NumberFormatException.
            this.setRate(args.getString(0), Float.parseFloat(args.getString(1)));
        } catch (NumberFormatException nfe) {
            // no-op: ignore malformed rate values
        }
        return true;
    }
    else { // Unrecognized action.
        return false;
    }
    callbackContext.sendPluginResult(new PluginResult(status, result));
    return true;
}
/**
 * Stop all audio players and recorders and release their resources.
 */
public void onDestroy() {
    if (!players.isEmpty()) {
        // Restore the original volume-control stream before tearing down.
        onLastPlayerReleased();
    }
    for (AudioPlayer audio : this.players.values()) {
        audio.destroy();
    }
    this.players.clear();
}
/**
 * Stop all audio players and recorders on navigate (full teardown, same
 * as onDestroy).
 */
@Override
public void onReset() {
    onDestroy();
}
/**
 * Called when a message is sent to the plugin.
 *
 * Pauses all running players while the phone is ringing or off-hook, and
 * resumes exactly those players once the phone returns to idle.
 *
 * @param id   the message id
 * @param data the message data
 * @return null (never stops propagation)
 */
public Object onMessage(String id, Object data) {
    if (!id.equals("telephone")) {
        return null;
    }
    if ("ringing".equals(data) || "offhook".equals(data)) {
        // Pause every running player and remember it for later resume.
        for (AudioPlayer audio : this.players.values()) {
            if (audio.getState() == AudioPlayer.STATE.MEDIA_RUNNING.ordinal()) {
                this.pausedForPhone.add(audio);
                audio.pausePlaying();
            }
        }
    } else if ("idle".equals(data)) {
        // Phone is idle again: resume the players we paused.
        for (AudioPlayer audio : this.pausedForPhone) {
            audio.startPlaying(null);
        }
        this.pausedForPhone.clear();
    }
    return null;
}
//--------------------------------------------------------------------------
// LOCAL METHODS
//--------------------------------------------------------------------------
// Look up the player for this id, lazily creating (and registering) one
// bound to the given file when it does not exist yet.
private AudioPlayer getOrCreatePlayer(String id, String file) {
    AudioPlayer existing = players.get(id);
    if (existing != null) {
        return existing;
    }
    if (players.isEmpty()) {
        // First player: route hardware volume keys to the music stream.
        onFirstPlayerCreated();
    }
    AudioPlayer created = new AudioPlayer(this, id, file);
    players.put(id, created);
    return created;
}
/**
 * Release an audio player instance to save memory.
 *
 * @param id the id of the audio player
 * @return true if a player existed and was released
 */
private boolean release(String id) {
    AudioPlayer removed = players.remove(id);
    if (removed == null) {
        return false;
    }
    if (players.isEmpty()) {
        // Last player gone: restore the original volume-control stream.
        onLastPlayerReleased();
    }
    removed.destroy();
    return true;
}
/**
 * Start recording and save to the specified file.
 *
 * @param id   the id of the audio player
 * @param file the name of the file to record into
 */
public void startRecordingAudio(String id, String file) {
    AudioPlayer audio = getOrCreatePlayer(id, file);
    audio.startRecording(file);
}
/**
 * Stop or pause recording, persisting to the file given at start.
 *
 * @param id   the id of the audio player
 * @param stop true to stop recording, false to pause it
 */
public void stopRecordingAudio(String id, boolean stop) {
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        return;
    }
    audio.stopRecording(stop);
}
/**
 * Resume a paused recording.
 *
 * @param id the id of the audio player
 */
public void resumeRecordingAudio(String id) {
    AudioPlayer audio = players.get(id);
    if (audio == null) {
        return;
    }
    audio.resumeRecording();
}
/**
 * Start or resume playing an audio file, acquiring audio focus.
 *
 * @param id   the id of the audio player
 * @param file the name of the audio file
 */
public void startPlayingAudio(String id, String file) {
    AudioPlayer audio = getOrCreatePlayer(id, file);
    audio.startPlaying(file);
    getAudioFocus();
}
/**
 * Seek to a playback position.
 *
 * @param id           the id of the audio player
 * @param milliseconds position to seek to (1000 = 1 second)
 */
public void seekToAudio(String id, int milliseconds) {
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        return;
    }
    audio.seekToPlaying(milliseconds);
}
/**
 * Pause playback.
 *
 * @param id the id of the audio player
 */
public void pausePlayingAudio(String id) {
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        return;
    }
    audio.pausePlaying();
}
/**
 * Stop playing the audio file.
 *
 * @param id the id of the audio player
 */
public void stopPlayingAudio(String id) {
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        return;
    }
    audio.stopPlaying();
}
/**
 * Get the current playback position.
 *
 * @param id the id of the audio player
 * @return position in seconds (the player reports milliseconds), or -1 if
 *         no such player exists
 */
public float getCurrentPositionAudio(String id) {
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        return -1;
    }
    return audio.getCurrentPosition() / 1000.0f;
}
/**
 * Get the duration of the audio file, creating the player if needed.
 *
 * @param id   the id of the audio player
 * @param file the name of the audio file
 * @return the duration (see AudioPlayer.getDuration for sentinel values)
 */
public float getDurationAudio(String id, String file) {
    AudioPlayer audio = getOrCreatePlayer(id, file);
    return audio.getDuration(file);
}
/**
 * Set the audio device to be used for playback.
 *
 * Uses the deprecated AudioManager routing API; kept for backward
 * compatibility with older devices.
 *
 * @param output 1=earpiece, 2=speaker (anything else is logged as an error)
 */
@SuppressWarnings("deprecation")
public void setAudioOutputDevice(int output) {
    String TAG1 = "AudioHandler.setAudioOutputDevice(): Error : ";
    AudioManager audiMgr = (AudioManager) this.cordova.getActivity().getSystemService(Context.AUDIO_SERVICE);
    if (output == 2) {
        audiMgr.setRouting(AudioManager.MODE_NORMAL, AudioManager.ROUTE_SPEAKER, AudioManager.ROUTE_ALL);
    }
    else if (output == 1) {
        audiMgr.setRouting(AudioManager.MODE_NORMAL, AudioManager.ROUTE_EARPIECE, AudioManager.ROUTE_ALL);
    }
    else {
        LOG.e(TAG1," Unknown output device");
    }
}
// Pause every running player when audio focus is lost, remembering which
// ones were paused so resumeAllGainedFocus() can restart exactly those.
public void pauseAllLostFocus() {
    for (AudioPlayer audio : this.players.values()) {
        if (audio.getState() == AudioPlayer.STATE.MEDIA_RUNNING.ordinal()) {
            this.pausedForFocus.add(audio);
            audio.pausePlaying();
        }
    }
}
// Resume the players that pauseAllLostFocus() paused, then clear the list.
public void resumeAllGainedFocus() {
    for (AudioPlayer audio : this.pausedForFocus) {
        audio.resumePlaying();
    }
    this.pausedForFocus.clear();
}
/**
 * Audio-focus listener: pauses all running players on any focus loss
 * (including transient and duck-allowed losses) and resumes them when
 * focus is regained.
 */
private OnAudioFocusChangeListener focusChangeListener = new OnAudioFocusChangeListener() {
    public void onAudioFocusChange(int focusChange) {
        switch (focusChange) {
            // All loss variants are treated the same: pause everything.
            case (AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK) :
            case (AudioManager.AUDIOFOCUS_LOSS_TRANSIENT) :
            case (AudioManager.AUDIOFOCUS_LOSS) :
                pauseAllLostFocus();
                break;
            case (AudioManager.AUDIOFOCUS_GAIN):
                resumeAllGainedFocus();
                break;
            default:
                break;
        }
    }
};
// Request (and keep, via focusChangeListener) audio focus for music
// playback; failure is only logged.
public void getAudioFocus() {
    String TAG2 = "AudioHandler.getAudioFocus(): Error : ";
    AudioManager am = (AudioManager) this.cordova.getActivity().getSystemService(Context.AUDIO_SERVICE);
    int result = am.requestAudioFocus(focusChangeListener,
            AudioManager.STREAM_MUSIC,
            AudioManager.AUDIOFOCUS_GAIN);
    if (result != AudioManager.AUDIOFOCUS_REQUEST_GRANTED) {
        LOG.e(TAG2,result + " instead of " + AudioManager.AUDIOFOCUS_REQUEST_GRANTED);
    }
}
/**
 * Get the audio device currently used for playback.
 *
 * Uses the deprecated AudioManager routing API; kept for backward
 * compatibility with setAudioOutputDevice().
 *
 * @return 1=earpiece, 2=speaker, -1=unknown
 */
@SuppressWarnings("deprecation")
public int getAudioOutputDevice() {
    AudioManager audiMgr = (AudioManager) this.cordova.getActivity().getSystemService(Context.AUDIO_SERVICE);
    // Query the routing once instead of twice.
    int routing = audiMgr.getRouting(AudioManager.MODE_NORMAL);
    if (routing == AudioManager.ROUTE_EARPIECE) {
        return 1;
    }
    else if (routing == AudioManager.ROUTE_SPEAKER) {
        return 2;
    }
    else {
        return -1;
    }
}
/**
 * Set the volume for an audio player.
 *
 * @param id     the id of the audio player
 * @param volume volume to adjust to, 0.0f - 1.0f
 */
public void setVolume(String id, float volume) {
    String TAG3 = "AudioHandler.setVolume(): Error : ";
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        LOG.e(TAG3,"Unknown Audio Player " + id);
        return;
    }
    audio.setVolume(volume);
}
/**
 * Set the playback rate of an audio player.
 *
 * @param id   the id of the audio player
 * @param rate the playback rate multiplier
 */
public void setRate(String id, float rate) {
    String TAG3 = "AudioHandler.setRate(): Error : ";
    AudioPlayer audio = this.players.get(id);
    if (audio == null) {
        LOG.e(TAG3, "Unknown Audio Player " + id);
        return;
    }
    audio.setRate(rate);
}
// First player created: remember the activity's volume-control stream and
// route the hardware volume keys to the music stream while audio is active.
private void onFirstPlayerCreated() {
    origVolumeStream = cordova.getActivity().getVolumeControlStream();
    cordova.getActivity().setVolumeControlStream(AudioManager.STREAM_MUSIC);
}
// Last player released: restore the volume-control stream saved in
// onFirstPlayerCreated(); -1 marks "nothing saved".
private void onLastPlayerReleased() {
    if (origVolumeStream != -1) {
        cordova.getActivity().setVolumeControlStream(origVolumeStream);
        origVolumeStream = -1;
    }
}
// Wrap actionData in an {"action": <action>, <action>: <data>} envelope and
// push it over the persistent message channel to JavaScript, if connected.
void sendEventMessage(String action, JSONObject actionData) {
    JSONObject message = new JSONObject();
    try {
        message.put("action", action);
        if (actionData != null) {
            message.put(action, actionData);
        }
    } catch (JSONException e) {
        LOG.e(TAG, "Failed to create event message", e);
    }
    PluginResult pluginResult = new PluginResult(PluginResult.Status.OK, message);
    // Keep the callback alive: the channel carries many events over time.
    pluginResult.setKeepCallback(true);
    if (messageChannel != null) {
        messageChannel.sendPluginResult(pluginResult);
    }
}
// Permission flow callback: if any requested permission was denied, report
// PERMISSION_DENIED_ERROR to JavaScript; otherwise continue the record flow
// (which may request the next missing permission).
public void onRequestPermissionResult(int requestCode, String[] permissions,
                                      int[] grantResults) throws JSONException
{
    for(int r:grantResults)
    {
        if(r == PackageManager.PERMISSION_DENIED)
        {
            this.messageChannel.sendPluginResult(new PluginResult(PluginResult.Status.ERROR, PERMISSION_DENIED_ERROR));
            return;
        }
    }
    promptForRecord();
}
/*
 * Catch-all entry point for the (possibly multi-step) permission flow that
 * precedes recording. Each missing permission triggers a request and an
 * early return; onRequestPermissionResult() re-enters here until all
 * permissions are granted, at which point recording starts using the
 * recordId/fileUriStr captured in execute().
 */
private void promptForRecord()
{
    // If Android < 33, check for WRITE_EXTERNAL_STORAGE permission
    // (scoped storage on 33+ makes it unnecessary).
    if (android.os.Build.VERSION.SDK_INT < Build.VERSION_CODES.TIRAMISU) {
        if (!PermissionHelper.hasPermission(this, Manifest.permission.WRITE_EXTERNAL_STORAGE)) {
            PermissionHelper.requestPermission(this, WRITE_EXTERNAL_STORAGE, Manifest.permission.WRITE_EXTERNAL_STORAGE);
            return;
        }
    }
    // For all Android versions, check for RECORD_AUDIO permission.
    if (!PermissionHelper.hasPermission(this, Manifest.permission.RECORD_AUDIO)) {
        PermissionHelper.requestPermission(this, RECORD_AUDIO, Manifest.permission.RECORD_AUDIO);
        return;
    }
    // Start recording if all necessary permissions were granted.
    this.startRecordingAudio(recordId, FileHelper.stripFileProtocol(fileUriStr));
}
/**
 * Get the current recording amplitude.
 *
 * @param id the id of the audio player
 * @return amplitude, or 0 if the player does not exist or is not recording
 */
public float getCurrentAmplitudeAudio(String id) {
    AudioPlayer audio = this.players.get(id);
    return (audio == null) ? 0 : audio.getCurrentAmplitude();
}
}
| 4,433 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/QueryRegistryTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import io.mantisrx.publish.proto.MantisServerSubscription;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
class QueryRegistryTest {
/**
 * Registering queries for two apps yields one subscription per app and
 * both appear in the aggregate view; invalid arguments are rejected.
 */
@Test
void registerQueryTest() {
    // Builder without a client-id prefix must be rejected.
    // (assertThrows replaces the try/fail()/catch idiom and also removes
    // the unused local the original assigned.)
    assertThrows(IllegalArgumentException.class, () -> new QueryRegistry.Builder().build());
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = QueryRegistry.ANY;
    // Null subscription id and null query are both invalid.
    assertThrows(Exception.class, () -> queryRegistry.registerQuery(targetApp, null, "true"));
    assertThrows(Exception.class, () -> queryRegistry.registerQuery(targetApp, "subId", null));
    queryRegistry.registerQuery("myApp", "subId", "true");
    queryRegistry.registerQuery("myApp2", "subId", "false");
    // Each app sees exactly its own subscription.
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp");
    assertEquals(1, currentSubs.size());
    List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp("myApp2");
    assertEquals(1, currentSubs2.size());
    Map<String, List<MantisServerSubscription>> allSubscriptions = queryRegistry.getAllSubscriptions();
    assertEquals(2, allSubscriptions.size());
    assertTrue(allSubscriptions.containsKey("myApp"));
    assertTrue(allSubscriptions.containsKey("myApp2"));
}
/**
 * A query registered for ANY must be visible when looking up a specific app.
 */
@Test
void registerQueryForAnyLookupSpecificAppTest() {
    // Builder without a client-id prefix must be rejected.
    assertThrows(IllegalArgumentException.class, () -> new QueryRegistry.Builder().build());
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = QueryRegistry.ANY;
    queryRegistry.registerQuery(targetApp, "subId", "true");
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp("myApp");
    assertEquals(1, currentSubs.size());
}
/**
 * A query registered for a specific app must NOT be visible when looking
 * up the ANY target.
 */
@Test
void registerQueryForAppLookupAnyTest() {
    // Builder without a client-id prefix must be rejected.
    assertThrows(IllegalArgumentException.class, () -> new QueryRegistry.Builder().build());
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = QueryRegistry.ANY;
    queryRegistry.registerQuery("myApp", "subId", "true");
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
    assertEquals(0, currentSubs.size());
}
/**
 * Deregistering the only query for an app eventually removes it.
 */
@Test
@Disabled("time-based, non-deterministic")
void deregisterQueryTest() throws InterruptedException {
    // Builder without a client-id prefix must be rejected.
    assertThrows(IllegalArgumentException.class, () -> new QueryRegistry.Builder().build());
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myapp";
    // Null subscription id and null query are both invalid.
    assertThrows(Exception.class, () -> queryRegistry.registerQuery(targetApp, null, "true"));
    assertThrows(Exception.class, () -> queryRegistry.registerQuery(targetApp, "subId", null));
    queryRegistry.registerQuery(targetApp, "subId", "true");
    List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
    assertEquals(1, currentSubs.size());
    queryRegistry.deregisterQuery(targetApp, "subId", "true");
    // Deregistration is applied asynchronously; give it time to take effect.
    Thread.sleep(500);
    currentSubs = queryRegistry.getCurrentSubscriptionsForApp(QueryRegistry.ANY);
    assertEquals(0, currentSubs.size());
}
/**
 * Identical registrations issued concurrently must collapse into a single
 * subscription.
 */
@Test
@Disabled("time-based, non-deterministic")
void registerIdenticalQueryGetsDedupedTest() {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    int concurrency = 5;
    // Start gate released once all workers have been submitted.
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    Runnable task = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the pool can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency);
    try {
        for (int i = 0; i < concurrency; i++) {
            executorService.submit(task);
        }
        latch.countDown();
        endLatch.await();
        // All identical registrations must have been deduped to one.
        List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
        assertEquals(1, currentSubs.size());
        assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
    } catch (InterruptedException e) {
        e.printStackTrace();
        fail();
    } finally {
        // FIX: the pool was previously never shut down, leaking threads.
        executorService.shutdown();
    }
}
/**
 * N concurrent identical registrations racing with N-1 concurrent
 * deregistrations must leave exactly one subscription.
 */
@Test
@Disabled("time-based, non-deterministic")
void registerIdenticalQueryRemovalTest() throws InterruptedException {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    int concurrency = 5;
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    // One fewer removal than additions, so one registration must survive.
    CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1);
    Runnable addQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable removeQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2);
    try {
        for (int i = 0; i < concurrency; i++) {
            executorService.submit(addQueryTask);
        }
        for (int i = 0; i < concurrency - 1; i++) {
            executorService.submit(removeQueryTask);
        }
        latch.countDown();
        removeQueryEndLatch.await();
        List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
        assertEquals(1, currentSubs.size());
        assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
    } finally {
        // FIX: shut the pool down so test threads do not leak.
        executorService.shutdown();
    }
}
/**
 * Same race as registerIdenticalQueryRemovalTest, but across two target
 * apps at once: each app must end with exactly one surviving subscription.
 */
@Test
@Disabled("time-based, non-deterministic")
void registerQueryMultipleAppsRemovalTest() throws InterruptedException {
    QueryRegistry queryRegistry = new QueryRegistry.Builder().withClientIdPrefix("myPrefix").build();
    String targetApp = "myApp";
    String targetApp2 = "myApp2";
    int concurrency = 5;
    CountDownLatch latch = new CountDownLatch(1);
    CountDownLatch endLatch = new CountDownLatch(concurrency);
    CountDownLatch removeQueryEndLatch = new CountDownLatch(concurrency - 1);
    Runnable addQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable addQueryTask2 = () -> {
        try {
            latch.await();
            queryRegistry.registerQuery(targetApp2, "subId", "true");
            endLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable removeQueryTask = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    Runnable removeQueryTask2 = () -> {
        try {
            latch.await();
            queryRegistry.deregisterQuery(targetApp2, "subId", "true");
            removeQueryEndLatch.countDown();
        } catch (InterruptedException ignored) {
        }
    };
    ExecutorService executorService = Executors.newFixedThreadPool(concurrency * 2);
    try {
        for (int i = 0; i < concurrency; i++) {
            executorService.submit(addQueryTask);
            executorService.submit(addQueryTask2);
        }
        for (int i = 0; i < concurrency - 1; i++) {
            executorService.submit(removeQueryTask);
            executorService.submit(removeQueryTask2);
        }
        latch.countDown();
        removeQueryEndLatch.await();
        List<MantisServerSubscription> currentSubs = queryRegistry.getCurrentSubscriptionsForApp(targetApp);
        assertEquals(1, currentSubs.size());
        List<MantisServerSubscription> currentSubs2 = queryRegistry.getCurrentSubscriptionsForApp(targetApp2);
        assertEquals(1, currentSubs2.size());
        assertEquals("myPrefix_subId", currentSubs.get(0).getSubscriptionId());
    } finally {
        // FIX: shut the pool down so test threads do not leak.
        executorService.shutdown();
    }
}
}
| 4,434 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/test/java/io/mantisrx/connector/publish/core/EventFilterTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import org.junit.jupiter.api.Test;
import rx.functions.Func1;
public class EventFilterTest {

    private ObjectMapper mapper = new ObjectMapper();

    /** A null client id must be rejected by the EventFilter constructor. */
    @Test
    public void missingClientIdFails() {
        try {
            new EventFilter(null);
            fail();
        } catch (Exception ignored) {
        }
    }

    /**
     * Events whose matchedClients list contains {@code clientId_subId} pass the
     * filter; events without any matching entry are rejected.
     */
    @Test
    public void basicFilterTest() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        List<String> subIdParam = new ArrayList<>();
        subIdParam.add("mySubId");
        params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam);
        Func1<String, Boolean> materializedFilter = filter.call(params);
        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients);
        payLoad.put("type", "EVENT");
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertTrue(materializedFilter.call(payloadStr));
        // BUG FIX: these entries were previously added to matchedClients instead of
        // matchedClients2, so the negative case was exercised against an empty list.
        // Note the filter matches by substring, so the non-matching entries must not
        // contain "mySubId" at all (e.g. "mySubId2" would still match).
        List<String> matchedClients2 = new ArrayList<>();
        matchedClients2.add(clientId + "_" + "anotherSubId");
        matchedClients2.add(clientId + "_" + "BlahSubId");
        payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients2);
        payLoad.put("type", "EVENT");
        payloadStr = mapper.writeValueAsString(payLoad);
        assertFalse(materializedFilter.call(payloadStr));
    }

    /** Empty payloads and null data are rejected without throwing. */
    @Test
    public void basicEmptyEventFilterTest() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        List<String> subIdParam = new ArrayList<>();
        subIdParam.add("mySubId");
        params.put(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME, subIdParam);
        Func1<String, Boolean> materializedFilter = filter.call(params);
        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertFalse(materializedFilter.call(payloadStr));
        try {
            assertFalse(materializedFilter.call(null));
        } catch (Exception e) {
            fail();
        }
    }

    /** Without a subscriptionId parameter the materialized filter passes everything. */
    @Test
    public void missingSubIdParamAlwaysPasses() throws JsonProcessingException {
        String clientId = "myClientId";
        EventFilter filter = new EventFilter(clientId);
        Map<String, List<String>> params = new HashMap<>();
        Func1<String, Boolean> materializedFilter = filter.call(params);
        List<String> matchedClients = new ArrayList<>();
        matchedClients.add(clientId + "_" + "mySubId");
        matchedClients.add(clientId + "_" + "BlahSubId");
        Map<String, Object> payLoad = new HashMap<>();
        payLoad.put("ts", System.currentTimeMillis());
        payLoad.put("matchedClients", matchedClients);
        payLoad.put("type", "EVENT");
        String payloadStr = mapper.writeValueAsString(payLoad);
        assertTrue(materializedFilter.call(payloadStr));
    }
}
| 4,435 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryRegistry.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import io.mantisrx.publish.proto.MantisServerSubscription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Thread-safe registry of subscriptions keyed by target application. Queries
 * registered under {@link #ANY} apply to all applications.
 */
public class QueryRegistry {

    public static final String ANY = "ANY";

    private static final Logger LOGGER = LoggerFactory.getLogger(QueryRegistry.class);

    private final Map<String, String> emptyMap = new HashMap<>(0);

    // Per-target-application query maps; key ANY holds queries that match every app.
    private final ConcurrentMap<String, QueryMap> appToSubscriptionMap = new ConcurrentHashMap<>();

    private final String clientIdPrefix;

    private QueryRegistry(String clientIdPrefix) {
        this.clientIdPrefix = clientIdPrefix;
    }

    /** Registers a query with no additional parameters and no Groovy validation. */
    public void registerQuery(String targetApp, String subId, String query) {
        registerQuery(targetApp, subId, query, this.emptyMap, false);
    }

    /**
     * Registers a query against the given target application.
     *
     * @param targetApp             application the query applies to, or {@link #ANY}
     * @param subId                 subscription identifier
     * @param query                 query expression
     * @param additionalParams      optional extra parameters; may be {@code null}
     * @param validateQueryAsGroovy whether to validate the query as a Groovy expression
     * @throws IllegalArgumentException if a required argument is null or empty
     */
    public void registerQuery(String targetApp, String subId, String query, Map<String, String> additionalParams, boolean validateQueryAsGroovy) {
        checkNotNull("subscriptionId", subId);
        checkNotNull("query", query);
        checkNotNull("targetAppName", targetApp);
        // Normalize a null map to the shared empty map. BUG FIX: this value was
        // previously computed but the raw (possibly null) map was passed through.
        Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams;
        // computeIfAbsent is a single atomic operation, unlike the former
        // putIfAbsent followed by a separate get.
        appToSubscriptionMap.computeIfAbsent(targetApp, app -> new QueryMap(clientIdPrefix))
                .registerQuery(subId, query, addParams, validateQueryAsGroovy);
    }

    /**
     * Decrements the ref count of (and possibly removes) the given subscription.
     * Always returns {@code true}, even if the app or subscription is unknown.
     */
    public boolean deregisterQuery(String targetApp, String subId, String query) {
        appToSubscriptionMap.computeIfPresent(targetApp, (k, v) -> {
            v.deregisterQuery(subId, query);
            return v;
        });
        return true;
    }

    /**
     * Returns the subscriptions for the given app, plus the ANY subscriptions
     * (unless the app itself is ANY). The returned list is a fresh copy.
     */
    public List<MantisServerSubscription> getCurrentSubscriptionsForApp(String app) {
        List<MantisServerSubscription> subsForApp = (appToSubscriptionMap.containsKey(app)) ? appToSubscriptionMap.get(app).getCurrentSubscriptions() : new ArrayList<>();
        if (!app.equals(ANY) && appToSubscriptionMap.containsKey(ANY)) {
            subsForApp.addAll(appToSubscriptionMap.get(ANY).getCurrentSubscriptions());
        }
        return subsForApp;
    }

    /**
     * Returns the subscriptions for the app named in the "app" query parameter,
     * defaulting to {@link #ANY} when the parameter is absent.
     *
     * @param queryParams key-value pairs of stream-queries.
     */
    public List<MantisServerSubscription> getCurrentSubscriptions(Map<String, List<String>> queryParams) {
        String app = ANY;
        if (queryParams.containsKey("app")) {
            app = queryParams.get("app").get(0);
        }
        return getCurrentSubscriptionsForApp(app);
    }

    /** Returns a snapshot of every registered subscription, keyed by app name. */
    public Map<String, List<MantisServerSubscription>> getAllSubscriptions() {
        Map<String, List<MantisServerSubscription>> allSubMap = new HashMap<>();
        appToSubscriptionMap.forEach((s, q) -> {
            allSubMap.put(s, q.getCurrentSubscriptions());
        });
        return allSubMap;
    }

    /** Builder requiring a non-empty client-id prefix. */
    public static class Builder {

        private String prefix = null;

        public Builder() {
        }

        public Builder withClientIdPrefix(String prefix) {
            checkNotNull("prefix", prefix);
            this.prefix = prefix;
            return this;
        }

        public QueryRegistry build() {
            checkNotNull("prefix", this.prefix);
            return new QueryRegistry(prefix);
        }
    }
}
| 4,436 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/QueryMap.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import static io.mantisrx.connector.publish.core.ObjectUtils.checkNotNull;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import io.mantisrx.publish.proto.MantisServerSubscription;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Ref-counted map of subscription id to subscription, scoped to one target app.
 * Re-registrations of the same id bump a ref count so duplicate client
 * registrations do not produce duplicate subscriptions.
 */
public class QueryMap {

    private static final Logger LOGGER = LoggerFactory.getLogger(QueryMap.class);

    private final Map<String, String> emptyMap = new HashMap<>(0);

    // subId -> ref-counted subscription wrapper.
    private final ConcurrentHashMap<String, MantisServerSubscriptionWrapper> subscriptionMap =
            new ConcurrentHashMap<>();

    private final String clientIdPrefix;

    QueryMap(String clientIdPrefix) {
        this.clientIdPrefix = clientIdPrefix;
    }

    /**
     * Registers a query without Groovy validation.
     * BUG FIX: the additional-parameter map is now forwarded; previously this
     * overload silently ignored its map argument and always used an empty map.
     */
    void registerQuery(String subId, String query, Map<String, String> additionalParams) {
        registerQuery(subId, query, additionalParams, false);
    }

    /**
     * Registers a query, creating the subscription on first registration and
     * incrementing its ref count on every call.
     *
     * @param additionalParams optional extra parameters; may be {@code null}
     * @throws IllegalArgumentException if subId or query is null or empty
     */
    void registerQuery(String subId, String query,
                       Map<String, String> additionalParams,
                       boolean validateQueryAsGroovy) {
        checkNotNull("subscriptionId", subId);
        checkNotNull("query", query);
        Map<String, String> addParams = (additionalParams == null) ? emptyMap : additionalParams;
        subscriptionMap.computeIfAbsent(subId, (s) -> new MantisServerSubscriptionWrapper(addMantisPrefix(subId), query, addParams)).incrementAndGetRefCount();
    }

    /**
     * Decrements the subscription's ref count, removing it once the count drops
     * to zero. Always returns {@code true}, even for unknown ids.
     */
    boolean deregisterQuery(String subId, String query) {
        MantisServerSubscriptionWrapper subscription = subscriptionMap.computeIfPresent(subId, (k, v) -> {
            v.decrementRefCount();
            return v;
        });
        if (subscription != null) {
            if (subscription.getRefCount() <= 0) {
                LOGGER.info("Subscription ref count is 0 for subscriptionId " + subId + " removing subscription");
                subscriptionMap.remove(subId);
            } else {
                LOGGER.info("Subscription ref count decremented for subscriptionId " + subId);
            }
        } else {
            LOGGER.info("Subscription " + subId + " not found");
        }
        return true;
    }

    /** Returns a fresh list of the currently registered subscriptions. */
    public List<MantisServerSubscription> getCurrentSubscriptions() {
        return subscriptionMap.values().stream().map(MantisServerSubscriptionWrapper::getSubscription).collect(Collectors.toList());
    }

    // Subscription ids are namespaced with the client id prefix to avoid
    // collisions between different publishing clients.
    private String addMantisPrefix(String subId) {
        return clientIdPrefix + "_" + subId;
    }

    /** Builder requiring a non-empty client-id prefix. */
    public static class Builder {

        String prefix = null;

        Builder() {
        }

        Builder withClientIdPrefix(String prefix) {
            checkNotNull("prefix", prefix);
            this.prefix = prefix;
            return this;
        }

        QueryMap build() {
            checkNotNull("prefix", this.prefix);
            return new QueryMap(prefix);
        }
    }

    /** Pairs a subscription with a ref count used to dedup repeated registrations. */
    public static class MantisServerSubscriptionWrapper {

        private final MantisServerSubscription subscription;

        // Used to dedup erroneous subscriptions from client.
        private final AtomicInteger refCount = new AtomicInteger();

        MantisServerSubscriptionWrapper(String subId,
                                        String query,
                                        Map<String, String> additionalParams) {
            this.subscription = new MantisServerSubscription(subId, query, additionalParams);
        }

        MantisServerSubscription getSubscription() {
            return this.subscription;
        }

        int incrementAndGetRefCount() {
            return refCount.incrementAndGet();
        }

        void decrementRefCount() {
            refCount.decrementAndGet();
        }

        int getRefCount() {
            return refCount.get();
        }

        @Override
        public String toString() {
            return "MantisServerSubscriptionWrapper{"
                    + " subscription=" + subscription
                    + ", refCount=" + refCount
                    + '}';
        }
    }
}
| 4,437 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/ObjectUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import java.util.Arrays;
import java.util.List;
/**
 * Small argument-validation and string helpers shared by the publish connector.
 */
public class ObjectUtils {

    private ObjectUtils() {
        // Static utility class; not meant to be instantiated.
    }

    /**
     * Validates that a required string parameter is present.
     *
     * @param paramName name used in the error message
     * @param param     value to check
     * @throws IllegalArgumentException if {@code param} is null or empty
     */
    public static void checkNotNull(String paramName, String param) {
        if (param == null || param.isEmpty()) {
            // The check also rejects empty strings, so the message says so.
            throw new IllegalArgumentException(paramName + " cannot be null or empty");
        }
    }

    /**
     * Validates an arbitrary argument precondition.
     *
     * @throws IllegalArgumentException if {@code condition} is false
     */
    public static void checkArgCondition(String paramName, boolean condition) {
        if (!condition) {
            throw new IllegalArgumentException(paramName + " is invalid");
        }
    }

    /**
     * Splits a comma-separated string into its elements. The input is trimmed
     * as a whole, but individual elements are not trimmed.
     */
    public static List<String> convertCommaSeparatedStringToList(String str) {
        return Arrays.asList(str.trim().split("\\,"));
    }
}
| 4,438 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/core/EventFilter.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.core;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import org.apache.log4j.Logger;
import rx.functions.Func1;
/**
 * Builds a per-connection event filter from SSE request parameters. A "filter"
 * parameter takes precedence over a "subscriptionId" parameter; when neither is
 * present (or the parameter map is null) every event passes.
 */
public class EventFilter implements Func1<Map<String, List<String>>, Func1<String, Boolean>> {

    private static final Logger LOGGER = Logger.getLogger(EventFilter.class);

    private final String clientId;

    /**
     * @param clientId identifier prefixed onto subscription ids when matching events.
     * @throws IllegalArgumentException if clientId is null or empty
     */
    public EventFilter(String clientId) {
        ObjectUtils.checkNotNull("clientId", clientId);
        this.clientId = clientId;
    }

    @Override
    public Func1<String, Boolean> call(Map<String, List<String>> parameters) {
        if (parameters == null) {
            return event -> true;
        }
        if (parameters.containsKey(MantisSourceJobConstants.FILTER_PARAM_NAME)) {
            // Explicit comma-separated term list supplied by the client.
            String filterBy = parameters.get(MantisSourceJobConstants.FILTER_PARAM_NAME).get(0);
            List<String> terms = convertCommaSeparatedEventsToList(filterBy);
            LOGGER.info("terms: " + terms);
            return new SourceEventFilter(terms);
        }
        if (parameters.containsKey(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME)) {
            // Match on the prefixed subscription id.
            String subId = parameters.get(MantisSourceJobConstants.SUBSCRIPTION_ID_PARAM_NAME).get(0);
            List<String> terms = new ArrayList<>();
            terms.add(clientId + "_" + subId);
            return new SourceEventFilter(terms);
        }
        // No recognized filter parameter: pass everything through.
        return event -> true;
    }

    private List<String> convertCommaSeparatedEventsToList(String filterBy) {
        if (filterBy == null || filterBy.isEmpty()) {
            return new ArrayList<>();
        }
        return Arrays.asList(filterBy.split("\\s*,\\s*"));
    }

    /** Accepts an event only when it contains every term as a substring. */
    private static class SourceEventFilter implements Func1<String, Boolean> {

        private final List<String> terms;

        SourceEventFilter(List<String> terms) {
            this.terms = terms;
            LOGGER.info("Initiated with terms" + terms);
        }

        @Override
        public Boolean call(String data) {
            if (data == null || data.isEmpty()) {
                return false;
            }
            for (String term : terms) {
                if (!data.contains(term)) {
                    return false;
                }
            }
            return true;
        }
    }
}
| 4,439 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettyExceptionHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultHttpResponse;
import io.netty.handler.codec.http.HttpHeaderValues;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.codec.http.LastHttpContent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Terminal pipeline handler: replies 400 Bad Request to messages no earlier
 * handler consumed, and 500 Internal Server Error when an exception escapes
 * the pipeline.
 */
public class NettyExceptionHandler extends SimpleChannelInboundHandler<HttpRequest> {

    // Consistency: final, UPPER_CASE name to match the rest of the module.
    private static final Logger LOGGER = LoggerFactory.getLogger(NettyExceptionHandler.class);

    private final Map<String, String> responseHeaders = new HashMap<>();

    public NettyExceptionHandler() {
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, HttpRequest message) {
        // We can't deal with this message: no one earlier in the pipeline handled it.
        LOGGER.warn("Unknown message received: {}", message);
        sendResponse(
                ctx,
                false,
                message + " Bad request received.",
                HttpResponseStatus.BAD_REQUEST,
                responseHeaders);
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        LOGGER.warn("Unhandled exception", cause);
        sendResponse(
                ctx,
                false,
                "Internal server error: " + cause.getMessage(),
                HttpResponseStatus.INTERNAL_SERVER_ERROR,
                responseHeaders);
    }

    /**
     * Sends the given response and status code to the given channel.
     *
     * @param channelHandlerContext identifying the open channel
     * @param keepAlive If the connection should be kept alive.
     * @param message which should be sent
     * @param statusCode of the message to send
     * @param headers additional header values
     * @return a future completed once the last content has been flushed
     */
    public static CompletableFuture<Void> sendResponse(
            ChannelHandlerContext channelHandlerContext,
            boolean keepAlive,
            String message,
            HttpResponseStatus statusCode,
            Map<String, String> headers) {
        HttpResponse response = new DefaultHttpResponse(HTTP_1_1, statusCode);
        response.headers().set(CONTENT_TYPE, "application/json");
        for (Map.Entry<String, String> headerEntry : headers.entrySet()) {
            response.headers().set(headerEntry.getKey(), headerEntry.getValue());
        }
        if (keepAlive) {
            response.headers().set(CONNECTION, HttpHeaderValues.KEEP_ALIVE);
        }
        byte[] buf = message.getBytes(StandardCharsets.UTF_8);
        ByteBuf b = Unpooled.copiedBuffer(buf);
        HttpUtil.setContentLength(response, buf.length);
        // Write the initial line and the headers, then the body, then flush the
        // terminating empty content.
        channelHandlerContext.write(response);
        channelHandlerContext.write(b);
        ChannelFuture lastContentFuture = channelHandlerContext.writeAndFlush(LastHttpContent.EMPTY_LAST_CONTENT);
        // Close the connection if no keep-alive is needed.
        if (!keepAlive) {
            lastContentFuture.addListener(ChannelFutureListener.CLOSE);
        }
        return toCompletableFuture(lastContentFuture);
    }

    // Bridges Netty's ChannelFuture into a CompletableFuture, preserving the cause.
    private static CompletableFuture<Void> toCompletableFuture(final ChannelFuture channelFuture) {
        final CompletableFuture<Void> completableFuture = new CompletableFuture<>();
        channelFuture.addListener(future -> {
            if (future.isSuccess()) {
                completableFuture.complete(null);
            } else {
                completableFuture.completeExceptionally(future.cause());
            }
        });
        return completableFuture;
    }
}
| 4,440 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceHttpServer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.QueryRegistry;
import rx.subjects.Subject;
/**
 * Lifecycle contract for the embedded HTTP server backing the push source.
 * Implementations are expected to be initialized once, then started, then
 * eventually shut down.
 */
public interface SourceHttpServer {

    // Interface fields are implicitly public static final.
    String METRIC_GROUP = "PushServer";

    /** Coarse lifecycle states an implementation may expose. */
    enum State {
        NOTINITED,
        INITED,
        RUNNING,
        SHUTDOWN
    }

    /**
     * Prepares the server to publish incoming events into {@code eventSubject}
     * and answer subscription queries from {@code registry} on {@code port}.
     *
     * @throws InterruptedException if initialization is interrupted
     */
    void init(QueryRegistry registry, Subject<String, String> eventSubject, int port) throws InterruptedException;

    /** Starts serving; must be called after {@link #init}. */
    void startServer();

    /** Stops the server and releases its resources. */
    void shutdownServer();
}
| 4,441 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/NettySourceHttpServer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.subjects.Subject;
/**
 * Netty-based {@link SourceHttpServer}: binds a server socket on a dedicated
 * thread and pushes received events into the source's subject.
 */
public class NettySourceHttpServer implements SourceHttpServer {

    private static final Logger LOGGER = LoggerFactory.getLogger(NettySourceHttpServer.class);

    private final NioEventLoopGroup workerGroup;
    private final NioEventLoopGroup bossGroup;
    // Runs nettyServerRunnable; kept so shutdownServer can stop it (it leaked before).
    private ExecutorService serverExecutor;
    private Runnable nettyServerRunnable;
    private volatile boolean isInitialized = false;
    private volatile boolean isStarted = false;

    public NettySourceHttpServer(Context context, int threadCount) {
        // NOTE(review): threadCount sizes the boss (accept) group while the worker
        // group uses the default size; a boss group usually needs only one thread,
        // so these may be swapped — confirm intent before changing.
        this.bossGroup = new NioEventLoopGroup(threadCount);
        this.workerGroup = new NioEventLoopGroup();
    }

    @Override
    public void init(QueryRegistry queryRegistry, Subject<String, String> eventSubject, int port) {
        if (!isInitialized) {
            nettyServerRunnable = () -> {
                try {
                    ServerBootstrap b = new ServerBootstrap();
                    b.option(ChannelOption.SO_BACKLOG, 1024);
                    b.group(bossGroup, workerGroup)
                            .channel(NioServerSocketChannel.class)
                            .childHandler(new HttpServerInitializer(queryRegistry, eventSubject));
                    Channel ch = b.bind(port).sync().channel();
                    // Block this thread until the server channel closes.
                    ch.closeFuture().sync();
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so the executor can observe it.
                    Thread.currentThread().interrupt();
                    LOGGER.error("Netty server thread interrupted", e);
                } catch (Exception e) {
                    // Log the full stack trace; previously only e.getMessage() was logged.
                    LOGGER.error("Netty server failed", e);
                } finally {
                    bossGroup.shutdownGracefully();
                    workerGroup.shutdownGracefully();
                }
            };
            isInitialized = true;
        }
    }

    @Override
    public void startServer() {
        if (isInitialized && !isStarted) {
            serverExecutor = Executors.newSingleThreadExecutor();
            serverExecutor.submit(nettyServerRunnable);
            Runtime.getRuntime().addShutdownHook(new Thread(this::shutdownServer));
            isStarted = true;
        } else {
            throw new IllegalStateException("Server already started");
        }
    }

    @Override
    public void shutdownServer() {
        if (isInitialized && isStarted) {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
            // Stop the thread running the bootstrap; this executor used to leak.
            if (serverExecutor != null) {
                serverExecutor.shutdownNow();
            }
        }
    }
}
| 4,442 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpServerInitializer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpServerCodec;
import rx.subjects.Subject;
/**
 * Assembles the Netty channel pipeline for the push HTTP source: HTTP codec,
 * decompression, aggregation, the source handler, and a terminal exception
 * handler.
 */
public class HttpServerInitializer extends ChannelInitializer<SocketChannel> {

    private static final int DEFAULT_MAX_INITIAL_LENGTH = 4096;
    private static final int DEFAULT_MAX_HEADER_SIZE = 16384;
    private static final int DEFAULT_MAX_CHUNK_SIZE = 32768;
    private static final int DEFAULT_MAX_CONTENT_LENGTH = 1048576;

    private final QueryRegistry registry;
    private final Subject<String, String> eventSubject;

    public HttpServerInitializer(QueryRegistry registry, Subject<String, String> eventSubject) {
        this.registry = registry;
        this.eventSubject = eventSubject;
    }

    @Override
    protected void initChannel(SocketChannel ch) {
        // Handler order matters: decode, inflate, aggregate, then application logic.
        ch.pipeline()
                .addLast("http", new HttpServerCodec(DEFAULT_MAX_INITIAL_LENGTH, DEFAULT_MAX_HEADER_SIZE, DEFAULT_MAX_CHUNK_SIZE))
                .addLast("inflater", new HttpContentDecompressor())
                .addLast("aggregator", new HttpObjectAggregator(DEFAULT_MAX_CONTENT_LENGTH))
                .addLast(new HttpSourceServerHandler(registry, eventSubject))
                .addLast(new NettyExceptionHandler());
    }
}
| 4,443 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/SourceSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import java.util.List;
import java.util.Map;
import io.mantisrx.connector.publish.core.EventFilter;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.ServerSentEventsSink;
import io.mantisrx.runtime.sink.Sink;
import io.mantisrx.runtime.sink.predicate.Predicate;
import rx.Observable;
import rx.functions.Func2;
/**
 * Server-sent-events sink for the push source: exposes the job's string stream
 * over SSE, filtering each connection's events with an {@link EventFilter}
 * keyed by the configured client id.
 */
public class SourceSink implements Sink<String> {

    // Client id used to build per-subscription filter terms ("<clientId>_<subId>").
    private final String clientId;

    // Hooks invoked around each SSE connection; default to no-ops.
    private Func2<Map<String, List<String>>, Context, Void> preProcessor = new NoOpProcessor();
    private Func2<Map<String, List<String>>, Context, Void> postProcessor = new NoOpProcessor();

    /** Pre/post-processor that does nothing. */
    static class NoOpProcessor implements Func2<Map<String, List<String>>, Context, Void> {

        @Override
        public Void call(Map<String, List<String>> t1, Context t2) {
            return null;
        }
    }

    /**
     * @param preProcessor   invoked before each SSE connection is processed
     * @param postProcessor  invoked after each SSE connection is processed
     * @param mantisClientId client id used for event filtering
     */
    public SourceSink(Func2<Map<String, List<String>>, Context, Void> preProcessor,
                      Func2<Map<String, List<String>>, Context, Void> postProcessor, String mantisClientId) {
        this.postProcessor = postProcessor;
        this.preProcessor = preProcessor;
        this.clientId = mantisClientId;
    }

    /**
     * Wires the incoming observable (empty strings dropped) into an SSE sink
     * whose per-connection predicate is materialized by {@link EventFilter}.
     */
    @Override
    public void call(Context context, PortRequest portRequest,
                     Observable<String> observable) {
        // Drop empty payloads before they reach any connection.
        observable = observable.filter(t1 -> !t1.isEmpty());

        ServerSentEventsSink<String> sink = new ServerSentEventsSink.Builder<String>()
                .withEncoder(data -> data)
                .withPredicate(new Predicate<>("description", new EventFilter(clientId)))
                .withRequestPreprocessor(preProcessor)
                .withRequestPostprocessor(postProcessor)
                .build();

        // NOTE(review): this bare subscribe() adds an observer-less subscription in
        // addition to the sink's own; presumably it keeps the upstream flowing even
        // with no SSE clients connected — confirm it is intentional before removing.
        observable.subscribe();
        sink.call(context, portRequest, observable);
    }
}
| 4,444 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/PushHttpSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import com.mantisrx.common.utils.MantisSourceJobConstants;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.WorkerMap;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.reactivx.mantis.operators.DropOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
public class PushHttpSource implements Source<String> {
private static final Logger LOGGER = LoggerFactory.getLogger(PushHttpSource.class);
private final Subject<String, String> eventSubject = new SerializedSubject<>(PublishSubject.create());
private final QueryRegistry queryRegistry;
private final int serverPort;
private AtomicReference<WorkerMap> workerMapAtomicReference = new AtomicReference<>(new WorkerMap(new HashMap<>()));
private static final String NETTY_THREAD_COUNT_PARAM_NAME = "nettyThreadCount";
public PushHttpSource(QueryRegistry registry, int serverPort) {
this.queryRegistry = registry;
this.serverPort = serverPort;
}
@Override
public Observable<Observable<String>> call(Context context, Index index) {
return Observable.just(eventSubject
.lift(new DropOperator<>("incoming_" + PushHttpSource.class.getCanonicalName() + "_batch"))
.onErrorResumeNext((e) -> Observable.empty()));
}
@Override
public void init(Context context, Index index) {
LOGGER.info("Initializing PushHttpSource");
int threadCount = (Integer) context.getParameters().get(NETTY_THREAD_COUNT_PARAM_NAME, 4);
LOGGER.info("PushHttpSource server starting at Port " + serverPort);
SourceHttpServer server = new NettySourceHttpServer(context, threadCount);
try {
server.init(queryRegistry, eventSubject, serverPort);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
server.startServer();
context.getWorkerMapObservable().subscribeOn(Schedulers.io()).subscribe((workerMap) -> {
LOGGER.info("Got WorkerUpdate" + workerMap);
workerMapAtomicReference.set(workerMap);
});
LOGGER.info("PushHttpSource server started");
}
@Override
public List<ParameterDefinition<?>> getParameters() {
List<ParameterDefinition<?>> parameters = new ArrayList<>();
parameters.add(new IntParameter()
.name(NETTY_THREAD_COUNT_PARAM_NAME)
.validator(Validators.range(1, 8))
.defaultValue(4)
.build());
parameters.add(new StringParameter()
.name(MantisSourceJobConstants.ZONE_LIST_PARAMETER_NAME)
.description("list of Zones")
.validator(Validators.alwaysPass())
.defaultValue("")
.build());
parameters.add(new StringParameter()
.name(MantisSourceJobConstants.TARGET_APP_PARAMETER_NAME)
.description("target app")
.validator(Validators.alwaysPass())
.defaultValue("")
.build());
parameters.add(new StringParameter()
.name(MantisSourceJobConstants.TARGET_ASG_CSV_PARAM)
.description("target ASGs CSV regex")
.validator(Validators.alwaysPass())
.defaultValue("")
.build());
return parameters;
}
}
| 4,445 |
0 | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source | Create_ds/mantis-connectors/mantis-connector-publish/src/main/java/io/mantisrx/connector/publish/source/http/HttpSourceServerHandler.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.publish.source.http;
import static io.netty.handler.codec.http.HttpResponseStatus.OK;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import java.util.List;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
import io.mantisrx.common.metrics.spectator.MetricGroupId;
import io.mantisrx.connector.publish.core.QueryRegistry;
import io.mantisrx.publish.proto.MantisServerSubscription;
import io.mantisrx.publish.proto.MantisServerSubscriptionEnvelope;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpMessage;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.util.AsciiString;
import io.netty.util.CharsetUtil;
import mantis.io.reactivex.netty.protocol.http.server.UriInfoHolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.subjects.Subject;
public class HttpSourceServerHandler extends SimpleChannelInboundHandler<HttpObject> {
private static final Logger LOGGER = LoggerFactory.getLogger(HttpSourceServerHandler.class);
private static final byte[] CONTENT = {'O', 'K'};
private static final AsciiString CONTENT_TYPE = AsciiString.cached("Content-Type");
private static final AsciiString CONTENT_LENGTH = AsciiString.cached("Content-Length");
private static final AsciiString CONNECTION = AsciiString.cached("Connection");
private static final AsciiString KEEP_ALIVE = AsciiString.cached("keep-alive");
ObjectMapper mapper = new ObjectMapper();
private final Counter getRequestCount;
private final Counter unknownRequestCount;
private final Counter postRequestCount;
MetricGroupId metricGroupId;
private final QueryRegistry registry;
private final Subject<String, String> eventSubject;
public HttpSourceServerHandler(QueryRegistry queryRegistry, Subject<String, String> eventSubject) {
registry = queryRegistry;
this.eventSubject = eventSubject;
metricGroupId = new MetricGroupId(SourceHttpServer.METRIC_GROUP + "_incoming");
Metrics m = new Metrics.Builder()
.id(metricGroupId)
.addCounter("GetRequestCount")
.addCounter("PostRequestCount")
.addCounter("UnknownRequestCount")
.build();
m = MetricsRegistry.getInstance().registerAndGet(m);
getRequestCount = m.getCounter("GetRequestCount");
unknownRequestCount = m.getCounter("UnknownRequestCount");
postRequestCount = m.getCounter("PostRequestCount");
}
@Override
public void channelReadComplete(ChannelHandlerContext ctx) {
ctx.flush();
}
@Override
protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) {
if (msg instanceof HttpRequest) {
HttpRequest req = (HttpRequest) msg;
boolean keepAlive = HttpUtil.isKeepAlive(req);
if (req.method().equals(HttpMethod.GET)) {
getRequestCount.increment();
UriInfoHolder uriInfoHolder = new UriInfoHolder(req.uri());
List<MantisServerSubscription> currentSubscriptions =
registry.getCurrentSubscriptions(uriInfoHolder.getQueryParameters());
try {
byte[] serializedSubs =
mapper.writeValueAsBytes(new MantisServerSubscriptionEnvelope(currentSubscriptions));
FullHttpResponse response =
new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(serializedSubs));
response.headers().set(CONTENT_TYPE, "application/json");
response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes());
if (!keepAlive) {
ctx.write(response).addListener(ChannelFutureListener.CLOSE);
} else {
response.headers().set(CONNECTION, KEEP_ALIVE);
ctx.write(response);
}
} catch (Exception e) {
LOGGER.error("problem reading from channel", e);
}
} else {
if (req.method().equals(HttpMethod.POST)) {
postRequestCount.increment();
FullHttpMessage aggregator = (FullHttpMessage) msg;
ByteBuf content = aggregator.content();
String data = content.toString(CharsetUtil.UTF_8);
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("got data " + data);
}
eventSubject.onNext(data);
FullHttpResponse response =
new DefaultFullHttpResponse(HTTP_1_1, OK, Unpooled.wrappedBuffer(CONTENT));
response.headers().set(CONTENT_TYPE, "text/plain");
response.headers().setInt(CONTENT_LENGTH, response.content().readableBytes());
if (!keepAlive) {
ctx.write(response).addListener(ChannelFutureListener.CLOSE);
} else {
response.headers().set(CONNECTION, KEEP_ALIVE);
ctx.write(response);
}
} else {
unknownRequestCount.increment();
}
}
}
}
}
| 4,446 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/StageOverrideParameters.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import io.mantisrx.connector.iceberg.sink.config.SinkProperties;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties;
import io.mantisrx.runtime.parameter.Parameters;
/**
 * Test fixture producing the minimal set of required sink parameters
 * (catalog/database/table plus writer flush frequency) for stage tests.
 */
public class StageOverrideParameters {

    // Utility class: no instances.
    private StageOverrideParameters() {
    }

    /** Builds a {@link Parameters} instance with every required sink/writer override populated. */
    public static Parameters newParameters() {
        Map<String, Object> state = new HashMap<>();
        Set<String> required = new HashSet<>();
        require(state, required, SinkProperties.SINK_CATALOG, "catalog");
        require(state, required, SinkProperties.SINK_DATABASE, "database");
        require(state, required, SinkProperties.SINK_TABLE, "table");
        require(state, required, WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC, "5000");
        // The same name set doubles as both "required" and "user-supplied" parameters.
        return new Parameters(state, required, required);
    }

    // Marks a parameter as required and records its override value.
    private static void require(Map<String, Object> state, Set<String> required, String name, String value) {
        required.add(name);
        state.put(name, value);
    }
}
| 4,447 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecsTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.codecs;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import java.util.Collections;
import io.mantisrx.common.codec.Codec;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.Metrics;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
class IcebergCodecsTest {
private static final Schema SCHEMA =
new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
private Codec<Record> recordCodec;
private Codec<DataFile> dataFileCodec;
    @BeforeEach
    void setUp() {
        // Fresh codecs per test, bound to the single-int-column schema declared above.
        this.recordCodec = IcebergCodecs.record(SCHEMA);
        this.dataFileCodec = IcebergCodecs.dataFile();
    }
    // Round-trips a Record through the record codec and expects an equal Record back.
    @Test
    void shouldEncodeAndDecodeRecord() {
        Record expected = GenericRecord.create(SCHEMA);
        expected.setField("id", 1);
        byte[] encoded = recordCodec.encode(expected);
        Record actual = recordCodec.decode(encoded);
        assertEquals(expected, actual);
    }
    // Round-trips a DataFile through the codec. Compares individual fields rather than
    // whole-object equality — presumably DataFile lacks a usable equals(); verify against
    // the Iceberg API if this ever switches to assertEquals(expected, actual).
    @Test
    void shouldEncodeAndDecodeDataFile() {
        PartitionSpec spec = PartitionSpec.unpartitioned();
        DataFile expected = DataFiles.builder(spec)
                .withPath("/path/filename.parquet")
                .withFileSizeInBytes(1)
                .withPartition(null)
                .withMetrics(mock(Metrics.class))
                .withSplitOffsets(Collections.singletonList(1L))
                .build();
        byte[] encoded = dataFileCodec.encode(expected);
        DataFile actual = dataFileCodec.decode(encoded);
        assertEquals(expected.path(), actual.path());
        assertEquals(expected.fileSizeInBytes(), actual.fileSizeInBytes());
        assertEquals(expected.partition(), actual.partition());
        assertEquals(expected.splitOffsets(), actual.splitOffsets());
    }
} | 4,448 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStageTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.lifecycle.ServiceLocator;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Types;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
import rx.schedulers.TestScheduler;
class IcebergWriterStageTest {
private TestScheduler scheduler;
private TestSubscriber<DataFile> subscriber;
private IcebergWriterStage.Transformer transformer;
private Catalog catalog;
private Table table;
private Context context;
private IcebergWriter writer;
private Partitioner partitioner;
private Observable<DataFile> flow;
private static final Schema SCHEMA =
new Schema(Types.NestedField.required(1, "id", Types.IntegerType.get()));
private Record record;
    @BeforeEach
    void setUp() {
        // A single reusable record matching SCHEMA (one int column "id").
        record = GenericRecord.create(SCHEMA);
        record.setField("id", 1);
        this.scheduler = new TestScheduler();
        this.subscriber = new TestSubscriber<>();
        // Writer
        Parameters parameters = StageOverrideParameters.newParameters();
        WriterConfig config = new WriterConfig(parameters, mock(Configuration.class));
        WriterMetrics metrics = new WriterMetrics();
        this.writer = spy(FakeIcebergWriter.class);
        this.partitioner = mock(Partitioner.class);
        // MAX_VALUE length makes every size check exceed the size threshold by default;
        // individual tests override length() when they need the opposite.
        when(this.writer.length()).thenReturn(Long.MAX_VALUE);
        // Both the transformer's schedulers are the test scheduler so virtual time drives everything.
        this.transformer = new IcebergWriterStage.Transformer(
                config,
                metrics,
                this.writer,
                this.partitioner,
                this.scheduler,
                this.scheduler);
        // Catalog
        ServiceLocator serviceLocator = mock(ServiceLocator.class);
        when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class));
        this.catalog = mock(Catalog.class);
        this.table = mock(Table.class);
        when(this.table.spec()).thenReturn(PartitionSpec.unpartitioned());
        when(this.catalog.loadTable(any())).thenReturn(this.table);
        when(serviceLocator.service(Catalog.class)).thenReturn(this.catalog);
        when(serviceLocator.service(PartitionerFactory.class)).thenReturn(mock(PartitionerFactory.class));
        // Mantis Context
        this.context = mock(Context.class);
        when(this.context.getParameters()).thenReturn(parameters);
        when(this.context.getServiceLocator()).thenReturn(serviceLocator);
        // Flow
        // Default flow under test: one record per virtual millisecond through the transformer.
        Observable<Record> source = Observable.interval(1, TimeUnit.MILLISECONDS, this.scheduler)
                .map(i -> record);
        this.flow = source.compose(this.transformer);
    }
    // A partition-key change in the incoming records should close the current file
    // (emitting a DataFile) and open a new one, even when no size/time threshold fired.
    @Test
    void shouldCloseOnNewPartition() throws IOException {
        PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("id").build();
        when(table.spec()).thenReturn(spec);
        when(catalog.loadTable(any())).thenReturn(table);
        Record recordWithNewPartition = GenericRecord.create(SCHEMA);
        recordWithNewPartition.setField("id", 2);
        // Identity partitioning.
        when(partitioner.partition(record)).thenReturn(record);
        when(partitioner.partition(recordWithNewPartition)).thenReturn(recordWithNewPartition);
        Observable<Record> source = Observable.just(record, record, recordWithNewPartition, record)
                .concatMap(r -> Observable.just(r).delay(1, TimeUnit.MILLISECONDS, scheduler));
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Same partition; no other thresholds (size, time) met.
        scheduler.advanceTimeBy(2, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // New partition detected
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        // New partition detected
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(2);
        verify(writer, times(4)).write(any());
        // Two closes for [record, record] and [recordWithNewPartition]; a file is still open from the latest write.
        verify(writer, times(2)).close();
        verify(writer, times(3)).open(any());
    }
    // With writer.length() at MAX_VALUE (from setUp), each size check trips the threshold.
    // The assertions imply the size is only checked every 100 writes (the row-group-size
    // config) — a DataFile is emitted at write 100 and write 200. TODO confirm the 100
    // value against WriterConfig defaults.
    @Test
    void shouldCloseOnSizeThreshold() throws IOException {
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Greater than size threshold, but not yet checked at row-group-size config.
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        scheduler.advanceTimeBy(99, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(2);
        subscriber.assertNoTerminalEvent();
        verify(writer, times(200)).write(any());
        verify(writer, times(2)).close();
    }
    // When the writer reports a tiny length, the periodic size check must not close the file.
    @Test
    void shouldNotCloseWhenUnderSizeThreshold() throws IOException {
        when(writer.length()).thenReturn(1L);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Size is checked at row-group-size config, but under size-threshold, so no-op.
        scheduler.advanceTimeBy(100, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        subscriber.assertNoTerminalEvent();
        verify(writer, times(100)).write(any());
        verify(writer, times(0)).close();
    }
    // A small file that never hits the size threshold must still be flushed once the
    // time threshold (5000 ms of virtual time here) elapses.
    @Test
    void shouldCloseWhenLowVolumeOnTimeThreshold() throws IOException {
        when(writer.length()).thenReturn(1L);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Size is checked at row-group-size config, but under size threshold, so no-op.
        scheduler.advanceTimeBy(999, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Hits time threshold; proceed to close.
        scheduler.advanceTimeBy(4000, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        subscriber.assertNoTerminalEvent();
        verify(writer, times(5000)).write(any());
        verify(writer, times(1)).close();
    }
    // A slower source (one record per 500 ms) still flushes on the time threshold
    // as long as at least one record has been written in the window.
    @Test
    void shouldCloseWhenHighVolumeOnTimeThreshold() throws IOException {
        Observable<Record> source = Observable.interval(500, TimeUnit.MILLISECONDS, scheduler)
                .map(i -> record);
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        // Over the size threshold, but not yet checked at row-group-size config.
        scheduler.advanceTimeBy(500, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        // Hits time threshold and there's data to write; proceed to close.
        scheduler.advanceTimeBy(4500, TimeUnit.MILLISECONDS);
        subscriber.assertValueCount(1);
        subscriber.assertNoTerminalEvent();
        verify(writer, times(10)).write(any());
        verify(writer, times(1)).close();
    }
    // If no record arrived before the time threshold fires, nothing is opened,
    // written, or closed — the timer tick is a pure no-op.
    @Test
    void shouldNoOpWhenNoDataOnTimeThreshold() throws IOException {
        // Low volume stream.
        Observable<Record> source = Observable.interval(10_000, TimeUnit.MILLISECONDS, scheduler)
                .map(i -> record);
        flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(5000, TimeUnit.MILLISECONDS);
        subscriber.assertNoValues();
        subscriber.assertNoErrors();
        subscriber.assertNoTerminalEvent();
        verify(writer, times(0)).open(any());
        verify(writer, times(0)).write(any());
        verify(writer, times(1)).isClosed();
        verify(writer, times(0)).close();
    }
    // An open() failure surfaces as a RuntimeException that terminates the flow;
    // close() is still invoked once (a no-op on the never-opened fake writer).
    @Test
    void shouldNoOpCloseWhenFailedToOpen() throws IOException {
        doThrow(new IOException()).when(writer).open(any());
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertError(RuntimeException.class);
        subscriber.assertTerminalEvent();
        verify(writer).open(any());
        verify(writer, times(1)).isClosed();
        verify(writer, times(1)).close();
    }
    // Per-record write failures must not terminate the stream; the transformer keeps
    // consuming subsequent records.
    @Test
    void shouldContinueOnWriteFailure() {
        doThrow(new RuntimeException()).when(writer).write(any());
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoTerminalEvent();
        scheduler.advanceTimeBy(1, TimeUnit.MILLISECONDS);
        subscriber.assertNoTerminalEvent();
        verify(writer, times(2)).write(any());
    }
    // Intended behavior: source completion should flush and close the open file.
    // Disabled because the transformer's interval timer keeps ticking after the
    // source completes, so the composed flow never terminates under the TestScheduler.
    @Test
    @Disabled("Will never terminate: Source terminates, but timer will continue to tick")
    void shouldCloseOnTerminate() throws IOException {
        Observable<Record> source = Observable.just(record);
        Observable<DataFile> flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.triggerActions();
        subscriber.assertNoErrors();
        verify(writer).open();
        verify(writer).write(any());
        verify(writer, times(2)).isClosed();
        verify(writer, times(1)).close();
    }
    // Stage init succeeds when the catalog resolves the configured table (mocked in setUp).
    @Test
    void shouldInitializeWithExistingTable() {
        IcebergWriterStage stage = new IcebergWriterStage();
        assertDoesNotThrow(() -> stage.init(context));
    }
    // Stage init propagates the catalog's failure when the table cannot be loaded.
    @Test
    void shouldFailToInitializeWithMissingTable() {
        when(catalog.loadTable(any())).thenThrow(new RuntimeException());
        IcebergWriterStage stage = new IcebergWriterStage();
        assertThrows(RuntimeException.class, () -> stage.init(context));
    }
private static class FakeIcebergWriter implements IcebergWriter {
private static final DataFile DATA_FILE = new DataFiles.Builder()
.withPath("/datafile.parquet")
.withFileSizeInBytes(1L)
.withRecordCount(1L)
.build();
private final Object object;
private Object fileAppender;
private StructLike partitionKey;
public FakeIcebergWriter() {
this.object = new Object();
this.fileAppender = null;
}
@Override
public void open() throws IOException {
open(null);
}
@Override
public void open(StructLike newPartitionKey) throws IOException {
fileAppender = object;
partitionKey = newPartitionKey;
}
@Override
public void write(Record record) {
}
@Override
public DataFile close() throws IOException {
fileAppender = null;
return DATA_FILE;
}
@Override
public boolean isClosed() {
return fileAppender == null;
}
@Override
public long length() {
return 0;
}
@Override
public StructLike getPartitionKey() {
return partitionKey;
}
}
} | 4,449 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/test/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStageTest.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import io.mantisrx.connector.iceberg.sink.StageOverrideParameters;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.lifecycle.ServiceLocator;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import rx.Observable;
import rx.observers.TestSubscriber;
import rx.schedulers.TestScheduler;
class IcebergCommitterStageTest {
private TestScheduler scheduler;
private TestSubscriber<Map<String, Object>> subscriber;
private Catalog catalog;
private Context context;
private IcebergCommitter committer;
private IcebergCommitterStage.Transformer transformer;
    @BeforeEach
    void setUp() {
        this.scheduler = new TestScheduler();
        this.subscriber = new TestSubscriber<>();
        // Committer transformer under test, driven entirely by the test scheduler.
        Parameters parameters = StageOverrideParameters.newParameters();
        CommitterConfig config = new CommitterConfig(parameters);
        CommitterMetrics metrics = new CommitterMetrics();
        this.committer = mock(IcebergCommitter.class);
        transformer = new IcebergCommitterStage.Transformer(config, metrics, committer, scheduler);
        // Service locator wired with a mock catalog that resolves an unpartitioned table.
        ServiceLocator serviceLocator = mock(ServiceLocator.class);
        when(serviceLocator.service(Configuration.class)).thenReturn(mock(Configuration.class));
        this.catalog = mock(Catalog.class);
        Table table = mock(Table.class);
        when(table.spec()).thenReturn(PartitionSpec.unpartitioned());
        when(this.catalog.loadTable(any())).thenReturn(table);
        when(serviceLocator.service(Catalog.class)).thenReturn(this.catalog);
        // Mantis context handing out the above parameters and locator.
        this.context = mock(Context.class);
        when(this.context.getParameters()).thenReturn(parameters);
        when(this.context.getServiceLocator()).thenReturn(serviceLocator);
    }
    // DataFiles arriving once per minute are batched and committed on a fixed cadence:
    // the assertions show commits land at 5 and 10 virtual minutes — presumably the
    // committer's configured commit frequency; confirm against CommitterConfig.
    @Test
    void shouldCommitPeriodically() {
        Map<String, Object> summary = new HashMap<>();
        summary.put("test", "test");
        when(committer.commit(any())).thenReturn(summary);
        Observable<DataFile> source = Observable.interval(1, TimeUnit.MINUTES, scheduler)
                .map(i -> mock(DataFile.class));
        Observable<Map<String, Object>> flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(1, TimeUnit.MINUTES);
        subscriber.assertNoValues();
        subscriber.assertNotCompleted();
        scheduler.advanceTimeBy(4, TimeUnit.MINUTES);
        subscriber.assertValueCount(1);
        scheduler.advanceTimeBy(5, TimeUnit.MINUTES);
        subscriber.assertValueCount(2);
        scheduler.advanceTimeBy(1, TimeUnit.MINUTES);
        subscriber.assertValueCount(2);
        subscriber.assertNoErrors();
        verify(committer, times(2)).commit(any());
    }
    // A commit failure is swallowed (no summary emitted, no error) and the flow stays alive.
    @Test
    void shouldContinueOnCommitFailure() {
        doThrow(new RuntimeException()).when(committer).commit(any());
        Observable<DataFile> source = Observable.interval(1, TimeUnit.MINUTES, scheduler)
                .map(i -> mock(DataFile.class));
        Observable<Map<String, Object>> flow = source.compose(transformer);
        flow.subscribeOn(scheduler).subscribe(subscriber);
        scheduler.advanceTimeBy(5, TimeUnit.MINUTES);
        subscriber.assertNoErrors();
        subscriber.assertNotCompleted();
        subscriber.assertValueCount(0);
        verify(committer).commit(any());
    }
    // Stage init succeeds when the catalog resolves the configured table (mocked in setUp).
    @Test
    void shouldInitializeWithExistingTable() {
        IcebergCommitterStage stage = new IcebergCommitterStage();
        assertDoesNotThrow(() -> stage.init(context));
    }
    // Stage init propagates the catalog's failure when the table cannot be loaded.
    @Test
    void shouldFailToInitializeWithMissingTable() {
        when(catalog.loadTable(any())).thenThrow(new RuntimeException());
        IcebergCommitterStage stage = new IcebergCommitterStage();
        assertThrows(RuntimeException.class, () -> stage.init(context));
    }
} | 4,450 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/codecs/IcebergCodecs.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.codecs;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import io.mantisrx.common.codec.Codec;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Schema;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.avro.IcebergDecoder;
import org.apache.iceberg.data.avro.IcebergEncoder;
import org.apache.iceberg.exceptions.RuntimeIOException;
/**
* Encoders and decoders for working with Iceberg objects
* such as {@link Record}s and {@link DataFile}s.
*/
/**
 * Encoders and decoders for working with Iceberg objects
 * such as {@link Record}s and {@link DataFile}s.
 */
public class IcebergCodecs {

    // Static factory holder; not instantiable.
    private IcebergCodecs() {
    }

    /**
     * @param schema the Iceberg schema records must conform to
     * @return a codec for encoding/decoding Iceberg Records (Avro-backed).
     */
    public static Codec<Record> record(Schema schema) {
        return new RecordCodec<>(schema);
    }

    /**
     * @return a codec for encoding/decoding DataFiles via Java serialization.
     */
    public static Codec<DataFile> dataFile() {
        return new DataFileCodec();
    }

    /** Avro-backed codec for Iceberg records of a fixed schema. */
    private static class RecordCodec<T> implements Codec<T> {
        private final IcebergEncoder<T> encoder;
        private final IcebergDecoder<T> decoder;

        private RecordCodec(Schema schema) {
            this.encoder = new IcebergEncoder<>(schema);
            this.decoder = new IcebergDecoder<>(schema);
        }

        @Override
        public T decode(byte[] bytes) {
            try {
                return decoder.decode(bytes);
            } catch (IOException e) {
                throw new RuntimeIOException("problem decoding Iceberg record", e);
            }
        }

        @Override
        public byte[] encode(T value) {
            try {
                return encoder.encode(value).array();
            } catch (IOException e) {
                throw new RuntimeIOException("problem encoding Iceberg record", e);
            }
        }
    }

    /** Codec that round-trips DataFile instances using standard Java serialization. */
    private static class DataFileCodec implements Codec<DataFile> {

        @Override
        public DataFile decode(byte[] bytes) {
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                return (DataFile) in.readObject();
            } catch (IOException | ClassNotFoundException e) {
                throw new RuntimeException("Failed to convert bytes to DataFile", e);
            }
        }

        @Override
        public byte[] encode(DataFile value) {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(value);
            } catch (IOException e) {
                throw new RuntimeException("Failed to write bytes for DataFile: " + value, e);
            }
            return bytes.toByteArray();
        }
    }
}
| 4,451 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.config;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_CATALOG;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_DATABASE;
import static io.mantisrx.connector.iceberg.sink.config.SinkProperties.SINK_TABLE;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.parameter.Parameters;
/**
 * Base configuration shared by the Iceberg sink stages; see {@link WriterConfig}
 * and {@link CommitterConfig} for the stage-specific extensions.
 */
public class SinkConfig {

    private final String catalog;
    private final String database;
    private final String table;

    /**
     * Builds the config from the {@link Parameters} of the current Mantis Stage's {@code Context}.
     */
    public SinkConfig(Parameters parameters) {
        catalog = (String) parameters.get(SINK_CATALOG);
        database = (String) parameters.get(SINK_DATABASE);
        table = (String) parameters.get(SINK_TABLE);
    }

    /**
     * Name of the Iceberg Catalog.
     */
    public String getCatalog() {
        return catalog;
    }

    /**
     * Name of the database within the catalog.
     */
    public String getDatabase() {
        return database;
    }

    /**
     * Name of the table within the database.
     */
    public String getTable() {
        return table;
    }
}
| 4,452 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/config/SinkProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.config;
/**
 * Parameter key names and human-readable descriptions for the base Iceberg Sink config.
 */
public class SinkProperties {

    /** Key for the name of the Iceberg Catalog. */
    public static final String SINK_CATALOG = "sinkCatalog";
    public static final String SINK_CATALOG_DESCRIPTION = "Name of Iceberg Catalog";

    /** Key for the name of the database within the Iceberg Catalog. */
    public static final String SINK_DATABASE = "sinkDatabase";
    public static final String SINK_DATABASE_DESCRIPTION = "Name of database within Iceberg Catalog";

    /** Key for the name of the table within the database. */
    public static final String SINK_TABLE = "sinkTable";
    public static final String SINK_TABLE_DESCRIPTION = "Name of table within database";

    private SinkProperties() {
        // Static property holder; never instantiated.
    }
}
| 4,453 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriterStage.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import io.mantisrx.connector.iceberg.sink.codecs.IcebergCodecs;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties;
import io.mantisrx.connector.iceberg.sink.writer.metrics.WriterMetrics;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.Partitioner;
import io.mantisrx.connector.iceberg.sink.writer.partitioner.PartitionerFactory;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.Schema;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.data.GenericRecord;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.types.Comparators;
import org.apache.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.exceptions.Exceptions;
import rx.schedulers.Schedulers;
/**
 * Processing stage which writes records to Iceberg through a backing file store.
 */
public class IcebergWriterStage implements ScalarComputation<Record, DataFile> {

    private static final Logger logger = LoggerFactory.getLogger(IcebergWriterStage.class);

    private Transformer transformer;

    /**
     * Returns a config for this stage which has encoding/decoding semantics and parameter definitions.
     */
    public static ScalarToScalar.Config<Record, DataFile> config() {
        return new ScalarToScalar.Config<Record, DataFile>()
                .description("Writes Iceberg Records to data files and emits DataFile metadata")
                .codec(IcebergCodecs.dataFile())
                .withParameters(parameters());
    }

    /**
     * Returns a list of parameter definitions for this stage.
     */
    public static List<ParameterDefinition<?>> parameters() {
        return Arrays.asList(
                new IntParameter().name(WriterProperties.WRITER_ROW_GROUP_SIZE)
                        .description(WriterProperties.WRITER_ROW_GROUP_SIZE_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_ROW_GROUP_SIZE_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES)
                        .description(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC)
                        .description(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT)
                        .build(),
                new StringParameter().name(WriterProperties.WRITER_FILE_FORMAT)
                        .description(WriterProperties.WRITER_FILE_FORMAT_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(WriterProperties.WRITER_FILE_FORMAT_DEFAULT)
                        .build()
        );
    }

    /**
     * Use this to instantiate a new transformer from a given {@link Context}.
     */
    public static Transformer newTransformer(Context context) {
        Configuration hadoopConfig = context.getServiceLocator().service(Configuration.class);
        WriterConfig config = new WriterConfig(context.getParameters(), hadoopConfig);
        Catalog catalog = context.getServiceLocator().service(Catalog.class);
        TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable());
        Table table = catalog.loadTable(id);
        WorkerInfo workerInfo = context.getWorkerInfo();

        IcebergWriter writer = new DefaultIcebergWriter(config, workerInfo, table);
        WriterMetrics metrics = new WriterMetrics();
        PartitionerFactory partitionerFactory = context.getServiceLocator().service(PartitionerFactory.class);
        Partitioner partitioner = partitionerFactory.getPartitioner(table);

        return new Transformer(config, metrics, writer, partitioner, Schedulers.computation(), Schedulers.io());
    }

    public IcebergWriterStage() {
    }

    /**
     * Uses the provided Mantis Context to inject configuration and opens an underlying file appender.
     * <p>
     * This method depends on a Hadoop Configuration and Iceberg Catalog, both injected
     * from the Context's service locator.
     * <p>
     * Note that this method expects an Iceberg Table to have been previously created out-of-band,
     * otherwise initialization will fail. Users should prefer to create tables
     * out-of-band so they can be versioned alongside their schemas.
     */
    @Override
    public void init(Context context) {
        transformer = newTransformer(context);
    }

    @Override
    public Observable<DataFile> call(Context context, Observable<Record> recordObservable) {
        return recordObservable.compose(transformer);
    }

    /**
     * Reactive Transformer for writing records to Iceberg.
     * <p>
     * Users may use this class independently of this Stage, for example, if they want to
     * {@link Observable#compose(Observable.Transformer)} this transformer with a flow into
     * an existing Stage. One benefit of this co-location is to avoid extra network
     * cost from worker-to-worker communication, trading off debuggability.
     */
    public static class Transformer implements Observable.Transformer<Record, DataFile> {

        // Sentinel emitted when closing a file fails; filtered out before emission downstream.
        private static final DataFile ERROR_DATA_FILE = new DataFiles.Builder()
                .withPath("/error.parquet")
                .withFileSizeInBytes(0L)
                .withRecordCount(0L)
                .build();

        // Schema/record used as a synthetic "tick" merged into the source to drive time-based flushes.
        private static final Schema TIMER_SCHEMA = new Schema(
                Types.NestedField.required(1, "ts_utc_msec", Types.LongType.get()));

        private static final Record TIMER_RECORD = GenericRecord.create(TIMER_SCHEMA);

        private final WriterConfig config;
        private final WriterMetrics metrics;
        private final Partitioner partitioner;
        private final IcebergWriter writer;
        private final Scheduler timerScheduler;
        private final Scheduler transformerScheduler;

        public Transformer(
                WriterConfig config,
                WriterMetrics metrics,
                IcebergWriter writer,
                Partitioner partitioner,
                Scheduler timerScheduler,
                Scheduler transformerScheduler) {
            this.config = config;
            this.metrics = metrics;
            this.writer = writer;
            this.partitioner = partitioner;
            this.timerScheduler = timerScheduler;
            this.transformerScheduler = transformerScheduler;
        }

        /**
         * Opens an IcebergWriter FileAppender, writes records to a file. The appender flushes if any of
         * following criteria is met, in order of precedence:
         * <p>
         * 1. new partition
         * 2. size threshold
         * 3. time threshold
         * <p>
         * New Partition:
         * <p>
         * If the Iceberg Table is partitioned, the appender will check _every_ record to detect a new partition.
         * If there's a new partition, the appender will align the record to the new partition. It does this by
         * closing the current file, opening a new file, and writing the record to that new file.
         * <p>
         * It's _important_ that upstream producers align events to partitions as best as possible. For example,
         * - Given an Iceberg Table partitioned by {@code hour}
         * - 10 producers writing Iceberg Records
         * <p>
         * Each of the 10 producers _should_ try to produce events aligned by the hour.
         * If writes are not well-aligned, then results will be correct, but performance negatively impacted due to
         * frequent opening/closing of files.
         * <p>
         * Writes may be _unordered_ as long as they're aligned by the table's partitioning.
         * <p>
         * Size Threshold:
         * <p>
         * The appender will periodically check the current file size as configured by
         * {@link WriterConfig#getWriterRowGroupSize()}. If it's time to check, then the appender will flush on
         * {@link WriterConfig#getWriterFlushFrequencyBytes()}.
         * <p>
         * Time Threshold:
         * <p>
         * The appender will periodically attempt to flush as configured by
         * {@link WriterConfig#getWriterFlushFrequencyMsec()}. If this threshold is met, the appender will flush
         * only if the appender has an open file. This avoids flushing unnecessarily if there are no events.
         * Otherwise, a flush will happen, even if there are few events in the file. This effectively limits the
         * upper-bound for allowed lateness.
         * <p>
         * Pair this writer with a progressive multipart file uploader backend for better latencies.
         */
        @Override
        public Observable<DataFile> call(Observable<Record> source) {
            Observable<Record> timer = Observable.interval(
                    config.getWriterFlushFrequencyMsec(), TimeUnit.MILLISECONDS, timerScheduler)
                    .map(i -> TIMER_RECORD);

            return source.mergeWith(timer)
                    .observeOn(transformerScheduler)
                    .scan(new Trigger(config.getWriterRowGroupSize()), (trigger, record) -> {
                        if (record.struct().fields().equals(TIMER_SCHEMA.columns())) {
                            trigger.timeout();
                        } else {
                            StructLike partition = partitioner.partition(record);
                            // Only open (if closed) on new events from `source`; exclude timer records.
                            if (writer.isClosed()) {
                                try {
                                    logger.info("opening file for partition {}", partition);
                                    writer.open(partition);
                                    trigger.setPartition(partition);
                                    metrics.increment(WriterMetrics.OPEN_SUCCESS_COUNT);
                                } catch (IOException e) {
                                    metrics.increment(WriterMetrics.OPEN_FAILURE_COUNT);
                                    throw Exceptions.propagate(e);
                                }
                            }

                            // Make sure records are aligned with the partition.
                            if (trigger.isNewPartition(partition)) {
                                trigger.setPartition(partition);
                                try {
                                    DataFile dataFile = writer.close();
                                    trigger.stage(dataFile);
                                    trigger.reset();
                                } catch (IOException | RuntimeException e) {
                                    metrics.increment(WriterMetrics.BATCH_FAILURE_COUNT);
                                    logger.error("error writing DataFile", e);
                                }

                                try {
                                    logger.info("opening file for new partition {}", partition);
                                    writer.open(partition);
                                    metrics.increment(WriterMetrics.OPEN_SUCCESS_COUNT);
                                } catch (IOException e) {
                                    metrics.increment(WriterMetrics.OPEN_FAILURE_COUNT);
                                    throw Exceptions.propagate(e);
                                }
                            }

                            try {
                                writer.write(record);
                                trigger.increment();
                                metrics.increment(WriterMetrics.WRITE_SUCCESS_COUNT);
                            } catch (RuntimeException e) {
                                metrics.increment(WriterMetrics.WRITE_FAILURE_COUNT);
                                // Deliberately debug-level: per-record failures can be high-volume
                                // and are surfaced via WRITE_FAILURE_COUNT. Attach the throwable so
                                // the stack trace is available when debug logging is enabled.
                                logger.debug("error writing record {}", record, e);
                            }
                        }
                        return trigger;
                    })
                    .filter(this::shouldFlush)
                    // Writer can be closed if there are no events, yet timer is still ticking.
                    .filter(trigger -> !writer.isClosed())
                    .map(trigger -> {
                        // Triggered by new partition.
                        if (trigger.hasStagedDataFile()) {
                            DataFile dataFile = trigger.getStagedDataFile().copy();
                            trigger.clearStagedDataFile();
                            return dataFile;
                        }

                        // Triggered by size or time.
                        try {
                            DataFile dataFile = writer.close();
                            trigger.reset();
                            return dataFile;
                        } catch (IOException | RuntimeException e) {
                            metrics.increment(WriterMetrics.BATCH_FAILURE_COUNT);
                            logger.error("error writing DataFile", e);
                            return ERROR_DATA_FILE;
                        }
                    })
                    .filter(dataFile -> !isErrorDataFile(dataFile))
                    .doOnNext(dataFile -> {
                        metrics.increment(WriterMetrics.BATCH_SUCCESS_COUNT);
                        logger.info("writing DataFile: {}", dataFile);
                        metrics.setGauge(WriterMetrics.BATCH_SIZE, dataFile.recordCount());
                        metrics.setGauge(WriterMetrics.BATCH_SIZE_BYTES, dataFile.fileSizeInBytes());
                    })
                    .doOnTerminate(() -> {
                        try {
                            logger.info("closing writer on rx terminate signal");
                            writer.close();
                        } catch (IOException e) {
                            throw Exceptions.propagate(e);
                        }
                    });
        }

        private boolean isErrorDataFile(DataFile dataFile) {
            return Comparators.charSequences().compare(ERROR_DATA_FILE.path(), dataFile.path()) == 0 &&
                    ERROR_DATA_FILE.fileSizeInBytes() == dataFile.fileSizeInBytes() &&
                    ERROR_DATA_FILE.recordCount() == dataFile.recordCount();
        }

        /**
         * Trigger a flush on a repartition event, size threshold, or time threshold.
         */
        private boolean shouldFlush(Trigger trigger) {
            // For size threshold, check the trigger to short-circuit if the count is over the threshold first
            // because implementations of `writer.length()` may be expensive if called in a tight loop.
            return trigger.hasStagedDataFile()
                    || (trigger.isOverCountThreshold() && writer.length() >= config.getWriterFlushFrequencyBytes())
                    || trigger.isTimedOut();
        }

        /**
         * Mutable state threaded through the rx `scan` to decide when a flush is due.
         */
        private static class Trigger {

            private final int countThreshold;

            private int counter;
            private boolean timedOut;
            private StructLike partition;
            private DataFile stagedDataFile;

            Trigger(int countThreshold) {
                this.countThreshold = countThreshold;
            }

            void increment() {
                counter++;
            }

            void timeout() {
                timedOut = true;
            }

            void setPartition(StructLike newPartition) {
                partition = newPartition;
            }

            void reset() {
                counter = 0;
                timedOut = false;
            }

            void stage(DataFile dataFile) {
                this.stagedDataFile = dataFile;
            }

            boolean isOverCountThreshold() {
                return counter >= countThreshold;
            }

            boolean isTimedOut() {
                return timedOut;
            }

            boolean isNewPartition(StructLike newPartition) {
                return partition != null && !partition.equals(newPartition);
            }

            boolean hasStagedDataFile() {
                return stagedDataFile != null;
            }

            DataFile getStagedDataFile() {
                return stagedDataFile;
            }

            void clearStagedDataFile() {
                stagedDataFile = null;
            }

            @Override
            public String toString() {
                return "Trigger{"
                        + " countThreshold=" + countThreshold
                        + ", counter=" + counter
                        + ", timedOut=" + timedOut
                        + ", partition=" + partition
                        + ", stagedDataFile=" + stagedDataFile
                        + '}';
            }
        }
    }
}
| 4,454 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/IcebergWriter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import java.io.IOException;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.exceptions.RuntimeIOException;
/**
 * Contract for a writer that appends Iceberg {@link Record}s to a file and
 * produces {@link DataFile} metadata describing what was written when the
 * file is closed.
 */
public interface IcebergWriter {

    /**
     * Opens a file appender with no partition key (unpartitioned tables).
     */
    void open() throws IOException;

    /**
     * Opens a file appender aligned to the given partition key.
     */
    void open(StructLike newPartitionKey) throws IOException;

    /**
     * Appends a single record to the currently open file.
     */
    void write(Record record);

    /**
     * Closes the currently open file and returns its {@link DataFile} metadata.
     */
    DataFile close() throws IOException, RuntimeIOException;

    /**
     * Returns true when there is no open file appender.
     */
    boolean isClosed();

    /**
     * Returns the current file size in bytes. Implementations may make this
     * expensive (e.g. Parquet), so avoid calling it in a tight loop.
     */
    long length();

    /**
     * Returns the partition key of the current file; may be null for
     * unpartitioned tables.
     */
    StructLike getPartitionKey();
}
| 4,455 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/DefaultIcebergWriter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer;
import java.io.IOException;
import java.util.UUID;
import io.mantisrx.connector.iceberg.sink.writer.config.WriterConfig;
import io.mantisrx.runtime.WorkerInfo;
import org.apache.hadoop.fs.Path;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DataFiles;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.StructLike;
import org.apache.iceberg.Table;
import org.apache.iceberg.data.Record;
import org.apache.iceberg.data.parquet.GenericParquetWriter;
import org.apache.iceberg.exceptions.RuntimeIOException;
import org.apache.iceberg.hadoop.HadoopOutputFile;
import org.apache.iceberg.io.FileAppender;
import org.apache.iceberg.io.OutputFile;
import org.apache.iceberg.parquet.Parquet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Class for writing {@link Record}s to Iceberg via a HDFS-compatible backend.
 * For example, this class may be used with an S3 compatible filesystem library
 * which progressively uploads (multipart) to S3 on each write operation for
 * optimizing latencies.
 * <p>
 * Users have the flexibility to choose the semantics of opening, writing, and closing
 * this Writer, for example, closing the underlying appender after some number
 * of Bytes written and opening a new appender.
 */
public class DefaultIcebergWriter implements IcebergWriter {

    private static final Logger logger = LoggerFactory.getLogger(DefaultIcebergWriter.class);

    private final WriterConfig config;
    private final WorkerInfo workerInfo;

    private final Table table;
    private final PartitionSpec spec;
    private final FileFormat format;

    private FileAppender<Record> appender;
    private OutputFile file;
    private StructLike partitionKey;

    public DefaultIcebergWriter(WriterConfig config, WorkerInfo workerInfo, Table table) {
        this.config = config;
        this.workerInfo = workerInfo;
        this.table = table;
        this.spec = table.spec();
        this.format = FileFormat.valueOf(config.getWriterFileFormat());
    }

    /**
     * Opens a {@link FileAppender} for a specific {@link FileFormat}.
     * <p>
     * A filename is automatically generated for this appender.
     * <p>
     * Supports Parquet. Avro, Orc, and others unsupported.
     */
    @Override
    public void open() throws IOException {
        open(null);
    }

    /**
     * Opens a {@link FileAppender} using a {@link StructLike} partition key
     * for a specific {@link FileFormat}.
     * <p>
     * A filename is automatically generated for this appender.
     * <p>
     * Supports Parquet. Avro, Orc, and others unsupported.
     */
    @Override
    public void open(StructLike newPartitionKey) throws IOException {
        partitionKey = newPartitionKey;
        Path path = new Path(table.location(), generateFilename());
        logger.info("opening new {} file appender {}", format, path);
        file = HadoopOutputFile.fromPath(path, config.getHadoopConfig());

        switch (format) {
            case PARQUET:
                appender = Parquet.write(file)
                        .schema(table.schema())
                        .createWriterFunc(GenericParquetWriter::buildWriter)
                        .setAll(table.properties())
                        .overwrite()
                        .build();
                break;
            case AVRO:
            default:
                throw new UnsupportedOperationException("Cannot write using an unsupported file format " + format);
        }
    }

    @Override
    public void write(Record record) {
        appender.add(record);
    }

    /**
     * Closes the currently opened file appender and builds a DataFile.
     * <p>
     * Users are expected to {@link IcebergWriter#open()} a new file appender for this writer
     * if they want to continue writing. Users can check for status of the file appender
     * using {@link IcebergWriter#isClosed()}.
     *
     * @return a DataFile representing metadata about the records written.
     */
    @Override
    public DataFile close() throws IOException, RuntimeIOException {
        if (appender == null) {
            return null;
        }

        // Calls to FileAppender#close can fail if the backing file system fails to close.
        // For example, this can happen for an S3-backed file system where it might fail
        // to GET the status of the file. The file would have already been closed.
        // Callers should open a new appender.
        try {
            appender.close();

            return DataFiles.builder(spec)
                    .withPath(file.location())
                    .withInputFile(file.toInputFile())
                    .withFileSizeInBytes(appender.length())
                    .withPartition(spec.fields().isEmpty() ? null : partitionKey)
                    .withMetrics(appender.metrics())
                    .withSplitOffsets(appender.splitOffsets())
                    .build();
        } finally {
            // Always release references so isClosed() reflects reality even if close() throws.
            appender = null;
            file = null;
        }
    }

    @Override
    public boolean isClosed() {
        return appender == null;
    }

    /**
     * Returns the current file size (in Bytes) written using this writer's appender.
     * <p>
     * Users should be careful calling this method in a tight loop because it can
     * be expensive depending on the file format, for example in Parquet.
     *
     * @return current file size (in Bytes).
     */
    @Override
    public long length() {
        return appender == null ? 0 : appender.length();
    }

    /**
     * Returns the partition key for which this record is partitioned in an Iceberg table.
     *
     * @return StructLike for partitioned tables; null for unpartitioned tables
     */
    @Override
    public StructLike getPartitionKey() {
        return partitionKey;
    }

    /**
     * Generate a Parquet filename with attributes which make it more friendly to determine
     * the source of the file. For example, if the caller exits unexpectedly and leaves
     * files in the system, it's possible to identify them through a recursive listing.
     */
    private String generateFilename() {
        return generateDataPath(
                generatePartitionPath(
                        format.addExtension(String.format("%s_%s_%s_%s_%s",
                                workerInfo.getJobId(),
                                workerInfo.getStageNumber(),
                                workerInfo.getWorkerIndex(),
                                workerInfo.getWorkerNumber(),
                                UUID.randomUUID()))));
    }

    private String generateDataPath(String partitionPath) {
        return String.format("data/%s", partitionPath);
    }

    private String generatePartitionPath(String filename) {
        if (spec.fields().isEmpty()) {
            return filename;
        }
        // NOTE(review): the leading "/" here yields "data//<partition>/<file>" when combined
        // with generateDataPath — harmless on most filesystems but looks unintended; confirm.
        return String.format("/%s/%s", spec.partitionToPath(partitionKey), filename);
    }
}
| 4,456 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/metrics/WriterMetrics.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.metrics;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
/**
 * Metrics published by the Iceberg writer stage. Metric names are exposed as
 * constants so callers can address counters and gauges by name.
 */
public class WriterMetrics {

    public static final String OPEN_SUCCESS_COUNT = "openSuccessCount";
    public static final String OPEN_FAILURE_COUNT = "openFailureCount";
    public static final String WRITE_SUCCESS_COUNT = "writeSuccessCount";
    public static final String WRITE_FAILURE_COUNT = "writeFailureCount";
    public static final String BATCH_SUCCESS_COUNT = "batchSuccessCount";
    public static final String BATCH_FAILURE_COUNT = "batchFailureCount";
    public static final String BATCH_SIZE = "batchSize";
    public static final String BATCH_SIZE_BYTES = "batchSizeBytes";

    private final Counter openSuccessCount;
    private final Counter openFailureCount;
    private final Counter writeSuccessCount;
    private final Counter writeFailureCount;
    private final Counter batchSuccessCount;
    private final Counter batchFailureCount;
    private final Gauge batchSize;
    private final Gauge batchSizeBytes;

    public WriterMetrics() {
        // Register the metric group, then resolve the registered instances.
        Metrics metrics = new Metrics.Builder()
                .name(WriterMetrics.class.getCanonicalName())
                .addCounter(OPEN_SUCCESS_COUNT)
                .addCounter(OPEN_FAILURE_COUNT)
                .addCounter(WRITE_SUCCESS_COUNT)
                .addCounter(WRITE_FAILURE_COUNT)
                .addCounter(BATCH_SUCCESS_COUNT)
                .addCounter(BATCH_FAILURE_COUNT)
                .addGauge(BATCH_SIZE)
                .addGauge(BATCH_SIZE_BYTES)
                .build();
        metrics = MetricsRegistry.getInstance().registerAndGet(metrics);
        openSuccessCount = metrics.getCounter(OPEN_SUCCESS_COUNT);
        openFailureCount = metrics.getCounter(OPEN_FAILURE_COUNT);
        writeSuccessCount = metrics.getCounter(WRITE_SUCCESS_COUNT);
        writeFailureCount = metrics.getCounter(WRITE_FAILURE_COUNT);
        batchSuccessCount = metrics.getCounter(BATCH_SUCCESS_COUNT);
        batchFailureCount = metrics.getCounter(BATCH_FAILURE_COUNT);
        batchSize = metrics.getGauge(BATCH_SIZE);
        batchSizeBytes = metrics.getGauge(BATCH_SIZE_BYTES);
    }

    /**
     * Sets the named gauge to the given value; unknown names are ignored.
     */
    public void setGauge(final String metric, final long value) {
        if (metric.equals(BATCH_SIZE)) {
            batchSize.set(value);
        } else if (metric.equals(BATCH_SIZE_BYTES)) {
            batchSizeBytes.set(value);
        }
    }

    /**
     * Resolves a metric name to its counter; null for unknown names.
     */
    private Counter counterFor(final String metric) {
        if (metric.equals(OPEN_SUCCESS_COUNT)) {
            return openSuccessCount;
        } else if (metric.equals(OPEN_FAILURE_COUNT)) {
            return openFailureCount;
        } else if (metric.equals(WRITE_SUCCESS_COUNT)) {
            return writeSuccessCount;
        } else if (metric.equals(WRITE_FAILURE_COUNT)) {
            return writeFailureCount;
        } else if (metric.equals(BATCH_SUCCESS_COUNT)) {
            return batchSuccessCount;
        } else if (metric.equals(BATCH_FAILURE_COUNT)) {
            return batchFailureCount;
        }
        return null;
    }

    /**
     * Increments the named counter by one; unknown names are ignored.
     */
    public void increment(final String metric) {
        Counter counter = counterFor(metric);
        if (counter != null) {
            counter.increment();
        }
    }

    /**
     * Increments the named counter by the given value; unknown names are ignored.
     */
    public void increment(final String metric, final long value) {
        Counter counter = counterFor(metric);
        if (counter != null) {
            counter.increment(value);
        }
    }
}
| 4,457 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.config;
import org.apache.iceberg.FileFormat;
/**
 * Property key names and default values for an Iceberg Writer.
 */
public class WriterProperties {

    private WriterProperties() {
    }

    /**
     * Maximum number of rows that should exist in a file before the writer
     * checks its size against the flush threshold.
     */
    public static final String WRITER_ROW_GROUP_SIZE = "writerRowGroupSize";

    public static final int WRITER_ROW_GROUP_SIZE_DEFAULT = 100;

    public static final String WRITER_ROW_GROUP_SIZE_DESCRIPTION =
            String.format("Number of rows to chunk before checking for file size (default: %s)",
                    WRITER_ROW_GROUP_SIZE_DEFAULT);

    /**
     * Flush frequency by size (in Bytes).
     */
    public static final String WRITER_FLUSH_FREQUENCY_BYTES = "writerFlushFrequencyBytes";

    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT = "134217728"; // 128 MiB

    public static final String WRITER_FLUSH_FREQUENCY_BYTES_DESCRIPTION =
            String.format("Flush frequency by size in Bytes (default: %s)",
                    WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT);

    /**
     * Flush frequency by time (in milliseconds).
     */
    public static final String WRITER_FLUSH_FREQUENCY_MSEC = "writerFlushFrequencyMsec";

    // TODO: Change to long.
    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT = "60000"; // 1 min

    public static final String WRITER_FLUSH_FREQUENCY_MSEC_DESCRIPTION =
            String.format("Flush frequency by time in milliseconds (default: %s)",
                    WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT);

    /**
     * File format for writing data files to backing Iceberg store.
     */
    public static final String WRITER_FILE_FORMAT = "writerFileFormat";

    public static final String WRITER_FILE_FORMAT_DEFAULT = FileFormat.PARQUET.name();

    public static final String WRITER_FILE_FORMAT_DESCRIPTION =
            String.format("File format for writing data files to backing Iceberg store (default: %s)",
                    WRITER_FILE_FORMAT_DEFAULT);
}
| 4,458 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/config/WriterConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.config;
import static io.mantisrx.connector.iceberg.sink.writer.config.WriterProperties.*;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.hadoop.conf.Configuration;
/**
 * Config for controlling Iceberg Writer semantics.
 */
public class WriterConfig extends SinkConfig {
    private final int writerRowGroupSize;
    private final long writerFlushFrequencyBytes;
    private final long writerFlushFrequencyMsec;
    private final String writerFileFormat;
    private final Configuration hadoopConfig;
    /**
     * Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
     *
     * @param parameters   stage parameters; the flush-frequency values are declared as Strings
     *                     (see TODOs in {@code WriterProperties}) and parsed to long here, so a
     *                     non-numeric value raises {@link NumberFormatException}
     * @param hadoopConfig Hadoop configuration with metadata for how and where to write files
     */
    public WriterConfig(Parameters parameters, Configuration hadoopConfig) {
        super(parameters);
        this.writerRowGroupSize = (int) parameters.get(
                WRITER_ROW_GROUP_SIZE, WRITER_ROW_GROUP_SIZE_DEFAULT);
        this.writerFlushFrequencyBytes = Long.parseLong((String) parameters.get(
                WRITER_FLUSH_FREQUENCY_BYTES, WRITER_FLUSH_FREQUENCY_BYTES_DEFAULT));
        this.writerFlushFrequencyMsec = Long.parseLong((String) parameters.get(
                WRITER_FLUSH_FREQUENCY_MSEC, WRITER_FLUSH_FREQUENCY_MSEC_DEFAULT));
        this.writerFileFormat = (String) parameters.get(
                WRITER_FILE_FORMAT, WRITER_FILE_FORMAT_DEFAULT);
        this.hadoopConfig = hadoopConfig;
    }
    /**
     * Returns an int representing maximum number of rows that should exist in a file.
     */
    public int getWriterRowGroupSize() {
        return writerRowGroupSize;
    }
    /**
     * Returns a long representing flush frequency by size in Bytes.
     */
    public long getWriterFlushFrequencyBytes() {
        return writerFlushFrequencyBytes;
    }
    /**
     * Returns a long representing flush frequency by time in milliseconds.
     */
    public long getWriterFlushFrequencyMsec() {
        return writerFlushFrequencyMsec;
    }
    /**
     * Returns the file format for Iceberg writers.
     */
    public String getWriterFileFormat() {
        return writerFileFormat;
    }
    /**
     * Returns a Hadoop configuration which has metadata for how and where to write files.
     */
    public Configuration getHadoopConfig() {
        return hadoopConfig;
    }
}
| 4,459 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/NoOpPartitioner.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
/**
 * Partitioner to use for unpartitioned Iceberg tables.
 */
public class NoOpPartitioner implements Partitioner {
    /**
     * Always returns {@code null}: an unpartitioned table has no partition key.
     */
    @Override
    public StructLike partition(StructLike record) {
        return null;
    }
}
| 4,460 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/Partitioner.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.StructLike;
/**
 * Computes the partition key for a record written to an Iceberg table.
 */
public interface Partitioner {
    /**
     * Returns the partition value for {@code record}; implementations for
     * unpartitioned tables may return {@code null} (see {@code NoOpPartitioner}).
     */
    StructLike partition(StructLike record);
}
| 4,461 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/writer/partitioner/PartitionerFactory.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.writer.partitioner;
import org.apache.iceberg.Table;
/**
 * Creates a {@link Partitioner} appropriate for a given Iceberg table.
 */
public interface PartitionerFactory {
    /**
     * Returns a partitioner for the given table.
     */
    Partitioner getPartitioner(Table table);
}
| 4,462 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitter.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.AppendFiles;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
/**
 * Commits {@link DataFile}s for Iceberg tables.
 *
 * This class uses Iceberg's Table API and only supports Table#append operations.
 */
public class IcebergCommitter {
    private final Table table;
    public IcebergCommitter(Table table) {
        this.table = table;
    }
    /**
     * Uses Iceberg's Table API to append DataFiles and commit metadata to Iceberg.
     *
     * @return a mutable copy of the committed snapshot's summary, or an empty map
     *         when the table has no current snapshot.
     */
    public Map<String, Object> commit(List<DataFile> dataFiles) {
        AppendFiles pendingAppend = table.newAppend();
        for (DataFile dataFile : dataFiles) {
            pendingAppend.appendFile(dataFile);
        }
        pendingAppend.commit();
        if (table.currentSnapshot() == null) {
            return new HashMap<>();
        }
        return new HashMap<>(table.currentSnapshot().summary());
    }
}
| 4,463 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/IcebergCommitterStage.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterConfig;
import io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties;
import io.mantisrx.connector.iceberg.sink.committer.metrics.CommitterMetrics;
import io.mantisrx.connector.iceberg.sink.config.SinkProperties;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.ScalarToScalar;
import io.mantisrx.runtime.codec.JacksonCodecs;
import io.mantisrx.runtime.computation.ScalarComputation;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.Table;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.TableIdentifier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.schedulers.Schedulers;
/**
 * Processing stage which commits table metadata to Iceberg on a time interval.
 */
public class IcebergCommitterStage implements ScalarComputation<DataFile, Map<String, Object>> {
    private static final Logger logger = LoggerFactory.getLogger(IcebergCommitterStage.class);
    // Assigned in init(Context); the stage must be initialized before call() is invoked.
    private Transformer transformer;
    /**
     * Returns a config for this stage which has encoding/decoding semantics and parameter definitions.
     */
    public static ScalarToScalar.Config<DataFile, Map<String, Object>> config() {
        return new ScalarToScalar.Config<DataFile, Map<String, Object>>()
                .description("")
                .codec(JacksonCodecs.mapStringObject())
                .withParameters(parameters());
    }
    /**
     * Returns a list of parameter definitions for this stage.
     *
     * Catalog, database, and table are required; commit frequency is optional and
     * defaults to {@code CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT}.
     */
    public static List<ParameterDefinition<?>> parameters() {
        return Arrays.asList(
                new StringParameter().name(SinkProperties.SINK_CATALOG)
                        .description(SinkProperties.SINK_CATALOG_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(SinkProperties.SINK_DATABASE)
                        .description(SinkProperties.SINK_DATABASE_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(SinkProperties.SINK_TABLE)
                        .description(SinkProperties.SINK_TABLE_DESCRIPTION)
                        .validator(Validators.notNullOrEmpty())
                        .required()
                        .build(),
                new StringParameter().name(CommitterProperties.COMMIT_FREQUENCY_MS)
                        .description(CommitterProperties.COMMIT_FREQUENCY_DESCRIPTION)
                        .validator(Validators.alwaysPass())
                        .defaultValue(CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT)
                        .build()
        );
    }
    /**
     * Use this to instantiate a new transformer from a given {@link Context}.
     *
     * The Iceberg {@link Catalog} is resolved from the Context's service locator and the
     * target table is loaded by its {@code catalog.database.table} identifier; loading
     * fails if the table does not already exist.
     */
    public static Transformer newTransformer(Context context) {
        CommitterConfig config = new CommitterConfig(context.getParameters());
        CommitterMetrics metrics = new CommitterMetrics();
        Catalog catalog = context.getServiceLocator().service(Catalog.class);
        TableIdentifier id = TableIdentifier.of(config.getCatalog(), config.getDatabase(), config.getTable());
        Table table = catalog.loadTable(id);
        IcebergCommitter committer = new IcebergCommitter(table);
        return new Transformer(config, metrics, committer, Schedulers.computation());
    }
    public IcebergCommitterStage() {
    }
    /**
     * Uses the provided Mantis Context to inject configuration and creates an underlying table appender.
     *
     * This method depends on a Hadoop Configuration and Iceberg Catalog, both injected
     * from the Context's service locator.
     * NOTE(review): {@link #newTransformer(Context)} only reads the Catalog directly; the Hadoop
     * Configuration dependency is presumably transitive via the Catalog service — confirm.
     *
     * Note that this method expects an Iceberg Table to have been previously created out-of-band,
     * otherwise initialization will fail. Users should prefer to create tables
     * out-of-band so they can be versioned alongside their schemas.
     */
    @Override
    public void init(Context context) {
        transformer = newTransformer(context);
    }
    @Override
    public Observable<Map<String, Object>> call(Context context, Observable<DataFile> dataFileObservable) {
        // Delegates to the Transformer built in init(); throws NPE if init() was never called.
        return dataFileObservable.compose(transformer);
    }
    /**
     * Reactive Transformer for committing metadata to Iceberg.
     *
     * Users may use this class independently of this Stage, for example, if they want to
     * {@link Observable#compose(Observable.Transformer)} this transformer with a flow into
     * an existing Stage. One benefit of this co-location is to avoid extra network
     * cost from worker-to-worker communication, trading off debuggability.
     */
    public static class Transformer implements Observable.Transformer<DataFile, Map<String, Object>> {
        private final CommitterConfig config;
        private final CommitterMetrics metrics;
        private final IcebergCommitter committer;
        private final Scheduler scheduler;
        public Transformer(CommitterConfig config,
                           CommitterMetrics metrics,
                           IcebergCommitter committer,
                           Scheduler scheduler) {
            this.config = config;
            this.metrics = metrics;
            this.committer = committer;
            this.scheduler = scheduler;
        }
        /**
         * Periodically commits DataFiles to Iceberg as a batch.
         *
         * Pipeline: buffer incoming DataFiles for the configured commit frequency, skip
         * empty windows, commit each batch, and emit the resulting snapshot summary.
         * A failed commit is logged and counted but does not terminate the stream.
         */
        @Override
        public Observable<Map<String, Object>> call(Observable<DataFile> source) {
            return source
                    .buffer(config.getCommitFrequencyMs(), TimeUnit.MILLISECONDS, scheduler)
                    // Invocation count is incremented once per buffered window, even when empty.
                    .doOnNext(dataFiles -> metrics.increment(CommitterMetrics.INVOCATION_COUNT))
                    .filter(dataFiles -> !dataFiles.isEmpty())
                    .map(dataFiles -> {
                        try {
                            long start = scheduler.now();
                            Map<String, Object> summary = committer.commit(dataFiles);
                            long now = scheduler.now();
                            metrics.setGauge(CommitterMetrics.COMMIT_LATENCY_MSEC, now - start);
                            metrics.setGauge(CommitterMetrics.COMMIT_BATCH_SIZE, dataFiles.size());
                            return summary;
                        } catch (RuntimeException e) {
                            // Swallow the failure so one bad batch does not kill the job;
                            // the empty map is filtered out below.
                            metrics.increment(CommitterMetrics.COMMIT_FAILURE_COUNT);
                            logger.error("error committing to Iceberg", e);
                            return new HashMap<String, Object>();
                        }
                    })
                    .filter(summary -> !summary.isEmpty())
                    .doOnNext(summary -> {
                        metrics.increment(CommitterMetrics.COMMIT_SUCCESS_COUNT);
                        logger.info("committed {}", summary);
                    });
        }
    }
}
| 4,464 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/metrics/CommitterMetrics.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.metrics;
import io.mantisrx.common.metrics.Counter;
import io.mantisrx.common.metrics.Gauge;
import io.mantisrx.common.metrics.Metrics;
import io.mantisrx.common.metrics.MetricsRegistry;
/**
 * Metric names and publishers for the Iceberg Committer stage.
 */
public class CommitterMetrics {
    public static final String INVOCATION_COUNT = "invocationCount";
    private final Counter invocationCount;
    public static final String COMMIT_SUCCESS_COUNT = "commitSuccessCount";
    private final Counter commitSuccessCount;
    public static final String COMMIT_FAILURE_COUNT = "commitFailureCount";
    private final Counter commitFailureCount;
    public static final String COMMIT_LATENCY_MSEC = "commitLatencyMsec";
    private final Gauge commitLatencyMsec;
    public static final String COMMIT_BATCH_SIZE = "commitBatchSize";
    private final Gauge commitBatchSize;
    public CommitterMetrics() {
        // Register all counters/gauges under this class's canonical name, then
        // keep direct handles to the registered instances.
        Metrics registered = MetricsRegistry.getInstance().registerAndGet(
                new Metrics.Builder()
                        .name(CommitterMetrics.class.getCanonicalName())
                        .addCounter(INVOCATION_COUNT)
                        .addCounter(COMMIT_SUCCESS_COUNT)
                        .addCounter(COMMIT_FAILURE_COUNT)
                        .addGauge(COMMIT_LATENCY_MSEC)
                        .addGauge(COMMIT_BATCH_SIZE)
                        .build());
        invocationCount = registered.getCounter(INVOCATION_COUNT);
        commitSuccessCount = registered.getCounter(COMMIT_SUCCESS_COUNT);
        commitFailureCount = registered.getCounter(COMMIT_FAILURE_COUNT);
        commitLatencyMsec = registered.getGauge(COMMIT_LATENCY_MSEC);
        commitBatchSize = registered.getGauge(COMMIT_BATCH_SIZE);
    }
    /**
     * Sets the named gauge to {@code value}; names that are not gauges are ignored.
     */
    public void setGauge(final String metric, final long value) {
        if (metric.equals(COMMIT_LATENCY_MSEC)) {
            commitLatencyMsec.set(value);
        } else if (metric.equals(COMMIT_BATCH_SIZE)) {
            commitBatchSize.set(value);
        }
    }
    /**
     * Increments the named counter by one; names that are not counters are ignored.
     */
    public void increment(final String metric) {
        if (metric.equals(INVOCATION_COUNT)) {
            invocationCount.increment();
        } else if (metric.equals(COMMIT_SUCCESS_COUNT)) {
            commitSuccessCount.increment();
        } else if (metric.equals(COMMIT_FAILURE_COUNT)) {
            commitFailureCount.increment();
        }
    }
    /**
     * Increments the named counter (or the batch-size gauge) by {@code value};
     * unrecognized names are ignored.
     */
    public void increment(final String metric, final long value) {
        if (metric.equals(INVOCATION_COUNT)) {
            invocationCount.increment(value);
        } else if (metric.equals(COMMIT_SUCCESS_COUNT)) {
            commitSuccessCount.increment(value);
        } else if (metric.equals(COMMIT_FAILURE_COUNT)) {
            commitFailureCount.increment(value);
        } else if (metric.equals(COMMIT_BATCH_SIZE)) {
            commitBatchSize.increment(value);
        }
    }
}
| 4,465 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterProperties.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
/**
 * Property key names and default values for an Iceberg Committer.
 */
public class CommitterProperties {
    // Constants-only holder; prevent instantiation.
    private CommitterProperties() {
    }
    /**
     * Iceberg committer frequency by time in milliseconds.
     * The default is declared as a String and parsed by {@code CommitterConfig}.
     */
    public static final String COMMIT_FREQUENCY_MS = "commitFrequencyMs";
    // TODO: Change to long.
    public static final String COMMIT_FREQUENCY_MS_DEFAULT = "300000"; // 5 min
    public static final String COMMIT_FREQUENCY_DESCRIPTION =
            String.format("Iceberg Committer frequency by time in milliseconds (default: %s)",
                    COMMIT_FREQUENCY_MS_DEFAULT);
}
| 4,466 |
0 | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer | Create_ds/mantis-connectors/mantis-connector-iceberg/src/main/java/io/mantisrx/connector/iceberg/sink/committer/config/CommitterConfig.java | /*
* Copyright 2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.iceberg.sink.committer.config;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.COMMIT_FREQUENCY_MS;
import static io.mantisrx.connector.iceberg.sink.committer.config.CommitterProperties.COMMIT_FREQUENCY_MS_DEFAULT;
import io.mantisrx.connector.iceberg.sink.config.SinkConfig;
import io.mantisrx.runtime.parameter.Parameters;
/**
 * Config for controlling Iceberg Committer semantics.
 */
public class CommitterConfig extends SinkConfig {
    private final long commitFrequencyMs;
    /**
     * Creates an instance from {@link Parameters} derived from the current Mantis Stage's {@code Context}.
     *
     * @throws NumberFormatException if the commit-frequency parameter value is not a numeric String
     */
    public CommitterConfig(Parameters parameters) {
        super(parameters);
        // The parameter is declared as a String (see TODO in CommitterProperties); parse to long here.
        this.commitFrequencyMs =
                Long.parseLong((String) parameters.get(COMMIT_FREQUENCY_MS, COMMIT_FREQUENCY_MS_DEFAULT));
    }
    /**
     * Returns a long representing Iceberg committer frequency by time (milliseconds).
     */
    public long getCommitFrequencyMs() {
        return commitFrequencyMs;
    }
}
| 4,467 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnectorFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
/**
 * Factory for obtaining {@link MantisSourceJobConnector} instances.
 */
public class MantisSourceJobConnectorFactory {
    /**
     * Returns a new connector; its Zookeeper settings are resolved from the process
     * environment by the connector's no-arg constructor.
     */
    public static MantisSourceJobConnector getConnector() {
        return new MantisSourceJobConnector();
    }
}
| 4,468 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractSourceJobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Optional;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.runtime.parameter.SinkParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
public abstract class AbstractSourceJobSource extends AbstractJobSource {
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractSourceJobSource.class);
    /**
     * @deprecated use {@link #getSourceJob(String, String, String, int, Optional)}; the forPartition and
     * totalPartitions params are not used and will be removed in next release
     */
    @Deprecated
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec) {
        LOGGER.info("Connecting to source job {}", sourceJobName);
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, Optional.empty());
    }
    /**
     * Connects to a source job with a no-op sink-connection status observer.
     *
     * @param sinkParamsO optional sink parameters; when absent, defaults derived from
     *                    the criterion and clientId are used
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int samplePerSec, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {}", sourceJobName);
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, new MantisSourceJobConnector.NoOpSinkConnectionsStatusObserver(), sinkParamsO);
    }
    /**
     * @deprecated use {@link #getSourceJob(String, String, String, int, Observer, Optional)}
     */
    @Deprecated
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId, int forPartition, int totalPartitions, int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableMetaMessages = false;
        boolean enableCompressedBinaryInput = false;
        return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, Optional.empty());
    }
    /**
     * Connects to a source job with a custom sink-connection status observer
     * (meta messages disabled).
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
                                     int samplePerSec, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableMetaMessages = false;
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, sinkConnObs, sinkParamsO);
    }
    /**
     * Connects to a source job, optionally enabling meta messages
     * (compressed binary input disabled).
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
                                     int samplePerSec, boolean enableMetaMessages, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        boolean enableCompressedBinary = false;
        return getSourceJob(sourceJobName, criterion, clientId, samplePerSec, enableMetaMessages, enableCompressedBinary, sinkConnObs, sinkParamsO);
    }
    /**
     * Connects to a source job with full control over meta messages and compressed binary input.
     */
    public MantisSSEJob getSourceJob(String sourceJobName, String criterion, String clientId,
                                     int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput, Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to source job {} obs {}", sourceJobName, sinkConnObs);
        return connectToQueryBasedJob(MantisSourceJobConnectorFactory.getConnector(), criterion, sourceJobName, clientId, samplePerSec, enableMetaMessages, enableCompressedBinaryInput, sinkConnObs, sinkParamsO);
    }
    /**
     * Builds default sink parameters (or uses the supplied ones) and connects to the query-based job.
     *
     * @throws IllegalArgumentException if {@code criterion} is null or empty
     */
    private MantisSSEJob connectToQueryBasedJob(MantisSourceJobConnector connector, String criterion,
                                                String jobName, String clientId, int samplePerSec, boolean enableMetaMessages, boolean enableCompressedBinaryInput,
                                                Observer<SinkConnectionsStatus> sinkConnObs,
                                                Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to {}", jobName);
        if (criterion == null || criterion.isEmpty()) {
            // IllegalArgumentException is more precise than the raw RuntimeException previously
            // thrown; it is a RuntimeException subclass, so existing catch blocks keep working.
            throw new IllegalArgumentException("Criterion cannot be empty");
        }
        // The subscription id is derived from the criterion so identical queries share a subscription.
        String subId = Integer.toString(criterion.hashCode());
        SinkParameters defaultParams = getDefaultSinkParams(clientId, samplePerSec,
                Optional.of(criterion), Optional.of(subId), enableMetaMessages, enableCompressedBinaryInput, 500);
        return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
    }
}
| 4,469 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MantisSourceJobConnector.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import com.sampullara.cli.Args;
import com.sampullara.cli.Argument;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.client.examples.SubmitEphemeralJob;
import io.mantisrx.runtime.parameter.SinkParameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
import rx.Subscription;
/**
* Used to locate and connect to Mantis Source Jobs.
*/
public class MantisSourceJobConnector {
    // CLI argument (parsed via Args.parse in main): optional path to a configuration file.
    @Argument(alias = "p", description = "Specify a configuration file")
    private static String propFile = "";
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisSourceJobConnector.class);
    // Connection properties (Zookeeper settings) used when building MantisSSEJob instances.
    private final Properties props;
    // Sink-parameter names understood by Mantis source jobs.
    public static final String MANTIS_SOURCEJOB_CLIENT_ID_PARAM = "clientId";
    public static final String MANTIS_SOURCEJOB_SUBSCRIPTION_ID = "subscriptionId";
    // NOTE(review): duplicate of MANTIS_SOURCEJOB_CLIENT_ID_PARAM (both "clientId") —
    // presumably kept for backward compatibility; verify before removing either.
    public static final String MANTIS_SOURCEJOB_CLIENT_ID = "clientId";
    public static final String MANTIS_SOURCEJOB_CRITERION = "criterion";
    public static final String MANTIS_SOURCEJOB_NAME_PARAM = "sourceJobName";
    public static final String MANTIS_SOURCEJOB_TARGET_KEY = "target";
    public static final String MANTIS_SOURCEJOB_IS_BROADCAST_MODE = "isBroadcastMode";
    public static final String MANTIS_SOURCEJOB_SAMPLE_PER_SEC_KEY = "sample";
    public static final String MANTIS_ENABLE_PINGS = "enablePings";
    public static final String MANTIS_ENABLE_META_MESSAGES = "enableMetaMessages";
    public static final String MANTIS_META_MESSAGE_INTERVAL_SEC = "metaMessagesSec";
    public static final String MANTIS_MQL_THREADING_PARAM = "mantis.mql.threading.enabled";
    // Environment-variable keys for Zookeeper settings (read by the no-arg constructor).
    private static final String ZK_CONNECT_STRING = "mantis.zookeeper.connectString";
    private static final String ZK_ROOT = "mantis.zookeeper.root";
    private static final String ZK_LEADER_PATH = "mantis.zookeeper.leader.announcement.path";
    /**
     * Creates a connector using the supplied connection properties
     * (e.g. pre-resolved Zookeeper settings).
     */
    public MantisSourceJobConnector(Properties props) {
        this.props = props;
    }
public MantisSourceJobConnector() {
props = new Properties();
final String defaultZkConnect = "127.0.0.1:2181";
final String defaultZkRoot = "/mantis/master";
final String defaultZkLeaderPath = "/leader";
String connectString;
String zookeeperRoot;
String zookeeperLeaderAnnouncementPath;
Map<String, String> env = System.getenv();
if (env == null || env.isEmpty()) {
connectString = defaultZkConnect;
zookeeperRoot = defaultZkRoot;
zookeeperLeaderAnnouncementPath = defaultZkLeaderPath;
} else {
connectString = env.getOrDefault(ZK_CONNECT_STRING, defaultZkConnect);
zookeeperRoot = env.getOrDefault(ZK_ROOT, defaultZkRoot);
zookeeperLeaderAnnouncementPath = env.getOrDefault(ZK_LEADER_PATH, defaultZkLeaderPath);
LOGGER.info("Mantis Zk settings read from ENV: connectString {} root {} path {}", env.get(ZK_CONNECT_STRING), env.get(ZK_ROOT), env.get(ZK_LEADER_PATH));
}
if (connectString != null && !connectString.isEmpty()
&& zookeeperRoot != null && !zookeeperRoot.isEmpty()
&& zookeeperLeaderAnnouncementPath != null && !zookeeperLeaderAnnouncementPath.isEmpty()) {
props.put(ZK_CONNECT_STRING, connectString);
props.put(ZK_ROOT, zookeeperRoot);
props.put(ZK_LEADER_PATH, zookeeperLeaderAnnouncementPath);
props.put("mantis.zookeeper.connectionTimeMs", "2000");
props.put("mantis.zookeeper.connection.retrySleepMs", "500");
props.put("mantis.zookeeper.connection.retryCount", "5");
} else {
throw new RuntimeException("Zookeeper properties not available!");
}
LOGGER.info("Mantis Zk settings used for Source Job connector: connectString {} root {} path {}", connectString, zookeeperRoot, zookeeperLeaderAnnouncementPath);
}
/**
 * Connects to a source job's sink with default parameters.
 * NOTE(review): method name contains a typo ("connecToJob"); it is kept as-is
 * because renaming a public method would break existing callers.
 *
 * @deprecated use {@code connectToJob(String, SinkParameters)} instead
 */
@Deprecated
public MantisSSEJob connecToJob(String jobName) {
    return connectToJob(jobName, new SinkParameters.Builder().build(), new NoOpSinkConnectionsStatusObserver());
}
/**
 * Connects to the named source job's sink with the given sink parameters and a
 * no-op connection-status observer.
 */
public MantisSSEJob connectToJob(String jobName, SinkParameters params) {
    return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
}
/**
 * Connects to the named source job's sink; the partition arguments are ignored.
 *
 * @deprecated forPartition and totalPartitions are not used internally; this API will be removed in the next release
 */
@Deprecated
public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions) {
    return connectToJob(jobName, params, new NoOpSinkConnectionsStatusObserver());
}
/**
 * Connects to the named source job's sink; the partition arguments are ignored.
 *
 * @deprecated forPartition and totalPartitions are not used internally; this API will be removed in the next release
 */
@Deprecated
public MantisSSEJob connectToJob(String jobName, SinkParameters params, int forPartition, int totalPartitions, Observer<SinkConnectionsStatus> sinkObserver) {
    return connectToJob(jobName, params, sinkObserver);
}
/**
 * Builds a sink connector for the named source job.
 *
 * @param jobName      name of the source job whose sink to connect to
 * @param params       sink (query) parameters sent on connection
 * @param sinkObserver receives {@link SinkConnectionsStatus} updates
 * @return a job connector configured with this connector's properties
 */
public MantisSSEJob connectToJob(
        String jobName,
        SinkParameters params,
        Observer<SinkConnectionsStatus> sinkObserver) {
    MantisSSEJob.Builder builder = new MantisSSEJob.Builder(props)
            .name(jobName)
            .sinkConnectionsStatusObserver(sinkObserver)
            .onConnectionReset(throwable -> LOGGER.error("Reconnecting due to error: " + throwable.getMessage()))
            .sinkParams(params);
    return builder.buildJobConnector();
}
/** Default observer that only logs sink connection status changes. */
static class NoOpSinkConnectionsStatusObserver implements Observer<SinkConnectionsStatus> {
    @Override
    public void onCompleted() {
        // Completion of the status stream is unexpected; surface it as a warning.
        LOGGER.warn("Got Completed on SinkConnectionStatus ");
    }
    @Override
    public void onError(Throwable e) {
        LOGGER.error("Got Error on SinkConnectionStatus ", e);
    }
    @Override
    public void onNext(SinkConnectionsStatus t) {
        LOGGER.info("Got Sink Connection Status update " + t);
    }
}
/**
 * Smoke-test entry point: opens two subscriptions to a "TestSourceJob" and
 * waits for a total of 20 events across both before reporting PASSED/FAILED.
 */
public static void main(String[] args) {
    try {
        SinkParameters params = new SinkParameters.Builder().withParameter("subscriptionId", "id1").
                withParameter("criterion", "select * where true").build();
        Args.parse(MantisSourceJobConnector.class, args);
        final CountDownLatch latch = new CountDownLatch(20);
        MantisSourceJobConnector sourceJobConnector = new MantisSourceJobConnector();
        MantisSSEJob job = sourceJobConnector.connectToJob("TestSourceJob", params);
        Subscription subscription = job.connectAndGetObservable()
                .doOnNext(o -> {
                    LOGGER.info("Got event: data: " + o.getEventAsString());
                    latch.countDown();
                })
                .subscribe();
        Subscription s2 = job.connectAndGetObservable()
                .doOnNext(event -> {
                    LOGGER.info(" 2nd: Got event: data: " + event.getEventAsString());
                    latch.countDown();
                })
                .subscribe();
        try {
            boolean await = latch.await(300, TimeUnit.SECONDS);
            if (await)
                System.out.println("PASSED");
            else
                System.err.println("FAILED!");
        } catch (InterruptedException e) {
            // Restore the interrupt status instead of silently swallowing it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
        // Fix: release BOTH subscriptions (s2 previously leaked).
        subscription.unsubscribe();
        s2.unsubscribe();
        System.out.println("Unsubscribed");
    } catch (IllegalArgumentException e) {
        // Fix: print usage for THIS class; previously referenced SubmitEphemeralJob
        // (copy-paste bug from another tool's main method).
        Args.usage(MantisSourceJobConnector.class);
        System.exit(1);
    } catch (Exception e) {
        e.printStackTrace();
        System.exit(1);
    }
}
}
| 4,470 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/SinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import io.mantisrx.client.SinkConnectionsStatus;
import rx.Observer;
/**
 * Observer of {@link SinkConnectionsStatus} updates that also exposes
 * aggregate connection counts for the sink(s) being consumed.
 * (Interface methods are implicitly public and abstract; the redundant
 * modifiers were removed.)
 */
public interface SinkConnectionStatusObserver extends Observer<SinkConnectionsStatus> {

    /** @return number of sink servers currently connected. */
    long getConnectedServerCount();

    /** @return total number of sink servers known. */
    long getTotalServerCount();

    /** @return number of sink servers currently delivering data. */
    long getReceivingDataCount();

    /** @return true when connected to all sinks this observer tracks. */
    boolean isConnectedToAllSinks();
}
| 4,471 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/AbstractJobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.io.UnsupportedEncodingException;
import java.util.Optional;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.client.SinkConnectionsStatus;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.source.Source;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observer;
/**
 * Base class for sources that consume events from another Mantis job's sink.
 * Provides helpers to build default sink parameters and to obtain a connected
 * {@link MantisSSEJob}.
 */
public abstract class AbstractJobSource implements Source<MantisServerSentEvent> {

    private static final int DEFAULT_META_MSG_INTERVAL_MSEC = 500;
    private static final Logger LOGGER = LoggerFactory.getLogger(AbstractJobSource.class);

    /**
     * Builds the default sink (query) parameters for a sink connection.
     *
     * @param clientId                    client id identifying this consumer
     * @param samplePerSec                sampling rate; only applied when >= 1
     * @param criterion                   optional MQL criterion
     * @param subscriptionId              optional subscription id
     * @param enableMetaMessages          whether to request meta messages
     * @param enableCompressedBinaryInput whether to request compressed binary events
     * @param metaMessageInterval         meta message interval, used only when meta messages are enabled
     * @return the assembled sink parameters
     * @throws RuntimeException if parameter encoding fails
     */
    public SinkParameters getDefaultSinkParams(final String clientId,
                                               final int samplePerSec,
                                               final Optional<String> criterion,
                                               final Optional<String> subscriptionId,
                                               final boolean enableMetaMessages,
                                               boolean enableCompressedBinaryInput, final long metaMessageInterval) {
        SinkParameters.Builder defaultParamBuilder = new SinkParameters.Builder();
        try {
            defaultParamBuilder = defaultParamBuilder
                    .withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID_PARAM, clientId)
                    .withParameter(MantisSSEConstants.ENABLE_PINGS, "true");
            if (samplePerSec >= 1) {
                defaultParamBuilder = defaultParamBuilder.withParameter("sample", Integer.toString(samplePerSec));
            }
            if (criterion.isPresent()) {
                defaultParamBuilder =
                        defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION, criterion.get());
            }
            if (subscriptionId.isPresent()) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSourceJobConnector.MANTIS_SOURCEJOB_SUBSCRIPTION_ID, subscriptionId.get());
            }
            if (enableMetaMessages) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.ENABLE_META_MESSAGES, Boolean.toString(true));
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.META_MESSAGES_SEC, Long.toString(metaMessageInterval));
            }
            if (enableCompressedBinaryInput) {
                defaultParamBuilder = defaultParamBuilder.withParameter(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION, Boolean.toString(true));
            }
        } catch (UnsupportedEncodingException e) {
            // Fix: preserve the cause instead of discarding it via getMessage().
            throw new RuntimeException(e);
        }
        return defaultParamBuilder.build();
    }

    /**
     * Connects to the named job's sink with default parameters unless explicit
     * sink parameters are supplied.
     *
     * @param jobName     name of the job to connect to
     * @param clientId    client id identifying this consumer
     * @param samplePerSec sampling rate; only applied when >= 1
     * @param sinkConnObs receives sink connection status updates
     * @param sinkParamsO optional explicit sink parameters overriding the defaults
     * @return a connected {@link MantisSSEJob}
     */
    public MantisSSEJob getJob(String jobName, String clientId, int samplePerSec,
                               Observer<SinkConnectionsStatus> sinkConnObs, Optional<SinkParameters> sinkParamsO) {
        LOGGER.info("Connecting to job {} obs {}", jobName, sinkConnObs);
        boolean enableMetaMessages = false;
        boolean enableCompressedBinaryInput = false;
        MantisSourceJobConnector connector = MantisSourceJobConnectorFactory.getConnector();
        SinkParameters defaultParams = getDefaultSinkParams(clientId,
                samplePerSec, Optional.empty(), Optional.empty(), enableMetaMessages, enableCompressedBinaryInput, DEFAULT_META_MSG_INTERVAL_MSEC);
        return connector.connectToJob(jobName, sinkParamsO.orElse(defaultParams), sinkConnObs);
    }
}
| 4,472 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/MultiSinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import io.mantisrx.client.SinkConnectionsStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Aggregates several per-job {@link SinkConnectionStatusObserver}s and reports
 * combined connection counts across all of them.
 */
public class MultiSinkConnectionStatusObserver implements SinkConnectionStatusObserver {

    private static final Logger LOGGER = LoggerFactory.getLogger(MultiSinkConnectionStatusObserver.class);
    public static final MultiSinkConnectionStatusObserver INSTANCE = new MultiSinkConnectionStatusObserver();

    // Keyed by source job name; see JobSource which registers one observer per job.
    private final ConcurrentHashMap<String, SinkConnectionStatusObserver> sinkObserverMap = new ConcurrentHashMap<>();

    /** Registers (or replaces) the observer tracked under the given name. */
    public void addSinkConnectionObserver(String name, SinkConnectionStatusObserver obs) {
        sinkObserverMap.put(name, obs);
    }

    /** Removes the observer tracked under the given name, if any. */
    public void removeSinkConnectionObserver(String name) {
        sinkObserverMap.remove(name);
    }

    /** @return the observer registered under the given name, or null. */
    public SinkConnectionStatusObserver getSinkConnectionObserver(String name) {
        return sinkObserverMap.get(name);
    }

    // for testing
    void removeAllSinkConnectionObservers() {
        sinkObserverMap.clear();
    }

    /**
     * Iterate through all member connectionObservers and sum up the connectedServer counts.
     */
    @Override
    public long getConnectedServerCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        long count = 0;
        for (SinkConnectionStatusObserver ob : sinkObserverMap.values()) {
            count += ob.getConnectedServerCount();
        }
        LOGGER.info("Total connected server count" + count);
        return count;
    }

    /**
     * Iterate through all member connectionObservers and sum up the totalServer counts.
     */
    @Override
    public long getTotalServerCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        long count = 0;
        for (SinkConnectionStatusObserver ob : sinkObserverMap.values()) {
            count += ob.getTotalServerCount();
        }
        LOGGER.info("Total server count" + count);
        return count;
    }

    /**
     * Iterate through all member connectionObservers and sum up the receiving data counts.
     */
    @Override
    public long getReceivingDataCount() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        long count = 0;
        for (SinkConnectionStatusObserver ob : sinkObserverMap.values()) {
            // Fix: previously summed getConnectedServerCount() here (copy-paste bug),
            // so this method reported connected servers, not receiving servers.
            count += ob.getReceivingDataCount();
        }
        LOGGER.info("Total receiving server count" + count);
        return count;
    }

    /**
     * Iterate through all member connectionObservers and return false if any of the constituent client connections
     * are not complete.
     */
    @Override
    public boolean isConnectedToAllSinks() {
        if (sinkObserverMap.isEmpty()) {
            LOGGER.warn("No connection observers registered!");
        }
        boolean connectedToAll = false;
        for (SinkConnectionStatusObserver ob : sinkObserverMap.values()) {
            connectedToAll = ob.isConnectedToAllSinks();
            if (!connectedToAll) {
                LOGGER.warn("Not connected to sinks of all jobs");
                break;
            }
        }
        return connectedToAll;
    }

    @Override
    public void onCompleted() {
        // NO OP
    }

    @Override
    public void onError(Throwable e) {
        // NO OP
    }

    @Override
    public void onNext(SinkConnectionsStatus t) {
        // NO OP
    }
}
| 4,473 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/core/DefaultSinkConnectionStatusObserver.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.core;
import java.util.concurrent.atomic.AtomicLong;
import io.mantisrx.client.SinkConnectionsStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Tracks the latest {@link SinkConnectionsStatus} for a single sink connection
 * and answers aggregate questions about it.
 */
public class DefaultSinkConnectionStatusObserver implements SinkConnectionStatusObserver {

    private static final Logger LOGGER = LoggerFactory.getLogger(DefaultSinkConnectionStatusObserver.class);
    private static final SinkConnectionStatusObserver INSTANCE = new DefaultSinkConnectionStatusObserver();

    // Latest counts reported via onNext().
    private final AtomicLong connectedCount = new AtomicLong();
    private final AtomicLong totalCount = new AtomicLong();
    private final AtomicLong receivingCount = new AtomicLong();

    /**
     * Returns the shared singleton when {@code singleton} is true, otherwise a
     * fresh instance.
     */
    public static synchronized SinkConnectionStatusObserver getInstance(boolean singleton) {
        return singleton ? INSTANCE : new DefaultSinkConnectionStatusObserver();
    }

    /** Returns the shared singleton instance. */
    public static SinkConnectionStatusObserver getInstance() {
        return getInstance(true);
    }

    @Override
    public void onCompleted() {
        LOGGER.error("SinkConnectionStatusObserver completed!");
    }

    @Override
    public void onError(Throwable e) {
        LOGGER.error("Got Error", e);
    }

    @Override
    public void onNext(SinkConnectionsStatus status) {
        LOGGER.info("Got SinkConnectionStatus update " + status);
        connectedCount.set(status.getNumConnected());
        totalCount.set(status.getTotal());
        receivingCount.set(status.getRecevingDataFrom());
    }

    @Override
    public long getConnectedServerCount() {
        return connectedCount.get();
    }

    @Override
    public long getTotalServerCount() {
        return totalCount.get();
    }

    @Override
    public long getReceivingDataCount() {
        return receivingCount.get();
    }

    @Override
    public boolean isConnectedToAllSinks() {
        long connected = connectedCount.get();
        long total = totalCount.get();
        long receiving = receivingCount.get();
        // Connected to all sinks only when every count is positive and all agree.
        boolean allConnected = receiving > 0
                && connected > 0
                && total > 0
                && connected == total
                && total == receiving;
        if (!allConnected) {
            LOGGER.warn("NOT connected to all sinks "
                    + " connected : " + connected
                    + " total " + total
                    + " receiving Data " + receiving);
        }
        return allConnected;
    }
}
| 4,474 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/source/JobSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.source;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import com.google.common.collect.Lists;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonParser;
import com.mantisrx.common.utils.MantisSSEConstants;
import io.mantisrx.client.MantisSSEJob;
import io.mantisrx.common.MantisServerSentEvent;
import io.mantisrx.connector.job.core.AbstractSourceJobSource;
import io.mantisrx.connector.job.core.DefaultSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.MantisSourceJobConnector;
import io.mantisrx.connector.job.core.MultiSinkConnectionStatusObserver;
import io.mantisrx.connector.job.core.SinkConnectionStatusObserver;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.SinkParameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import io.vavr.Tuple;
import io.vavr.Tuple2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
/**
 * A {@link Source} that connects to the sinks of one or more Mantis source
 * jobs (described by {@link TargetInfo}) and merges their event streams into a
 * single observable.
 */
public class JobSource extends AbstractSourceJobSource implements Source<MantisServerSentEvent> {

    private static final Logger LOGGER = LoggerFactory.getLogger(JobSource.class);
    // Fix: was a mutable static field; JsonParser holds no mutable state, so make it final.
    private static final JsonParser PARSER = new JsonParser();

    // Connection targets; lazily populated from job parameters in call() when empty.
    protected List<TargetInfo> targets;

    public JobSource(List<TargetInfo> targets) {
        this.targets = targets;
    }

    // For backwards compatibility.
    public JobSource() {
        this(new ArrayList<>());
    }

    public JobSource(String targetInfoStr) {
        this.targets = parseTargetInfo(targetInfoStr);
    }

    @Override
    public List<ParameterDefinition<?>> getParameters() {
        List<ParameterDefinition<?>> params = Lists.newArrayList();
        params.add(new StringParameter()
                .name(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY)
                .validator(Validators.notNullOrEmpty())
                .defaultValue("{}")
                .build());
        return params;
    }

    /**
     * Connects to every configured target source job and merges the resulting
     * per-connection observables.
     *
     * @param context job context providing worker info, job id and parameters
     * @param index   worker index (unused)
     * @return merged stream of event observables; null if no connection was made
     */
    @Override
    public Observable<Observable<MantisServerSentEvent>> call(Context context, Index index) {
        if (targets.isEmpty()) {
            targets = parseInputParameters(context);
        }
        Observable<Observable<MantisServerSentEvent>> sourceObs = null;
        int workerNo = context.getWorkerInfo().getWorkerNumber();
        targets = enforceClientIdConsistency(targets, context.getJobId());
        for (TargetInfo targetInfo : targets) {
            String sourceJobName = targetInfo.sourceJobName;
            LOGGER.info("Processing job " + sourceJobName);
            boolean singleton = false;
            SinkConnectionStatusObserver obs = DefaultSinkConnectionStatusObserver.getInstance(singleton);
            MultiSinkConnectionStatusObserver.INSTANCE.addSinkConnectionObserver(sourceJobName, obs);
            String clientId = targetInfo.clientId;
            if (targetInfo.isBroadcastMode) {
                // Broadcast mode: give each worker its own clientId so every worker
                // receives the full stream instead of sharing one consumer group.
                clientId = clientId + "_" + workerNo;
            }
            MantisSSEJob job = getSourceJob(sourceJobName, targetInfo.criterion, clientId,
                    targetInfo.samplePerSec, targetInfo.enableMetaMessages,
                    targetInfo.enableCompressedBinary, obs, Optional.<SinkParameters>empty());
            // Fix: the original only null-checked the job after the first iteration,
            // so a null job on the first target would have thrown an NPE.
            if (job == null) {
                LOGGER.error("Could not connect to job " + sourceJobName);
                continue;
            }
            Observable<Observable<MantisServerSentEvent>> clientObs = job.connectAndGet();
            if (sourceObs == null) {
                sourceObs = clientObs;
            } else if (clientObs != null) {
                sourceObs = sourceObs.mergeWith(clientObs);
            } else {
                LOGGER.error("Could not connect to job " + sourceJobName);
            }
        }
        return sourceObs;
    }

    /** Describes one source-job connection: job name, query, client id and options. */
    public static class TargetInfo {

        public String sourceJobName;
        public String criterion;
        public int samplePerSec;
        public boolean isBroadcastMode;
        public boolean enableMetaMessages;
        public boolean enableCompressedBinary;
        public String clientId;

        public TargetInfo(String jobName,
                          String criterion,
                          String clientId,
                          int samplePerSec,
                          boolean isBroadcastMode,
                          boolean enableMetaMessages,
                          boolean enableCompressedBinary) {
            this.sourceJobName = jobName;
            this.criterion = criterion;
            this.clientId = clientId;
            this.samplePerSec = samplePerSec;
            this.isBroadcastMode = isBroadcastMode;
            this.enableMetaMessages = enableMetaMessages;
            this.enableCompressedBinary = enableCompressedBinary;
        }
    }

    /** Reads the target list JSON from the job parameters and parses it. */
    protected static List<TargetInfo> parseInputParameters(Context ctx) {
        String targetListStr = (String) ctx.getParameters()
                .get(MantisSourceJobConnector.MANTIS_SOURCEJOB_TARGET_KEY, "{}");
        return parseTargetInfo(targetListStr);
    }

    /**
     * Parses a JSON document of the form {@code {"targets":[{...}, ...]}} into
     * a list of {@link TargetInfo}.
     *
     * @param targetListStr JSON string; must contain a "targets" array
     * @return parsed targets in document order
     */
    protected static List<TargetInfo> parseTargetInfo(String targetListStr) {
        List<TargetInfo> targetList = new ArrayList<TargetInfo>();
        JsonObject requestObj = (JsonObject) PARSER.parse(targetListStr);
        JsonArray arr = requestObj.get("targets").getAsJsonArray();
        for (int i = 0; i < arr.size(); i++) {
            int sample = -1;
            boolean isBroadCastMode = false;
            JsonObject srcObj = arr.get(i).getAsJsonObject();
            String sName = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_NAME_PARAM).getAsString();
            String criterion = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CRITERION).getAsString();
            String clientId = null;
            if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID) != null) {
                clientId = srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_CLIENT_ID).getAsString();
            }
            if (srcObj.get(MantisSSEConstants.SAMPLE) != null) {
                sample = srcObj.get(MantisSSEConstants.SAMPLE).getAsInt();
            }
            if (srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE) != null) {
                isBroadCastMode =
                        srcObj.get(MantisSourceJobConnector.MANTIS_SOURCEJOB_IS_BROADCAST_MODE).getAsBoolean();
            }
            boolean enableMetaMessages = false;
            if (srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES) != null) {
                enableMetaMessages = srcObj.get(MantisSSEConstants.ENABLE_META_MESSAGES).getAsBoolean();
            }
            boolean enableCompressedBinary = false;
            if (srcObj.get(MantisSSEConstants.MANTIS_ENABLE_COMPRESSION) != null) {
                // NOTE(review): the field's VALUE is ignored — mere presence enables
                // compression, unlike the other boolean flags above. Confirm intended.
                enableCompressedBinary = true;
            }
            TargetInfo ti = new TargetInfo(
                    sName,
                    criterion,
                    clientId,
                    sample,
                    isBroadCastMode,
                    enableMetaMessages,
                    enableCompressedBinary);
            targetList.add(ti);
            LOGGER.info("sname: " + sName + " criterion: " + criterion + " isBroadcastMode " + isBroadCastMode);
        }
        return targetList;
    }

    /** Fluent builder for {@link TargetInfo}. */
    public static class TargetInfoBuilder {

        private String sourceJobName;
        private String criterion;
        private String clientId;
        private int samplePerSec = -1;
        private boolean isBroadcastMode = false;
        private boolean enableMetaMessages = false;
        private boolean enableCompressedBinary = false;

        public TargetInfoBuilder() {
        }

        public TargetInfoBuilder withSourceJobName(String srcJobName) {
            this.sourceJobName = srcJobName;
            return this;
        }

        public TargetInfoBuilder withQuery(String query) {
            this.criterion = query;
            return this;
        }

        public TargetInfoBuilder withSamplePerSec(int samplePerSec) {
            this.samplePerSec = samplePerSec;
            return this;
        }

        public TargetInfoBuilder withBroadCastMode() {
            this.isBroadcastMode = true;
            return this;
        }

        public TargetInfoBuilder withMetaMessagesEnabled() {
            this.enableMetaMessages = true;
            return this;
        }

        public TargetInfoBuilder withBinaryCompressionEnabled() {
            this.enableCompressedBinary = true;
            return this;
        }

        public TargetInfoBuilder withClientId(String clientId) {
            this.clientId = clientId;
            return this;
        }

        public TargetInfo build() {
            return new TargetInfo(
                    sourceJobName,
                    criterion,
                    clientId,
                    samplePerSec,
                    isBroadcastMode,
                    enableMetaMessages,
                    enableCompressedBinary);
        }
    }

    /**
     * Ensures that a list of TargetInfo contains a sane set of sourceJobName, ClientId pairs.
     * TODO: Currently mutates the list, which isn't problematic here, but it would be prudent to clean this up.
     *
     * @param targets A List of TargetInfo for which to validate and correct clientId inconsistencies.
     *
     * @return The original List modified to have consistent clientIds.
     */
    public static List<TargetInfo> enforceClientIdConsistency(List<TargetInfo> targets, String defaultClientId) {
        targets.sort(Comparator.comparing(t -> t.criterion));
        HashSet<Tuple2<String, String>> connectionPairs = new HashSet<>(targets.size());
        for (TargetInfo target : targets) {
            if (target.clientId == null) {
                target.clientId = defaultClientId;
            }
            Tuple2<String, String> connectionPair = Tuple.of(target.sourceJobName, target.clientId);
            int attempts = 0;
            while (connectionPairs.contains(connectionPair)) {
                // Disambiguate duplicate (job, clientId) pairs with a numeric suffix.
                connectionPair = Tuple.of(target.sourceJobName, target.clientId + "_" + ++attempts);
            }
            target.clientId = connectionPair._2;
            connectionPairs.add(connectionPair);
        }
        return targets;
    }
}
| 4,475 |
0 | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job | Create_ds/mantis-connectors/mantis-connector-job/src/main/java/io/mantisrx/connector/job/sink/ServerSentEventsSink.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.job.sink;
import java.util.List;
import java.util.Map;
import io.mantisrx.common.properties.MantisPropertiesService;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.Metadata;
import io.mantisrx.runtime.PortRequest;
import io.mantisrx.runtime.sink.SelfDocumentingSink;
import io.mantisrx.runtime.sink.ServerSentEventRequestHandler;
import io.mantisrx.runtime.sink.predicate.Predicate;
import io.mantisrx.server.core.ServiceRegistry;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelOption;
import io.reactivex.mantis.network.push.PushServerSse;
import io.reactivex.mantis.network.push.PushServers;
import io.reactivex.mantis.network.push.Routers;
import io.reactivex.mantis.network.push.ServerConfig;
import mantis.io.reactivex.netty.RxNetty;
import mantis.io.reactivex.netty.pipeline.PipelineConfigurators;
import mantis.io.reactivex.netty.protocol.http.server.HttpServer;
import mantis.io.reactivex.netty.protocol.http.sse.ServerSentEvent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.functions.Func1;
import rx.functions.Func2;
import rx.subjects.BehaviorSubject;
public class ServerSentEventsSink<T> implements SelfDocumentingSink<T> {
private static final Logger LOG = LoggerFactory.getLogger(ServerSentEventsSink.class);
private final Func2<Map<String, List<String>>, Context, Void> subscribeProcessor;
private final BehaviorSubject<Integer> portObservable = BehaviorSubject.create();
private Func1<T, String> encoder;
private Func1<Throwable, String> errorEncoder;
private Predicate<T> predicate;
private Func2<Map<String, List<String>>, Context, Void> requestPreprocessor;
private Func2<Map<String, List<String>>, Context, Void> requestPostprocessor;
private int port = -1;
private MantisPropertiesService propService;
public ServerSentEventsSink(Func1<T, String> encoder) {
this(encoder, null, null);
}
ServerSentEventsSink(Func1<T, String> encoder,
Func1<Throwable, String> errorEncoder,
Predicate<T> predicate) {
if (errorEncoder == null) {
// default
errorEncoder = Throwable::getMessage;
}
this.encoder = encoder;
this.errorEncoder = errorEncoder;
this.predicate = predicate;
this.propService = ServiceRegistry.INSTANCE.getPropertiesService();
this.subscribeProcessor = null;
}
ServerSentEventsSink(Builder<T> builder) {
this.encoder = builder.encoder;
this.errorEncoder = builder.errorEncoder;
this.predicate = builder.predicate;
this.requestPreprocessor = builder.requestPreprocessor;
this.requestPostprocessor = builder.requestPostprocessor;
this.subscribeProcessor = builder.subscribeProcessor;
this.propService = ServiceRegistry.INSTANCE.getPropertiesService();
}
@Override
public Metadata metadata() {
StringBuilder description = new StringBuilder();
description.append("HTTP server streaming results using Server-sent events. The sink"
+ " supports optional subscription (GET) parameters to change the events emitted"
+ " by the stream. A sampling interval can be applied to the stream using"
+ " the GET parameter sample=numSeconds. This will limit the stream rate to"
+ " events-per-numSeconds.");
if (predicate != null && predicate.getDescription() != null) {
description.append(" Predicate description: ").append(predicate.getDescription());
}
return new Metadata.Builder()
.name("Server Sent Event Sink")
.description(description.toString())
.build();
}
private boolean runNewSseServerImpl(String jobName) {
String legacyServerString = propService.getStringValue("mantis.sse.newServerImpl", "true");
String legacyServerStringPerJob = propService.getStringValue(jobName + ".mantis.sse.newServerImpl", "false");
return Boolean.parseBoolean(legacyServerString) || Boolean.parseBoolean(legacyServerStringPerJob);
}
private int numConsumerThreads() {
String consumerThreadsString = propService.getStringValue("mantis.sse.numConsumerThreads", "1");
return Integer.parseInt(consumerThreadsString);
}
private int maxChunkSize() {
String maxChunkSize = propService.getStringValue("mantis.sse.maxChunkSize", "1000");
return Integer.parseInt(maxChunkSize);
}
private int maxReadTime() {
String maxChunkSize = propService.getStringValue("mantis.sse.maxReadTimeMSec", "250");
return Integer.parseInt(maxChunkSize);
}
private int bufferCapacity() {
String bufferCapacityString = propService.getStringValue("mantis.sse.bufferCapacity", "25000");
return Integer.parseInt(bufferCapacityString);
}
private boolean useSpsc() {
String useSpsc = propService.getStringValue("mantis.sse.spsc", "false");
return Boolean.parseBoolean(useSpsc);
}
@Override
public void call(Context context, PortRequest portRequest, final Observable<T> observable) {
port = portRequest.getPort();
if (runNewSseServerImpl(context.getWorkerInfo().getJobName())) {
LOG.info("Serving modern HTTP SSE server sink on port: " + port);
String serverName = "SseSink";
ServerConfig.Builder<T> config = new ServerConfig.Builder<T>()
.name(serverName)
.groupRouter(Routers.roundRobinSse(serverName, encoder))
.port(port)
.metricsRegistry(context.getMetricsRegistry())
.maxChunkTimeMSec(maxReadTime())
.maxChunkSize(maxChunkSize())
.bufferCapacity(bufferCapacity())
.numQueueConsumers(numConsumerThreads())
.useSpscQueue(useSpsc())
.maxChunkTimeMSec(getBatchInterval());
if (predicate != null) {
config.predicate(predicate.getPredicate());
}
PushServerSse<T, Context> server = PushServers.infiniteStreamSse(config.build(), observable,
requestPreprocessor, requestPostprocessor,
subscribeProcessor, context, true);
server.start();
} else {
LOG.info("Serving legacy HTTP SSE server sink on port: " + port);
int batchInterval = getBatchInterval();
HttpServer<ByteBuf, ServerSentEvent> server = RxNetty.newHttpServerBuilder(
port,
new ServerSentEventRequestHandler<>(
observable,
encoder,
errorEncoder,
predicate,
requestPreprocessor,
requestPostprocessor,
context,
batchInterval))
.pipelineConfigurator(PipelineConfigurators.<ByteBuf>serveSseConfigurator())
.channelOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 5 * 1024 * 1024)
.channelOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 1024 * 1024)
.build();
server.start();
}
portObservable.onNext(port);
}
private int getBatchInterval() {
//default flush interval
String flushIntervalMillisStr =
ServiceRegistry.INSTANCE.getPropertiesService()
.getStringValue("mantis.sse.batchInterval", "100");
LOG.info("Read fast property mantis.sse.batchInterval" + flushIntervalMillisStr);
return Integer.parseInt(flushIntervalMillisStr);
}
private int getHighWaterMark() {
String jobName = propService.getStringValue("JOB_NAME", "default");
int highWaterMark = 5 * 1024 * 1024;
String highWaterMarkStr = propService.getStringValue(
jobName + ".sse.highwater.mark",
Integer.toString(5 * 1024 * 1024));
LOG.info("Read fast property:" + jobName + ".sse.highwater.mark ->" + highWaterMarkStr);
try {
highWaterMark = Integer.parseInt(highWaterMarkStr);
} catch (Exception e) {
LOG.error("Error parsing string " + highWaterMarkStr + " exception " + e.getMessage());
}
return highWaterMark;
}
    /** Returns the port this sink's SSE server listens on. */
    public int getServerPort() {
        return port;
    }
    /**
     * Notifies you when the mantis job is available to listen to, for use when you want to
     * write unit or regressions tests with the local runner that verify the output.
     *
     * @return an observable that emits the listening port once the server has started
     */
    public Observable<Integer> portConnections() {
        return portObservable;
    }
public static class Builder<T> {
private Func1<T, String> encoder;
private Func2<Map<String, List<String>>, Context, Void> requestPreprocessor;
private Func2<Map<String, List<String>>, Context, Void> requestPostprocessor;
private Func1<Throwable, String> errorEncoder = Throwable::getMessage;
private Predicate<T> predicate;
private Func2<Map<String, List<String>>, Context, Void> subscribeProcessor;
public Builder<T> withEncoder(Func1<T, String> encoder) {
this.encoder = encoder;
return this;
}
public Builder<T> withErrorEncoder(Func1<Throwable, String> errorEncoder) {
this.errorEncoder = errorEncoder;
return this;
}
public Builder<T> withPredicate(Predicate<T> predicate) {
this.predicate = predicate;
return this;
}
public Builder<T> withRequestPreprocessor(Func2<Map<String, List<String>>, Context, Void> preProcessor) {
this.requestPreprocessor = preProcessor;
return this;
}
public Builder<T> withSubscribePreprocessor(
Func2<Map<String, List<String>>, Context, Void> subscribeProcessor) {
this.subscribeProcessor = subscribeProcessor;
return this;
}
public Builder<T> withRequestPostprocessor(Func2<Map<String, List<String>>, Context, Void> postProcessor) {
this.requestPostprocessor = postProcessor;
return this;
}
public ServerSentEventsSink<T> build() {
return new ServerSentEventsSink<>(this);
}
}
}
| 4,476 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/ParameterTestUtils.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import io.mantisrx.runtime.parameter.Parameters;
public class ParameterTestUtils {

    /**
     * Builds a {@link Parameters} instance from alternating key/value arguments, e.g.
     * {@code createParameters("topic", "myTopic", "numWorkers", 2)}. Every key is also
     * registered as both a required and a defined parameter.
     *
     * @param params alternating parameter names ({@code String}) and values
     * @return a {@link Parameters} holding the given key/value pairs
     * @throws IllegalArgumentException if a key is not a {@code String}, or if an odd
     *         number of arguments is passed (previously a trailing key was silently dropped)
     */
    public static Parameters createParameters(Object... params) {
        if (params.length % 2 != 0) {
            throw new IllegalArgumentException(
                    "expected an even number of arguments (key/value pairs), got " + params.length);
        }
        Map<String, Object> paramsMap = new HashMap<>();
        Set<String> requiredParams = new HashSet<>();
        for (int i = 0; i < params.length; i += 2) {
            Object token = params[i];
            if (!(token instanceof String)) {
                throw new IllegalArgumentException("parameter key must be of type String, parameter key not supported with type " + token.getClass());
            }
            String paramkey = (String) token;
            paramsMap.put(paramkey, params[i + 1]);
            requiredParams.add(paramkey);
        }
        // The same key set serves as both the "required" and "defined" parameter sets.
        return new Parameters(paramsMap, requiredParams, requiredParams);
    }
}
| 4,477 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/KafkaSourceTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static junit.framework.TestCase.fail;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.NoopRegistry;
import info.batey.kafka.unit.KafkaUnit;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.source.Index;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
// Test ignored until kafka-unit dependency for kafka v2.2.+ is released after merging PR https://github.com/chbatey/kafka-unit/pull/69
@Ignore
public class KafkaSourceTest {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSourceTest.class);
    private static final KafkaUnit kafkaServer = new KafkaUnit(5000, 9092);
    private static final Random random = new Random(System.currentTimeMillis());
    // Monotonically increasing suffix so each test runs against a fresh topic.
    private static final AtomicInteger topicNum = new AtomicInteger(1);

    @BeforeClass
    public static void startup() {
        kafkaServer.startup();
    }

    @AfterClass
    public static void shutdown() {
        kafkaServer.shutdown();
    }

    /**
     * Builds a mocked job {@link Context} that returns the given parameters together with a
     * fixed single-worker {@link WorkerInfo} and job id. Extracted to remove the identical
     * mocking boilerplate previously copy-pasted into every test.
     */
    private static Context mockContext(Parameters params) {
        Context context = mock(Context.class);
        when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
        when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
        return context;
    }

    /** Publishes {@code numMessages} JSON messages of the form {@code {"messageNum":i}} to the topic. */
    private static void sendNumberedMessages(String testTopic, int numMessages) {
        for (int i = 0; i < numMessages; i++) {
            ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":" + i + "}");
            kafkaServer.sendMessages(keyedMessage);
        }
    }

    /** A single consumer on a single partition must receive all messages in publish order. */
    @Test
    public void testKafkaSourceSingleConsumerReadsAllMessagesInOrderFromSinglePartition() throws InterruptedException {
        String testTopic = "testTopic" + topicNum.incrementAndGet();
        int numPartitions = 1;
        kafkaServer.createTopic(testTopic, numPartitions);
        int numMessages = 10;
        sendNumberedMessages(testTopic, numMessages);

        KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
        Context context = mockContext(params);

        Index index = new Index(0, 10);
        Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
        final CountDownLatch latch = new CountDownLatch(numMessages);
        final AtomicInteger counter = new AtomicInteger(0);
        sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                // Single partition + single consumer: delivery order must match publish order.
                assertEquals(counter.getAndIncrement(), parsedEvent.get().get("messageNum"));
                LOGGER.info("got message on topic {} consumer Id {}", parsedEvent.get(), kafkaAckable.getKafkaData().getMantisKafkaConsumerId());
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .subscribe();
        assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
        kafkaServer.deleteTopic(testTopic);
    }

    /** Malformed JSON messages are skipped; all valid messages still arrive in order. */
    @Test
    public void testKafkaSourceSingleConsumerHandlesMessageParseFailures() throws InterruptedException {
        String testTopic = "testTopic" + topicNum.incrementAndGet();
        int numPartitions = 1;
        kafkaServer.createTopic(testTopic, numPartitions);
        int numMessages = 10;
        // Interleave valid JSON with malformed JSON (unterminated key); only valid
        // messages should surface from the source.
        for (int i = 0; i < numMessages; i++) {
            ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(testTopic, "{\"messageNum\":" + i + "}");
            kafkaServer.sendMessages(keyedMessage);
            ProducerRecord<String, String> invalidJsonMessage = new ProducerRecord<>(testTopic, "{\"messageNum:" + i + "}");
            kafkaServer.sendMessages(invalidJsonMessage);
        }

        KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
        Context context = mockContext(params);

        Index index = new Index(0, 10);
        Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
        final CountDownLatch latch = new CountDownLatch(numMessages);
        final AtomicInteger counter = new AtomicInteger(0);
        sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                assertEquals(counter.getAndIncrement(), parsedEvent.get().get("messageNum"));
                LOGGER.info("got message on topic {} consumer Id {}", parsedEvent.get(), kafkaAckable.getKafkaData().getMantisKafkaConsumerId());
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .subscribe();
        assertTrue("timed out waiting to get all messages from Kafka", latch.await(30, TimeUnit.SECONDS));
        kafkaServer.deleteTopic(testTopic);
    }

    /** Two consumers over two partitions must together consume every message exactly once. */
    @Test
    public void testKafkaSourceMultipleConsumersReadsAllMessagesFromMultiplePartitions() throws InterruptedException {
        String testTopic = "testTopic" + topicNum.incrementAndGet();
        int numPartitions = 2;
        kafkaServer.createTopic(testTopic, numPartitions);
        int numMessages = 10;
        Set<Integer> outstandingMsgs = new ConcurrentSkipListSet<>();
        sendNumberedMessages(testTopic, numMessages);
        for (int i = 0; i < numMessages; i++) {
            outstandingMsgs.add(i);
        }

        KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, 2,
                KafkaSourceParameters.TOPIC, testTopic,
                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
        Context context = mockContext(params);

        Index index = new Index(0, 10);
        Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
        final CountDownLatch latch = new CountDownLatch(numMessages);
        final Map<Integer, Integer> lastMessageNumByConsumerId = new ConcurrentHashMap<>();
        sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                Integer messageNum = (Integer) parsedEvent.get().get("messageNum");
                assertTrue(outstandingMsgs.contains(messageNum));
                outstandingMsgs.remove(messageNum);
                int mantisKafkaConsumerId = kafkaAckable.getKafkaData().getMantisKafkaConsumerId();
                lastMessageNumByConsumerId.putIfAbsent(mantisKafkaConsumerId, -1);
                // assert consumption of higher message numbers across consumer instances
                assertTrue(messageNum > lastMessageNumByConsumerId.get(mantisKafkaConsumerId));
                lastMessageNumByConsumerId.put(mantisKafkaConsumerId, messageNum);
                LOGGER.info("got message on topic {} consumer id {}", parsedEvent.get(), mantisKafkaConsumerId);
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .doOnError(t -> {
                LOGGER.error("caught unexpected exception", t);
                fail("test failed due to unexpected error " + t.getMessage());
            })
            .subscribe();
        assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
        // Every message was consumed exactly once and both consumers saw data.
        assertEquals(0, outstandingMsgs.size());
        assertTrue(lastMessageNumByConsumerId.keySet().size() == 2);
        lastMessageNumByConsumerId.keySet().forEach(consumerId -> {
            assertTrue(lastMessageNumByConsumerId.get(consumerId) >= 0);
        });
        kafkaServer.deleteTopic(testTopic);
    }

    /** With static partition assignment, each of the 3 consumers owns its own partition. */
    @Test
    public void testKafkaSourceMultipleConsumersStaticPartitionAssignment() throws InterruptedException {
        String testTopic = "testTopic" + topicNum.incrementAndGet();
        int numConsumers = 3;
        int numPartitions = 3;
        kafkaServer.createTopic(testTopic, numPartitions);
        int numMessages = 10;
        Set<Integer> outstandingMsgs = new ConcurrentSkipListSet<>();
        sendNumberedMessages(testTopic, numMessages);
        for (int i = 0; i < numMessages; i++) {
            outstandingMsgs.add(i);
        }

        KafkaSource kafkaSource = new KafkaSource(new NoopRegistry());
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, numConsumers,
                KafkaSourceParameters.TOPIC, testTopic,
                KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, true,
                KafkaSourceParameters.TOPIC_PARTITION_COUNTS, testTopic + ":" + numPartitions,
                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, "testKafkaConsumer-" + random.nextInt());
        Context context = mockContext(params);

        // Force all consumer instances to be created on same JVM by setting total number of workers for this job to 1
        int totalNumWorkerForJob = 1;
        Index index = new Index(0, totalNumWorkerForJob);
        Observable<Observable<KafkaAckable>> sourceObs = kafkaSource.call(context, index);
        final CountDownLatch latch = new CountDownLatch(numMessages);
        final Map<Integer, Integer> lastMessageNumByConsumerId = new ConcurrentHashMap<>();
        sourceObs
            .flatMap(kafkaAckableObs -> kafkaAckableObs)
            .map(kafkaAckable -> {
                Optional<Map<String, Object>> parsedEvent = kafkaAckable.getKafkaData().getParsedEvent();
                assertTrue(parsedEvent.isPresent());
                Integer messageNum = (Integer) parsedEvent.get().get("messageNum");
                assertTrue(outstandingMsgs.contains(messageNum));
                outstandingMsgs.remove(messageNum);
                int mantisKafkaConsumerId = kafkaAckable.getKafkaData().getMantisKafkaConsumerId();
                lastMessageNumByConsumerId.putIfAbsent(mantisKafkaConsumerId, -1);
                // assert consumption of higher message numbers across consumer instances
                assertTrue(messageNum > lastMessageNumByConsumerId.get(mantisKafkaConsumerId));
                lastMessageNumByConsumerId.put(mantisKafkaConsumerId, messageNum);
                LOGGER.info("got message on topic {} consumer id {}", parsedEvent.get(), mantisKafkaConsumerId);
                kafkaAckable.ack();
                latch.countDown();
                return parsedEvent;
            })
            .doOnError(t -> {
                LOGGER.error("caught unexpected exception", t);
                fail("test failed due to unexpected error " + t.getMessage());
            })
            .subscribe();
        assertTrue("timed out waiting to get all messages from Kafka", latch.await(10, TimeUnit.SECONDS));
        // Every message consumed exactly once, and every one of the 3 consumers saw data.
        assertEquals(0, outstandingMsgs.size());
        assertTrue(lastMessageNumByConsumerId.keySet().size() == numConsumers);
        lastMessageNumByConsumerId.keySet().forEach(consumerId -> {
            assertTrue(lastMessageNumByConsumerId.get(consumerId) >= 0);
        });
        kafkaServer.deleteTopic(testTopic);
    }
}
| 4,478 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.Map;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.junit.Test;
import org.mockito.stubbing.Answer;
/**
 * Unit tests for {@code MantisKafkaConsumerConfig}, verifying the default Kafka consumer
 * properties and that job parameters override them.
 */
public class MantisKafkaConsumerConfigTest {
    /**
     * With no job parameters set, every Kafka {@link ConsumerConfig} property should fall
     * back to its {@code MantisKafkaConsumerConfig} default.
     */
    @Test
    public void testDefaultConsumerConfig() {
        Context context = mock(Context.class);
        Parameters params = ParameterTestUtils.createParameters();
        when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaConsumerConfig mantisKafkaConsumerConfig = new MantisKafkaConsumerConfig(context);
        Map<String, Object> consumerProperties = mantisKafkaConsumerConfig.getConsumerProperties();
        assertEquals(Boolean.valueOf(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_ENABLED), consumerProperties.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_COMMIT_INTERVAL_MS, consumerProperties.get(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_AUTO_OFFSET_RESET, consumerProperties.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MAX_WAIT_MS, consumerProperties.get(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_FETCH_MIN_BYTES, consumerProperties.get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_HEARTBEAT_INTERVAL_MS, consumerProperties.get(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_SESSION_TIMEOUT_MS, consumerProperties.get(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_KEY_DESERIALIZER, consumerProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_VALUE_DESERIALIZER, consumerProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_PARTITION_FETCH_BYTES, consumerProperties.get(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_RECEIVE_BUFFER_BYTES, consumerProperties.get(ConsumerConfig.RECEIVE_BUFFER_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_SEND_BUFFER_BYTES, consumerProperties.get(ConsumerConfig.SEND_BUFFER_CONFIG));
        // List-valued properties (servers, metric reporters, assignors) are wrapped in lists by the config.
        assertEquals(Arrays.asList(MantisKafkaConsumerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), consumerProperties.get(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG));
        assertEquals(Arrays.asList(JmxReporter.class.getName()), consumerProperties.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
        assertEquals(Arrays.asList(RangeAssignor.class.getName()), consumerProperties.get(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.getGroupId(), consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_INTERVAL_MS, consumerProperties.get(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_MAX_POLL_RECORDS, consumerProperties.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG));
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_REQUEST_TIMEOUT_MS, consumerProperties.get(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG));
    }
    /**
     * Job parameters prefixed with {@code KafkaSourceParameters.PREFIX} should override the
     * corresponding Kafka consumer properties, while non-Kafka parameters (such as the topic)
     * must not leak into the consumer properties.
     */
    @Test
    public void testJobParamOverrides() {
        Context context = mock(Context.class);
        String testTopic = "topic123";
        String testConsumerGroupId = "testKafkaConsumer-1";
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
                                                                KafkaSourceParameters.PREFIX + ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest",
                                                                KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, testConsumerGroupId,
                                                                KafkaSourceParameters.PREFIX + ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, 500);
        when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaConsumerConfig mantisKafkaConsumerConfig = new MantisKafkaConsumerConfig(context);
        Map<String, Object> consumerProperties = mantisKafkaConsumerConfig.getConsumerProperties();
        // MantisKafkaConsumerConfig only affects Kafka's ConsumerConfig defined properties
        assertFalse(ConsumerConfig.configNames().contains(KafkaSourceParameters.TOPIC));
        assertFalse(consumerProperties.containsKey(KafkaSourceParameters.TOPIC));
        assertEquals("earliest", consumerProperties.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
        assertEquals(testConsumerGroupId, consumerProperties.get(ConsumerConfig.GROUP_ID_CONFIG));
        assertEquals(500, consumerProperties.get(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG));
    }
}
| 4,479 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.junit.Test;
import org.mockito.stubbing.Answer;
/**
 * Unit tests for {@code MantisKafkaSourceConfig}, verifying the source-level defaults
 * (checkpointing, parsing, partition assignment) and job-parameter overrides.
 */
public class MantisKafkaSourceConfigTest {
    /**
     * With only the topic set, every source-level setting should fall back to its
     * {@code MantisKafkaSourceConfig}/{@code MantisKafkaConsumerConfig} default.
     */
    @Test
    public void testDefaultConsumerConfig() {
        Context context = mock(Context.class);
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, "testTopic");
        when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
        assertEquals(MantisKafkaSourceConfig.DEFAULT_CONSUMER_POLL_TIMEOUT_MS, mantisKafkaSourceConfig.getConsumerPollTimeoutMs());
        assertEquals(CheckpointStrategyOptions.NONE, mantisKafkaSourceConfig.getCheckpointStrategy());
        assertEquals(MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS, mantisKafkaSourceConfig.getCheckpointIntervalMs());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING, mantisKafkaSourceConfig.getMaxBytesInProcessing());
        assertEquals(ParserType.SIMPLE_JSON.getPropName(), mantisKafkaSourceConfig.getMessageParserType());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER, mantisKafkaSourceConfig.getNumConsumerInstances());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE, mantisKafkaSourceConfig.getParseMessageInSource());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS, mantisKafkaSourceConfig.getRetryCheckpointCheckDelayMs());
        assertEquals(MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN, mantisKafkaSourceConfig.getStaticPartitionAssignmentEnabled());
        // No explicit partition counts were configured, so this should be absent.
        assertEquals(Optional.empty(), mantisKafkaSourceConfig.getTopicPartitionCounts());
        assertEquals(Arrays.asList("testTopic"), mantisKafkaSourceConfig.getTopics());
    }
    /**
     * Job parameters should override the checkpoint interval, static partition assignment
     * flag, and the per-topic partition counts (parsed from the "topic:count" format).
     */
    @Test
    public void testJobParamOverrides() {
        Context context = mock(Context.class);
        String testTopic = "topic123";
        int checkpointIntervalOverride = 100;
        boolean staticPartitionAssignEnableOverride = true;
        Parameters params = ParameterTestUtils.createParameters(KafkaSourceParameters.TOPIC, testTopic,
                                                                KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, checkpointIntervalOverride,
                                                                KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, staticPartitionAssignEnableOverride,
                                                                KafkaSourceParameters.TOPIC_PARTITION_COUNTS, testTopic+":1024");
        when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
        MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
        assertEquals(checkpointIntervalOverride, mantisKafkaSourceConfig.getCheckpointIntervalMs());
        assertEquals(staticPartitionAssignEnableOverride, mantisKafkaSourceConfig.getStaticPartitionAssignmentEnabled());
        Map<String, Integer> topicPartitionCounts = new HashMap<>();
        topicPartitionCounts.put(testTopic, 1024);
        assertEquals(Optional.ofNullable(topicPartitionCounts), mantisKafkaSourceConfig.getTopicPartitionCounts());
    }
}
| 4,480 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/checkpoint/strategy/FileBasedOffsetCheckpointStrategyTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.strategy;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import java.time.Instant;
import java.util.Collections;
import java.util.Date;
import java.util.Optional;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataDeserializer;
import io.mantisrx.connector.kafka.source.serde.OffsetAndMetadataSerializer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
/**
 * Unit tests for {@code FileBasedOffsetCheckpointStrategy} and the custom Jackson
 * serializer/deserializer for Kafka's {@link OffsetAndMetadata}.
 */
public class FileBasedOffsetCheckpointStrategyTest {

    private FileBasedOffsetCheckpointStrategy strategy = new FileBasedOffsetCheckpointStrategy();

    /**
     * Persists an offset checkpoint for a topic partition and verifies that loading the
     * checkpoint returns the same offset and metadata.
     */
    @Test
    public void testSaveAndLoadCheckpoint() {
        final TopicPartition topicPartition = new TopicPartition("test-topic", 1);
        strategy.init(Collections.singletonMap(FileBasedOffsetCheckpointStrategy.CHECKPOINT_DIR_PROP, FileBasedOffsetCheckpointStrategy.DEFAULT_CHECKPOINT_DIR));
        final OffsetAndMetadata oam = new OffsetAndMetadata(100, Date.from(Instant.now()).toString());
        strategy.persistCheckpoint(Collections.singletonMap(topicPartition, oam));
        final Optional<OffsetAndMetadata> actual = strategy.loadCheckpoint(topicPartition);
        assertEquals(true, actual.isPresent());
        assertEquals(oam, actual.get());
    }

    /**
     * Round-trips an {@link OffsetAndMetadata} through the custom Jackson module and
     * verifies equality.
     *
     * <p>Fix: the previous version caught {@link IOException} and only called
     * {@code printStackTrace()}, so the test silently passed when serialization failed.
     * The exception now propagates and fails the test.
     */
    @Test
    public void testOffsetAndMetadataSerialization() throws IOException {
        OffsetAndMetadata expected = new OffsetAndMetadata(100, "tempmeta");
        final SimpleModule module = new SimpleModule().addSerializer(OffsetAndMetadata.class, new OffsetAndMetadataSerializer())
                                                      .addDeserializer(OffsetAndMetadata.class, new OffsetAndMetadataDeserializer());
        final ObjectMapper mapper = new ObjectMapper()
                .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.registerModule(module);
        final String json = mapper.writeValueAsString(expected);
        final OffsetAndMetadata actual = mapper.readValue(json, OffsetAndMetadata.class);
        assertEquals(expected, actual);
    }
}
| 4,481 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/source/assignor/StaticPartitionAssignorTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.assignor;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class StaticPartitionAssignorTest {
private static final Logger LOGGER = LoggerFactory.getLogger(StaticPartitionAssignorTest.class);
@Test
public void testStaticAssign1() {
Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
assertTrue(assignedPartitions.size() >= 1 && assignedPartitions.size() <= 2);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
    /**
     * Exercises assignment when there are more consumers (40) than total partitions
     * (15 topics with counts cycling 1,2).
     */
    @Test
    public void testStaticAssignMoreConsumersThanPartitions() {
        Map<String, Integer> topicPartitionCounts = generateTopicPartitionCounts(15, 2);
        LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
        int totalNumConsumers = 40;
        StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
        for (int i = 0; i < totalNumConsumers; i++) {
            List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
            // NOTE(review): the size assertion below is commented out, so this test only
            // verifies that assignment does not throw — it makes no claim about assignment
            // sizes. Consider restoring an assertion (e.g. size <= 1) once the expected
            // behavior for consumers left without partitions is confirmed.
            // assertTrue(assignedPartitions.size() >= 1 && assignedPartitions.size() <= 2);
            LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
        }
    }
private Map<String, Integer> generateTopicPartitionCounts(int numTopics, int partitionRange) {
Map<String, Integer> topicPartitionMap = new HashMap<>();
int partitionCnt = 1;
for (int i = 0; i < numTopics; i++) {
topicPartitionMap.put("topic_" + i, partitionCnt++);
if (partitionCnt == partitionRange + 1) {
partitionCnt = 1;
}
}
return topicPartitionMap;
}
@Test
public void testStaticAssign2() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, topicPartitionCounts, totalNumConsumers);
assertEquals(70, assignedPartitions.size());
assignmentMap.put("" + i, assignedPartitions);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
@Test
public void testStaticAssign3() {
String tpList = "testtopic:1,testtopic2:7,testtopic3:1,testtopic4:46";
Map<String, Integer> tpMap = new HashMap<>();
String[] topicPartitionTuples = tpList.split(",");
for (int i = 0; i < topicPartitionTuples.length; i++) {
String[] topPart = topicPartitionTuples[i].split(":");
tpMap.put(topPart[0], Integer.valueOf(topPart[1]));
}
int totalNumConsumers = 12;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
for (int i = 0; i < totalNumConsumers; i++) {
List<TopicPartition> assignedPartitions = partitionAssigner.assignPartitionsToConsumer(i, tpMap, totalNumConsumers);
// assertEquals(70, assignedPartitions.size());
assignmentMap.put("" + i, assignedPartitions);
LOGGER.info("Consumer[{}] -> {}", i, assignedPartitions);
}
}
@Test
public void invalidConsumerIndexTest() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
int totalNumConsumers = 20;
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
partitionAssigner.assignPartitionsToConsumer(-1, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
}
try {
partitionAssigner.assignPartitionsToConsumer(100, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
}
}
@Test
public void invalidTotalConsumersTest() {
Map<String, Integer> topicPartitionCounts = new HashMap<>();
topicPartitionCounts.put("topic_0", 1400);
LOGGER.info("TopicPartitionMap {}", topicPartitionCounts);
Map<String, List<TopicPartition>> assignmentMap = new HashMap<>();
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
int totalNumConsumers = -1;
partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, totalNumConsumers);
fail();
} catch (IllegalArgumentException e) {
}
}
@Test
public void invalidTopicPartitionMapTest() {
Map<String, Integer> topicPartitionCounts = null;
StaticPartitionAssignor partitionAssigner = new StaticPartitionAssignorImpl();
try {
partitionAssigner.assignPartitionsToConsumer(1, topicPartitionCounts, 20);
fail();
} catch (NullPointerException e) {
}
}
}
| 4,482 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/sink/MantisKafkaProducerConfigTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.Arrays;
import java.util.Map;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;
import org.mockito.stubbing.Answer;
public class MantisKafkaProducerConfigTest {

    @Test
    public void testDefaultKafkaProducerConfig() {
        final Context context = mock(Context.class);
        final Parameters jobParams = ParameterTestUtils.createParameters();
        when(context.getParameters()).then((Answer<Parameters>) invocation -> jobParams);

        final MantisKafkaProducerConfig config = new MantisKafkaProducerConfig(context);
        final Map<String, Object> props = config.getProducerProperties();

        // with no job params set, serializers, bootstrap servers and metric reporters fall back to defaults
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
        assertEquals(Arrays.asList(MantisKafkaProducerConfig.DEFAULT_BOOTSTRAP_SERVERS_CONFIG), props.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
        assertEquals(Arrays.asList(JmxReporter.class.getName()), props.get(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG));
    }

    @Test
    public void testJobParamOverrides() {
        final Context context = mock(Context.class);
        final Parameters jobParams = ParameterTestUtils.createParameters(KafkaSinkJobParameters.PREFIX + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        when(context.getParameters()).then((Answer<Parameters>) invocation -> jobParams);

        final MantisKafkaProducerConfig config = new MantisKafkaProducerConfig(context);
        final Map<String, Object> props = config.getProducerProperties();

        // the overridden key serializer is picked up; untouched settings keep their defaults
        assertEquals(StringSerializer.class, props.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG));
        assertEquals(ByteArraySerializer.class, props.get(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG));
    }
}
| 4,483 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/test/java/io/mantisrx/connector/kafka/sink/KafkaSinkTest.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.sink;
import java.util.List;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import com.netflix.spectator.api.NoopRegistry;
import info.batey.kafka.unit.KafkaUnit;
import io.mantisrx.connector.kafka.source.KafkaSourceTest;
import io.mantisrx.connector.kafka.ParameterTestUtils;
import io.mantisrx.runtime.MantisJobDurationType;
import io.mantisrx.runtime.WorkerInfo;
import io.mantisrx.runtime.parameter.Parameters;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.PortRequest;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
// Test ignored until kafka-unit dependency for kafka v2.2.+ is released after merging PR https://github.com/chbatey/kafka-unit/pull/69
@Ignore
public class KafkaSinkTest {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSourceTest.class);
private static final KafkaUnit kafkaServer = new KafkaUnit(5000, 9092);
private static final Random random = new Random(System.currentTimeMillis());
private static final AtomicInteger topicNum = new AtomicInteger();
@BeforeClass
public static void startup() {
kafkaServer.startup();
}
@AfterClass
public static void shutdown() {
kafkaServer.shutdown();
}
@Test
public void testKafkaSink() throws InterruptedException {
String testTopic = "testTopic" + topicNum.incrementAndGet();
int numPartitions = 1;
kafkaServer.createTopic(testTopic, numPartitions);
int numMessages = 10;
KafkaSink<String> kafkaSink = new KafkaSink<>(new NoopRegistry(), s -> s.getBytes());
Context context = mock(Context.class);
Parameters params = ParameterTestUtils.createParameters(KafkaSinkJobParameters.TOPIC, testTopic);
when(context.getParameters()).then((Answer<Parameters>) invocation -> params);
when(context.getWorkerInfo()).then((Answer<WorkerInfo>) invocation -> new WorkerInfo("testJobName", "testJobName-1", 1, 0, 1, MantisJobDurationType.Perpetual, "1.1.1.1"));
when(context.getJobId()).then((Answer<String>) invocation -> "testJobName-1");
kafkaSink.call(context, mock(PortRequest.class), Observable.range(0, numMessages).map(x -> String.valueOf(x)));
List<String> messages = kafkaServer.readAllMessages(testTopic);
LOGGER.info("got {}", messages);
assertEquals(numMessages, messages.size());
for (int i = 0; i < numMessages; i++) {
assertEquals(i, Integer.parseInt(messages.get(i)));
}
kafkaServer.deleteTopic(testTopic);
}
} | 4,484 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaDataNotification.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
/**
 * Notification describing the outcome (ack / nack / error) of processing a {@link KafkaData}
 * payload, together with the time the processing took.
 * <p>
 * Instances are immutable; create them via the static factories {@link #ack}, {@link #nack}
 * and {@link #error}.
 */
public class KafkaDataNotification {

    /** Outcome kind carried by a notification. */
    public enum Kind {
        ACK,
        NACK,
        ERR
    }

    /**
     * Creates a positive acknowledgement for a successfully processed payload.
     *
     * @param event the payload that was processed
     * @param elapsedMillis processing time in milliseconds
     * @return an {@link Kind#ACK} notification with no error
     */
    public static KafkaDataNotification ack(KafkaData event, long elapsedMillis) {
        return new KafkaDataNotification(event, Kind.ACK, null, elapsedMillis);
    }

    /**
     * Creates a negative acknowledgement for a payload that was not processed.
     *
     * @param event the payload that was not processed
     * @param elapsedMillis time elapsed until the nack, in milliseconds
     * @return a {@link Kind#NACK} notification with no error
     */
    public static KafkaDataNotification nack(KafkaData event, long elapsedMillis) {
        return new KafkaDataNotification(event, Kind.NACK, null, elapsedMillis);
    }

    /**
     * Creates an error notification for a payload whose processing threw.
     *
     * @param request the payload being processed when the error occurred
     * @param t the error encountered
     * @param elapsedMillis time elapsed until the error, in milliseconds
     * @return a {@link Kind#ERR} notification carrying {@code t}
     */
    public static KafkaDataNotification error(KafkaData request, Throwable t, long elapsedMillis) {
        return new KafkaDataNotification(request, Kind.ERR, t, elapsedMillis);
    }

    private final KafkaData value;
    private final Kind kind;
    private final Throwable error;
    // never mutated after construction, so declared final (was mutable)
    private final long elapsedMillis;

    protected KafkaDataNotification(KafkaData value, Kind kind, Throwable error, long elapsedMillis) {
        this.value = value;
        this.kind = kind;
        this.error = error;
        this.elapsedMillis = elapsedMillis;
    }

    /** @return the error carried by this notification, or {@code null} if there is none */
    public Throwable getError() {
        return error;
    }

    /** @return true if this notification carries a non-null error */
    public boolean hasError() {
        return error != null;
    }

    /** @return the outcome kind (ACK, NACK or ERR) of this notification */
    public Kind getKind() {
        return kind;
    }

    /** @return the Kafka payload this notification refers to, may be {@code null} */
    public KafkaData getValue() {
        return value;
    }

    /** @return true if this notification carries a non-null payload */
    public boolean hasValue() {
        return value != null;
    }

    /** @return true if the kind is {@link Kind#ERR} */
    public boolean isError() {
        return kind.equals(Kind.ERR);
    }

    /** @return true if the kind is {@link Kind#ACK}; NACK and ERR both report false */
    public boolean isSuccess() {
        return kind.equals(Kind.ACK);
    }

    /**
     * Time it took to execute the operation for which this notification is generated.
     *
     * @return elapsed time in milliseconds
     */
    public long getElapsed() {
        return elapsedMillis;
    }
}
| 4,485 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaAckable.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import rx.subjects.SerializedSubject;
import rx.subjects.Subject;
import java.util.concurrent.TimeUnit;
/**
* Ackable used to wrap the data read from kafka to allow providing feedback to the source when the payload is consumed.
*/
public class KafkaAckable {
private final Subject<KafkaDataNotification, KafkaDataNotification> subject;
private final KafkaData kafkaData;
private final long createTimeNano = System.nanoTime();
public KafkaAckable(KafkaData data, SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public KafkaAckable(KafkaData data, Subject<KafkaDataNotification, KafkaDataNotification> ackSubject) {
this.kafkaData = data;
this.subject = ackSubject;
}
public void ack() {
KafkaDataNotification n = KafkaDataNotification.ack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* NACK indicating that the message was not processed and should be
* returned to the source.
*
*/
public void nack() {
KafkaDataNotification n = KafkaDataNotification.nack(getKafkaData(),
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* There was an error processing the message. Depending on the implementation
* of the source the message may either be,
* 1. Dropped
* 2. Replayed
* 3. Posted to a poison queue
* @param t
*/
public void error(Throwable t) {
KafkaDataNotification n = KafkaDataNotification.error(getKafkaData(), t,
TimeUnit.MILLISECONDS.convert(System.nanoTime() - createTimeNano, TimeUnit.NANOSECONDS));
subject.onNext(n);
}
/**
* @return Get the internal message being Ackable'd
*/
public KafkaData getKafkaData() {
return kafkaData;
}
}
| 4,486 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaData.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
/**
 * Immutable-ish value holding one record read from Kafka: its coordinates (topic, partition,
 * offset), the raw bytes, an optional already-parsed representation, the optional record key,
 * and the id of the consumer instance that read it.
 */
public class KafkaData {

    private final String topic;
    private final int partition;
    private final long offset;
    private final byte[] rawBytes;
    /* parsedEvent is present if the raw bytes were already decoded */
    private volatile Optional<Map<String, Object>> parsedEvent;
    private final Optional<String> key;
    private final String streamId;
    private int mantisKafkaConsumerId;

    public KafkaData(String topic,
                     int partition,
                     long offset,
                     byte[] rawBytes,
                     Optional<Map<String, Object>> parsedEvent,
                     Optional<String> key,
                     int mantisKafkaConsumerId) {
        this.topic = topic;
        this.partition = partition;
        this.offset = offset;
        this.rawBytes = rawBytes;
        this.parsedEvent = parsedEvent;
        this.key = key;
        this.mantisKafkaConsumerId = mantisKafkaConsumerId;
        // stream id is "<topic>-<partition>"; precomputed once since it never changes
        this.streamId = topic + '-' + partition;
    }

    public KafkaData(ConsumerRecord<String, byte[]> m,
                     Optional<Map<String, Object>> parsedEvent,
                     Optional<String> key,
                     int mantisKafkaConsumerId) {
        this(m.topic(), m.partition(), m.offset(), m.value(), parsedEvent, key, mantisKafkaConsumerId);
    }

    public String getTopic() {
        return topic;
    }

    public int getPartition() {
        return partition;
    }

    public long getOffset() {
        return offset;
    }

    public byte[] getRawBytes() {
        return rawBytes;
    }

    public int getMantisKafkaConsumerId() {
        return mantisKafkaConsumerId;
    }

    public String getStreamId() {
        return this.streamId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        final KafkaData that = (KafkaData) o;
        // streamId is derived from topic+partition, so it is deliberately excluded here
        if (partition != that.partition || offset != that.offset) {
            return false;
        }
        if (mantisKafkaConsumerId != that.mantisKafkaConsumerId) {
            return false;
        }
        return topic.equals(that.topic)
                && Arrays.equals(rawBytes, that.rawBytes)
                && parsedEvent.equals(that.parsedEvent)
                && key.equals(that.key);
    }

    @Override
    public int hashCode() {
        // combine Objects.hash of scalar fields with the content hash of the byte payload
        int result = Objects.hash(topic, partition, offset, parsedEvent, key, mantisKafkaConsumerId);
        result = 31 * result + Arrays.hashCode(rawBytes);
        return result;
    }

    public Optional<Map<String, Object>> getParsedEvent() {
        return parsedEvent;
    }

    public void setParsedEvent(final Map<String, Object> parsedEvent) {
        this.parsedEvent = Optional.ofNullable(parsedEvent);
    }

    public Optional<String> getKey() {
        return key;
    }

    @Override
    public String toString() {
        final StringBuilder sb = new StringBuilder("KafkaData{");
        sb.append("topic='").append(topic).append('\'');
        sb.append(", partition=").append(partition);
        sb.append(", offset=").append(offset);
        sb.append(", rawBytes=").append(Arrays.toString(rawBytes));
        sb.append(", parsedEvent=").append(parsedEvent);
        sb.append(", key=").append(key);
        sb.append('}');
        return sb.toString();
    }
}
| 4,487 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/KafkaSourceParameters.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka;
/**
 * Job parameter names understood by the Mantis Kafka source.
 * <p>
 * Parameters prefixed with {@link #PREFIX} are passed through to the underlying Kafka consumer
 * configuration; the remaining names configure Mantis-side behavior (checkpointing, parsing,
 * static partition assignment).
 */
public class KafkaSourceParameters {
    public static final String PREFIX = "kafka.source.consumer.";
    public static final String CHECKPOINT_STRATEGY = "checkpointStrategy";
    public static final String CONSUMER_POLL_TIMEOUT_MS = "consumerPollTimeoutMs";
    public static final String NUM_KAFKA_CONSUMER_PER_WORKER = "numKafkaConsumerPerWorker";
    public static final String TOPIC = PREFIX + "topic";
    public static final String MAX_BYTES_IN_PROCESSING = "maxBytesInProcessing";
    public static final String PARSER_TYPE = "messageParserType";
    public static final String PARSE_MSG_IN_SOURCE = "parseMessageInKafkaConsumerThread";
    public static final String RETRY_CHECKPOINT_CHECK_DELAY_MS = "retryCheckpointCheckDelayMs";
    public static final String CHECKPOINT_INTERVAL_MS = "checkpointIntervalMs";
    // Enable static partition assignment, this disables Kafka's default consumer group management
    public static final String ENABLE_STATIC_PARTITION_ASSIGN = "enableStaticPartitionAssign";
    // Number of partitions per topic, used only when Static Partition assignment is enabled
    public static final String TOPIC_PARTITION_COUNTS = "numPartitionsPerTopic";

    // constants-only holder; prevent instantiation
    private KafkaSourceParameters() {
    }
}
| 4,488 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaSource.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Registry;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.connector.kafka.source.serde.ParseException;
import io.mantisrx.connector.kafka.source.serde.Parser;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaData;
import io.mantisrx.connector.kafka.KafkaDataNotification;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.KafkaAckable;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.type.BooleanParameter;
import io.mantisrx.runtime.parameter.type.IntParameter;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import io.mantisrx.runtime.source.Index;
import io.mantisrx.runtime.source.Source;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.record.InvalidRecordException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.observables.SyncOnSubscribe;
import rx.schedulers.Schedulers;
import rx.subjects.PublishSubject;
import rx.subjects.SerializedSubject;
import java.util.*;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.CONSUMER_RECORD_OVERHEAD_BYTES;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_MAX_BYTES_IN_PROCESSING;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER;
import static io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig.DEFAULT_PARSE_MSG_IN_SOURCE;
/**
* Mantis Kafka Source wraps a kafka v2.2.+ consumer with back pressure semantics, the consumer polls data from kafka
* only as fast as the data is processed & ack'ed by the processing stage of the Mantis Job.
* <p>
* The {@value KafkaSourceParameters#NUM_KAFKA_CONSUMER_PER_WORKER} Job param decides the number of Kafka consumer instances spawned on each Mantis worker,
* Each kafka consumer instance runs in their own thread and poll data from kafka as part of the same consumer group
*/
public class KafkaSource implements Source<KafkaAckable> {
private static final Logger LOGGER = LoggerFactory.getLogger(KafkaSource.class);
private final AtomicBoolean done = new AtomicBoolean();
private final Map<Integer, MantisKafkaConsumer<?>> idToConsumerMap = new HashMap<>();
private final Registry registry;
private final SerializedSubject<KafkaDataNotification, KafkaDataNotification> ackSubject =
new SerializedSubject<>(PublishSubject.create());
    /**
     * Creates a KafkaSource that publishes consumer metrics to the given registry.
     *
     * @param registry spectator registry used by the per-consumer metrics
     */
    public KafkaSource(final Registry registry) {
        this.registry = registry;
    }
private Observable<MantisKafkaConsumer<?>> createConsumers(final Context context,
final MantisKafkaSourceConfig kafkaSourceConfig,
final int totalNumWorkers) {
final List<MantisKafkaConsumer<?>> consumers = new ArrayList<>();
for (int i = 0; i < kafkaSourceConfig.getNumConsumerInstances(); i++) {
final int consumerIndex = context.getWorkerInfo().getWorkerIndex() + (totalNumWorkers * i);
MantisKafkaConsumer<?> mantisKafkaConsumer = new MantisKafkaConsumer.Builder()
.withKafkaSourceConfig(kafkaSourceConfig)
.withTotalNumConsumersForJob(totalNumWorkers * kafkaSourceConfig.getNumConsumerInstances())
.withContext(context)
.withConsumerIndex(consumerIndex)
.withRegistry(registry)
.build();
idToConsumerMap.put(mantisKafkaConsumer.getConsumerId(), mantisKafkaConsumer);
LOGGER.info("created consumer {}", mantisKafkaConsumer);
consumers.add(mantisKafkaConsumer);
}
return Observable.from(consumers);
}
private int getPayloadSize(ConsumerRecord<String, byte[]> record) {
return record.value().length + CONSUMER_RECORD_OVERHEAD_BYTES;
}
    /**
     * Create an observable with back pressure semantics from the consumer records fetched using consumer.
     * <p>
     * Built on a {@link SyncOnSubscribe}: the consumer is only polled when the previous batch is
     * exhausted, so data is pulled from Kafka no faster than downstream consumes it. Checkpoints
     * are persisted whenever the trigger fires, before the next record is emitted.
     *
     * @param mantisKafkaConsumer non thread-safe KafkaConsumer
     * @param kafkaSourceConfig configuration for the Mantis Kafka Source
     * @return per-consumer Observable of {@link KafkaAckable}s, subscribed on a dedicated thread
     */
    private Observable<KafkaAckable> createBackPressuredConsumerObs(final MantisKafkaConsumer<?> mantisKafkaConsumer,
                                                                    final MantisKafkaSourceConfig kafkaSourceConfig) {
        CheckpointStrategy checkpointStrategy = mantisKafkaConsumer.getStrategy();
        final CheckpointTrigger trigger = mantisKafkaConsumer.getTrigger();
        final ConsumerMetrics consumerMetrics = mantisKafkaConsumer.getConsumerMetrics();
        final TopicPartitionStateManager partitionStateManager = mantisKafkaConsumer.getPartitionStateManager();
        int mantisKafkaConsumerId = mantisKafkaConsumer.getConsumerId();
        SyncOnSubscribe<Iterator<ConsumerRecord<String, byte[]>>, KafkaAckable> syncOnSubscribe = SyncOnSubscribe.createStateful(
            // state generator: do an initial poll and hand its record iterator to the loop below
            () -> {
                final ConsumerRecords<String, byte[]> records = mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
                if (LOGGER.isDebugEnabled()) {
                    LOGGER.debug("topic listing for consumer {}", mantisKafkaConsumer.listTopics());
                }
                LOGGER.info("consumer subscribed to topic-partitions {}", mantisKafkaConsumer.assignment());
                return records.iterator();
            },
            // next: emit at most one KafkaAckable per downstream request
            (consumerRecordIterator, observer) -> {
                Iterator<ConsumerRecord<String, byte[]>> it = consumerRecordIterator;
                final Set<TopicPartition> partitions = mantisKafkaConsumer.assignment();
                // persist a checkpoint (based on acked offsets) whenever the trigger fires
                if (trigger.shouldCheckpoint()) {
                    long startTime = System.currentTimeMillis();
                    final Map<TopicPartition, OffsetAndMetadata> checkpoint =
                        partitionStateManager.createCheckpoint(partitions);
                    checkpointStrategy.persistCheckpoint(checkpoint);
                    long now = System.currentTimeMillis();
                    consumerMetrics.recordCheckpointDelay(now - startTime);
                    consumerMetrics.incrementCommitCount();
                    trigger.reset();
                }
                if (!done.get()) {
                    try {
                        // previous batch exhausted -> poll Kafka for the next batch
                        if (!consumerRecordIterator.hasNext()) {
                            final ConsumerRecords<String, byte[]> consumerRecords =
                                mantisKafkaConsumer.poll(kafkaSourceConfig.getConsumerPollTimeoutMs());
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("poll returned {} records", consumerRecords.count());
                            }
                            it = consumerRecords.iterator();
                        }
                        if (it.hasNext()) {
                            final ConsumerRecord<String, byte[]> m = it.next();
                            final TopicPartition topicPartition = new TopicPartition(m.topic(), m.partition());
                            consumerMetrics.incrementInCount();
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("updating read offset to " + m.offset() + " read " + m.value());
                            }
                            if (m.value() != null) {
                                try {
                                    // feed payload size into the checkpoint trigger's byte budget
                                    trigger.update(getPayloadSize(m));
                                    if (kafkaSourceConfig.getParseMessageInSource()) {
                                        final Parser parser = ParserType.parser(kafkaSourceConfig.getMessageParserType()).getParser();
                                        if (parser.canParse(m.value())) {
                                            final Map<String, Object> parsedKafkaValue = parser.parseMessage(m.value());
                                            final KafkaData kafkaData = new KafkaData(m, Optional.ofNullable(parsedKafkaValue), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
                                            final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
                                            // record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
                                            partitionStateManager.recordMessageRead(topicPartition, m.offset());
                                            consumerMetrics.recordReadOffset(topicPartition, m.offset());
                                            observer.onNext(ackable);
                                        } else {
                                            // unparseable payload is dropped, counted as a parse failure
                                            consumerMetrics.incrementParseFailureCount();
                                        }
                                    } else {
                                        // parsing deferred to a later stage: emit the raw bytes
                                        final KafkaData kafkaData = new KafkaData(m, Optional.empty(), Optional.ofNullable(m.key()), mantisKafkaConsumerId);
                                        final KafkaAckable ackable = new KafkaAckable(kafkaData, ackSubject);
                                        // record offset consumed in TopicPartitionStateManager before onNext to avoid race condition with Ack being processed before the consume is recorded
                                        partitionStateManager.recordMessageRead(topicPartition, m.offset());
                                        consumerMetrics.recordReadOffset(topicPartition, m.offset());
                                        observer.onNext(ackable);
                                    }
                                } catch (ParseException pe) {
                                    consumerMetrics.incrementErrorCount();
                                    LOGGER.warn("failed to parse {}:{} message {}", m.topic(), m.partition(), m.value(), pe);
                                }
                            } else {
                                consumerMetrics.incrementKafkaMessageValueNullCount();
                            }
                        } else {
                            // caught up to the head of all assigned partitions; back off briefly
                            consumerMetrics.incrementWaitForDataCount();
                            if (LOGGER.isDebugEnabled()) {
                                LOGGER.debug("Reached head of partition, waiting for more data");
                            }
                            TimeUnit.MILLISECONDS.sleep(200);
                        }
                    } catch (TimeoutException toe) {
                        consumerMetrics.incrementWaitForDataCount();
                        if (LOGGER.isDebugEnabled()) {
                            LOGGER.debug("Reached head of partition waiting for more data");
                        }
                    } catch (OffsetOutOfRangeException oore) {
                        // stored offsets no longer exist on the broker; restart from the beginning
                        LOGGER.warn("offsets out of range " + oore.partitions() + " will seek to beginning", oore);
                        final Set<TopicPartition> topicPartitionSet = oore.partitions();
                        for (TopicPartition tp : topicPartitionSet) {
                            LOGGER.info("partition {} consumer position {}", tp, mantisKafkaConsumer.position(tp));
                        }
                        mantisKafkaConsumer.seekToBeginning(oore.partitions().toArray(new TopicPartition[oore.partitions().size()]));
                    } catch (InvalidRecordException ire) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("iterator error with invalid message. message will be dropped " + ire.getMessage());
                    } catch (KafkaException e) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("Other Kafka exception, message will be dropped. " + e.getMessage());
                    } catch (InterruptedException ie) {
                        LOGGER.error("consumer interrupted", ie);
                        Thread.currentThread().interrupt();
                    } catch (Exception e) {
                        consumerMetrics.incrementErrorCount();
                        LOGGER.warn("caught exception", e);
                    }
                } else {
                    // source was unsubscribed (done flag set); release the consumer
                    mantisKafkaConsumer.close();
                }
                return it;
            },
            // onUnsubscribe: close the consumer so the group rebalances promptly
            consumerRecordIterator -> {
                LOGGER.info("closing Kafka consumer on unsubscribe" + mantisKafkaConsumer.toString());
                mantisKafkaConsumer.close();
            });
        return Observable.create(syncOnSubscribe)
            .subscribeOn(Schedulers.newThread())
            .doOnUnsubscribe(() -> LOGGER.info("consumer {} stopped due to unsubscribe", mantisKafkaConsumerId))
            .doOnError((t) -> {
                LOGGER.error("consumer {} stopped due to error", mantisKafkaConsumerId, t);
                consumerMetrics.incrementErrorCount();
            })
            .doOnTerminate(() -> LOGGER.info("consumer {} terminated", mantisKafkaConsumerId));
    }
    @Override
    public Observable<Observable<KafkaAckable>> call(Context context, Index index) {
        // Mantis source entry point. Emits one inner Observable<KafkaAckable> per
        // Kafka consumer instance created for this worker; each inner observable is a
        // back-pressured stream of ackable Kafka messages.
        final int totalNumWorkers = index.getTotalNumWorkers();
        MantisKafkaSourceConfig mantisKafkaSourceConfig = new MantisKafkaSourceConfig(context);
        // Start the shared acknowledgement processor before any consumer produces data,
        // so acks are never dropped.
        startAckProcessor();
        return Observable.create((Observable.OnSubscribe<Observable<KafkaAckable>>) child -> {
            final Observable<MantisKafkaConsumer<?>> consumers =
                createConsumers(context, mantisKafkaSourceConfig, totalNumWorkers);
            // NOTE(review): this inner subscription is not added to the child subscriber's
            // subscription list, so it is presumably terminated via the 'done' flag set in
            // doOnUnsubscribe below rather than by Rx unsubscription — confirm.
            consumers.subscribe(consumer -> {
                final Observable<KafkaAckable> mantisKafkaAckableObs =
                    createBackPressuredConsumerObs(consumer, mantisKafkaSourceConfig);
                child.onNext(mantisKafkaAckableObs);
            });
        })
            .doOnUnsubscribe(() -> {
                LOGGER.info("unsubscribed");
                // Signals consumer poll loops to stop and close their KafkaConsumers.
                done.set(true);
            }).doOnSubscribe(() -> {
                LOGGER.info("subscribed");
                done.set(false);
            });
    }
private void processAckNotification(final KafkaDataNotification notification) {
final KafkaData kafkaData = notification.getValue();
final TopicPartition topicPartition = new TopicPartition(kafkaData.getTopic(), kafkaData.getPartition());
MantisKafkaConsumer<?> mantisKafkaConsumer = idToConsumerMap.get(kafkaData.getMantisKafkaConsumerId());
if (mantisKafkaConsumer != null) {
mantisKafkaConsumer.getPartitionStateManager().recordMessageAck(topicPartition, kafkaData.getOffset());
if (!notification.isSuccess()) {
// TODO provide a hook for the user to add handling for messages that could not be processed
LOGGER.debug("Got negative acknowledgement {}", notification);
}
mantisKafkaConsumer.getConsumerMetrics().incrementProcessedCount();
} else {
if (LOGGER.isDebugEnabled()) {
LOGGER.debug("got Ack for consumer id {} not in idToConsumerMap (topic {})", kafkaData.getMantisKafkaConsumerId(), kafkaData.getTopic());
}
}
}
private void startAckProcessor() {
LOGGER.info("Acknowledgement processor started");
ackSubject.subscribe((KafkaDataNotification notification) -> processAckNotification(notification));
}
    @Override
    public List<ParameterDefinition<?>> getParameters() {
        // Declares the Mantis job parameters understood by this Kafka source.
        // Only TOPIC is required; everything else has a sensible default.
        final List<ParameterDefinition<?>> params = new ArrayList<>();
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.TOPIC)
                       .description("Kafka topic to connect to")
                       .validator(Validators.notNullOrEmpty())
                       .required()
                       .build());
        // Optional parameters
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.CHECKPOINT_STRATEGY)
                       .description("checkpoint strategy one of " + CheckpointStrategyOptions.values() + " (ensure enable.auto.commit param is set to false when enabling this)")
                       .defaultValue(CheckpointStrategyOptions.NONE)
                       .validator(Validators.alwaysPass())
                       .build());
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER)
                       .description("No. of Kafka consumer instances per Mantis worker")
                       .validator(Validators.range(1, 16))
                       .defaultValue(DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER)
                       .build());
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING)
                       .description("The maximum amount of data per-consumer awaiting acks to trigger an offsets commit. " +
                                        "These commits are in addition to any commits triggered by commitIntervalMs timer")
                       .defaultValue(DEFAULT_MAX_BYTES_IN_PROCESSING)
                       .validator(Validators.range(1, Integer.MAX_VALUE))
                       .build());
        // Poll timeout passed to KafkaConsumer.poll(), bounded to [100ms, 10s].
        params.add(new IntParameter()
                       .name(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS)
                       .validator(Validators.range(100, 10_000))
                       .defaultValue(250)
                       .build());
        // Message parser used when parsing is done inside the source (see PARSE_MSG_IN_SOURCE).
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.PARSER_TYPE)
                       .validator(Validators.notNullOrEmpty())
                       .defaultValue(ParserType.SIMPLE_JSON.getPropName())
                       .build());
        params.add(new BooleanParameter()
                       .name(KafkaSourceParameters.PARSE_MSG_IN_SOURCE)
                       .validator(Validators.alwaysPass())
                       .defaultValue(DEFAULT_PARSE_MSG_IN_SOURCE)
                       .build());
        params.add(new BooleanParameter()
                       .name(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN)
                       .validator(Validators.alwaysPass())
                       .defaultValue(DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN)
                       .description("Disable Kafka's default consumer group management and statically assign partitions to job workers. When enabling static partition assignments, disable auto-scaling and set the numPartitionsPerTopic job parameter")
                       .build());
        params.add(new StringParameter()
                       .name(KafkaSourceParameters.TOPIC_PARTITION_COUNTS)
                       .validator(Validators.alwaysPass())
                       .defaultValue("")
                       .description("Configures number of partitions on a kafka topic when static partition assignment is enabled. Format <topic1>:<numPartitions Topic1>,<topic2>:<numPartitions Topic2> Example: nf_errors_log:9,clevent:450")
                       .build());
        // Pass through all Kafka consumer-level config knobs (bootstrap servers, group id, ...).
        params.addAll(MantisKafkaConsumerConfig.getJobParameterDefinitions());
        return params;
    }
}
| 4,489 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/KafkaConsumerRebalanceListener.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.Collection;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
/**
 * Rebalance listener that resets local partition-tracking state when partitions are
 * revoked and, on assignment, seeks the consumer to any checkpointed offset loaded
 * from the configured {@link CheckpointStrategy}.
 *
 * @param <S> type of checkpoint state persisted by the strategy
 */
public class KafkaConsumerRebalanceListener<S> implements ConsumerRebalanceListener {
    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerRebalanceListener.class);
    private final KafkaConsumer<?, ?> consumer;
    private final TopicPartitionStateManager partitionStateManager;
    private final CheckpointStrategy<S> checkpointStrategy;
    public KafkaConsumerRebalanceListener(final KafkaConsumer<?, ?> consumer,
                                          final TopicPartitionStateManager partitionStateManager,
                                          final CheckpointStrategy<S> checkpointStrategy) {
        this.consumer = consumer;
        this.partitionStateManager = partitionStateManager;
        this.checkpointStrategy = checkpointStrategy;
    }
    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Only clear local state here. Checkpointing on revoke could block forever
        // while waiting for outstanding acks from a slow processing stage, so it is
        // deliberately not attempted.
        LOGGER.info("partitions revoked, resetting partition state: {}", partitions.toString());
        for (TopicPartition revoked : partitions) {
            partitionStateManager.resetCounters(revoked);
        }
    }
    /**
     * Assumption is onPartitionsRevoked will always be called before onPartitionsAssigned
     */
    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        LOGGER.info("new partitions assigned: {}", partitions.toString());
        try {
            for (TopicPartition tp : partitions) {
                final Optional<S> checkpointState = checkpointStrategy.loadCheckpoint(tp);
                if (!checkpointState.isPresent()) {
                    continue;
                }
                final S state = checkpointState.get();
                // Only offset-based checkpoint state can be used to reposition the consumer.
                if (!(state instanceof OffsetAndMetadata)) {
                    continue;
                }
                final long offset = ((OffsetAndMetadata) state).offset();
                LOGGER.info("seeking consumer to checkpoint'ed offset {} for partition {} on assignment", offset, tp);
                try {
                    consumer.seek(tp, offset);
                } catch (Exception e) {
                    // A failed seek on one partition should not prevent seeking the rest.
                    LOGGER.error("caught exception seeking consumer to offset {} on topic partition {}", offset, tp, e);
                }
            }
        } catch (Exception e) {
            LOGGER.error("caught exception on partition assignment {}", partitions, e);
        }
    }
}
| 4,490 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/TopicPartitionStateManager.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Registry;
import io.netty.util.internal.ConcurrentSet;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Tracks per-{@link TopicPartition} read/ack state for messages handed to the
 * processing stage and produces offset checkpoints once all in-flight messages
 * for a partition have been acknowledged.
 *
 * Thread-safety: backed by concurrent collections; individual operations are
 * safe to call from consumer and ack-processing threads concurrently.
 */
public class TopicPartitionStateManager {
    private static final Logger LOGGER = LoggerFactory.getLogger(TopicPartitionStateManager.class);
    // Default to 20ms delay between retries for checkpoint ready
    private final int checkpointReadyCheckDelayMs;
    private final Counter waitingForAckCount;
    public TopicPartitionStateManager(Registry registry, String kafkaClientId, int checkpointReadyCheckDelayMs) {
        this.checkpointReadyCheckDelayMs = checkpointReadyCheckDelayMs;
        this.waitingForAckCount = registry.counter("waitingOnAck", "client-id", kafkaClientId);
    }
    public static final long DEFAULT_LAST_READ_OFFSET = 0;
    // static nested class: it does not need a reference to the enclosing instance.
    private static class State {
        private final AtomicLong lastReadOffset = new AtomicLong(DEFAULT_LAST_READ_OFFSET);
        // ConcurrentHashMap-backed set replaces Netty's internal (deprecated) ConcurrentSet.
        private final Set<Long> unAckedOffsets = ConcurrentHashMap.newKeySet();
    }
    private final ConcurrentMap<TopicPartition, State> partitionState = new ConcurrentHashMap<>();
    /**
     * Track the message with this offset as read from Kafka but waiting on acknowledgement from the processing stage.
     *
     * @param tp TopicPartition the message was read from
     * @param offset kafka offset for the message
     */
    public void recordMessageRead(final TopicPartition tp, final long offset) {
        // computeIfAbsent replaces the previous containsKey/putIfAbsent check-then-act
        // and avoids redundant map lookups.
        final State state = partitionState.computeIfAbsent(tp, ignored -> new State());
        state.unAckedOffsets.add(offset);
        state.lastReadOffset.set(offset);
    }
    /**
     * Records the message identified by this offset has been processed and ack'ed by the processing stage.
     *
     * @param tp TopicPartition the message was read from
     * @param offset kafka offset for the message
     */
    public void recordMessageAck(final TopicPartition tp, final long offset) {
        final State state = partitionState.get(tp);
        if (state != null) {
            state.unAckedOffsets.remove(offset);
        }
    }
    /**
     * Get last read offset from this topic partition.
     *
     * @param tp TopicPartition
     *
     * @return last offset read from give TopicPartition
     */
    public Optional<Long> getLastOffset(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        if (state == null) {
            return Optional.empty();
        }
        return Optional.of(state.lastReadOffset.get());
    }
    private boolean allMessagesAcked(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        // no entry means no messages were read, so no acks are needed
        return state == null || state.unAckedOffsets.isEmpty();
    }
    /**
     * Builds an offset checkpoint for the given partitions, blocking (in
     * checkpointReadyCheckDelayMs increments) until every in-flight message on each
     * partition has been acknowledged.
     *
     * @param partitions partitions to checkpoint
     * @return map of partition to OffsetAndMetadata to commit; partitions with no
     *         reads past the default offset are omitted
     * @throws RuntimeException if the calling thread is interrupted while waiting
     */
    public Map<TopicPartition, OffsetAndMetadata> createCheckpoint(final Collection<TopicPartition> partitions) {
        if (partitionState.isEmpty()) {
            return Collections.emptyMap();
        }
        final Map<TopicPartition, OffsetAndMetadata> checkpoint = new HashMap<>(partitions.size());
        for (TopicPartition tp : partitions) {
            while (!allMessagesAcked(tp)) {
                try {
                    waitingForAckCount.increment();
                    Thread.sleep(checkpointReadyCheckDelayMs);
                } catch (InterruptedException e) {
                    LOGGER.info("thread interrupted when creating checkpoint for {}", tp);
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("thread interrupted when creating checkpoint", e);
                }
            }
            final State pState = partitionState.get(tp);
            if (pState != null) {
                final long lastOffset = pState.lastReadOffset.get();
                if (lastOffset != DEFAULT_LAST_READ_OFFSET) {
                    // commit offset is last-read + 1 (Kafka commits the *next* offset to consume);
                    // metadata records the wall-clock checkpoint time.
                    checkpoint.put(tp, new OffsetAndMetadata(lastOffset + 1, String.valueOf(System.currentTimeMillis())));
                }
            }
        }
        return checkpoint;
    }
    /* reset partition counters */
    public void resetCounters(final TopicPartition tp) {
        final State state = partitionState.get(tp);
        if (state == null) {
            return;
        }
        state.unAckedOffsets.clear();
        state.lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
    }
    /* reset all counters */
    public void resetCounters() {
        LOGGER.info("resetting all counters");
        for (State state : partitionState.values()) {
            state.unAckedOffsets.clear();
            state.lastReadOffset.set(DEFAULT_LAST_READ_OFFSET);
        }
    }
}
| 4,491 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumer.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.impl.Preconditions;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignor;
import io.mantisrx.connector.kafka.source.assignor.StaticPartitionAssignorImpl;
import io.mantisrx.connector.kafka.source.metrics.ConsumerMetrics;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyFactory;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTrigger;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategy;
import io.mantisrx.connector.kafka.source.checkpoint.trigger.CheckpointTriggerFactory;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import rx.Observable;
import rx.Subscription;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Wrapper around a {@link KafkaConsumer} that bundles the consumer together with its
 * per-partition ack-tracking state, checkpoint strategy/trigger and metrics, and
 * publishes poll-staleness gauges once per second.
 *
 * @param <S> checkpoint state type handled by the {@link CheckpointStrategy}
 */
public class MantisKafkaConsumer<S> {
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumer.class);
    private final int consumerId;
    private final KafkaConsumer<String, byte[]> consumer;
    private final CheckpointStrategy<S> strategy;
    private final CheckpointTrigger trigger;
    private final ConsumerMetrics consumerMetrics;
    private final TopicPartitionStateManager partitionStateManager;
    // Wall-clock time of the last poll() call and the last poll() that returned data;
    // used to report consumer staleness metrics.
    private final AtomicLong pollTimestamp = new AtomicLong(System.currentTimeMillis());
    private final AtomicLong pollReturnedDataTimestamp = new AtomicLong(System.currentTimeMillis());
    private volatile Subscription metricSubscription = null;
    public MantisKafkaConsumer(final int consumerId,
                               final KafkaConsumer<String, byte[]> consumer,
                               final TopicPartitionStateManager partitionStateManager,
                               final CheckpointStrategy<S> strategy,
                               final CheckpointTrigger trigger,
                               final ConsumerMetrics metrics) {
        this.consumerId = consumerId;
        this.consumerMetrics = metrics;
        this.consumer = consumer;
        this.partitionStateManager = partitionStateManager;
        this.strategy = strategy;
        this.trigger = trigger;
        setupMetricPublish();
    }
    // Starts a 1-second interval timer that records time-since-last-poll metrics.
    // Unsubscribed in close().
    private void setupMetricPublish() {
        if (metricSubscription == null) {
            this.metricSubscription = Observable.interval(1, TimeUnit.SECONDS).subscribe((tick) -> {
                consumerMetrics.recordTimeSinceLastPollMs(timeSinceLastPollMs());
                consumerMetrics.recordTimeSinceLastPollWithDataMs(timeSinceLastPollWithDataMs());
            });
        }
    }
    public int getConsumerId() {
        return consumerId;
    }
    public KafkaConsumer<String, byte[]> getConsumer() {
        return consumer;
    }
    public CheckpointStrategy<S> getStrategy() {
        return strategy;
    }
    public CheckpointTrigger getTrigger() {
        return trigger;
    }
    public TopicPartitionStateManager getPartitionStateManager() {
        return partitionStateManager;
    }
    /** Milliseconds elapsed since the last call to {@link #poll(long)}. */
    public long timeSinceLastPollMs() {
        return (System.currentTimeMillis() - pollTimestamp.get());
    }
    /** Milliseconds elapsed since the last poll that returned at least one record. */
    public long timeSinceLastPollWithDataMs() {
        return (System.currentTimeMillis() - pollReturnedDataTimestamp.get());
    }
    public ConsumerMetrics getConsumerMetrics() {
        return consumerMetrics;
    }
    // Stops metric publishing, clears partition state and closes the underlying consumer.
    // NOTE(review): the KafkaConsumer is only closed when trigger.isActive(); if the
    // trigger is already inactive the consumer object appears to be left open —
    // confirm whether that leak is intentional (e.g. trigger shutdown implies a
    // previously completed close).
    public void close() {
        if (metricSubscription != null && !metricSubscription.isUnsubscribed()) {
            metricSubscription.unsubscribe();
        }
        if (trigger.isActive()) {
            final Set<TopicPartition> partitions = consumer.assignment();
            LOGGER.warn("clearing partition state when closing consumer {}, partitions {}", this.toString(), partitions.toString());
            partitions.stream().forEach(tp -> partitionStateManager.resetCounters(tp));
            consumer.close();
            trigger.shutdown();
        }
    }
    /**
     * {@link KafkaConsumer#poll(Duration)} ()}
     */
    public ConsumerRecords<String, byte[]> poll(final long consumerPollTimeoutMs) {
        final long now = System.currentTimeMillis();
        pollTimestamp.set(now);
        final ConsumerRecords<String, byte[]> consumerRecords = consumer.poll(Duration.ofMillis(consumerPollTimeoutMs));
        if (consumerRecords.count() > 0) {
            pollReturnedDataTimestamp.set(now);
        }
        return consumerRecords;
    }
    /**
     * {@link KafkaConsumer#assignment()}
     */
    public Set<TopicPartition> assignment() {
        return consumer.assignment();
    }
    /**
     * {@link KafkaConsumer#listTopics()}
     */
    public Map<String, List<PartitionInfo>> listTopics() {
        return consumer.listTopics();
    }
    /**
     * {@link KafkaConsumer#position(TopicPartition)} ()}
     */
    public long position(TopicPartition partition) {
        return consumer.position(partition);
    }
    /**
     * {@link KafkaConsumer#seekToBeginning(Collection)} ()}
     */
    public void seekToBeginning(TopicPartition... partitions) {
        consumer.seekToBeginning(Arrays.asList(partitions));
    }
    /**
     * {@link KafkaConsumer#pause(Collection)} ()}
     */
    public void pause(TopicPartition... partitions) {
        LOGGER.debug("pausing {} partitions", partitions.length);
        consumer.pause(Arrays.asList(partitions));
        consumerMetrics.incrementPausePartitionCount();
    }
    /**
     * {@link KafkaConsumer#resume(Collection)} ()}
     */
    public void resume(TopicPartition... partitions) {
        try {
            LOGGER.debug("resuming {} partitions", partitions.length);
            consumer.resume(Arrays.asList(partitions));
            consumerMetrics.incrementResumePartitionCount();
        } catch (IllegalStateException e) {
            // resume() throws if a partition is no longer assigned to this consumer
            LOGGER.warn("resuming partitions failed", e);
        }
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        // NOTE(review): raw type here; MantisKafkaConsumer<?> would avoid the
        // unchecked-raw-type warning without changing behavior.
        MantisKafkaConsumer that = (MantisKafkaConsumer) o;
        return consumerId == that.consumerId &&
            consumer.equals(that.consumer) &&
            strategy.equals(that.strategy) &&
            trigger.equals(that.trigger) &&
            consumerMetrics.equals(that.consumerMetrics) &&
            partitionStateManager.equals(that.partitionStateManager);
    }
    @Override
    public int hashCode() {
        return Objects.hash(consumerId, consumer, strategy, trigger, consumerMetrics, partitionStateManager);
    }
    @Override
    public String toString() {
        return "MantisKafkaConsumer{" +
            "consumerId=" + consumerId +
            ", consumer=" + consumer +
            ", strategy=" + strategy +
            ", trigger=" + trigger +
            '}';
    }
    /**
     * Builder assembling a MantisKafkaConsumer from job context and source config:
     * creates the KafkaConsumer, wires partition state, checkpoint strategy/trigger
     * and metrics, and performs either static partition assignment or a normal
     * subscribe depending on configuration.
     */
    static class Builder {
        private Context context;
        private int consumerIndex;
        private int totalNumConsumersForJob;
        private Registry registry;
        private MantisKafkaSourceConfig kafkaSourceConfig;
        // Monotonic id shared across all builders in this JVM; used in the Kafka client-id.
        private static final AtomicInteger consumerId = new AtomicInteger(0);
        private final StaticPartitionAssignor staticPartitionAssignor = new StaticPartitionAssignorImpl();
        public Builder withContext(Context context) {
            this.context = context;
            return this;
        }
        public Builder withKafkaSourceConfig(MantisKafkaSourceConfig kafkaSourceConfig) {
            this.kafkaSourceConfig = kafkaSourceConfig;
            return this;
        }
        public Builder withConsumerIndex(int consumerIndex) {
            this.consumerIndex = consumerIndex;
            return this;
        }
        public Builder withTotalNumConsumersForJob(int totalNumConsumersForJob) {
            this.totalNumConsumersForJob = totalNumConsumersForJob;
            return this;
        }
        public Builder withRegistry(Registry registry) {
            this.registry = registry;
            return this;
        }
        // Computes this consumer's static partition share and assigns it directly
        // (bypassing Kafka group management). Invalid index/count configurations
        // terminate the job via context.completeAndExit().
        private void doStaticPartitionAssignment(final KafkaConsumer<String, byte[]> consumer,
                                                 final ConsumerRebalanceListener rebalanceListener,
                                                 final int consumerIndex,
                                                 final int totalNumConsumers,
                                                 final Map<String, Integer> topicPartitionCounts,
                                                 final Registry registry) {
            if (totalNumConsumers <= 0) {
                LOGGER.error("total num consumers {} is invalid", totalNumConsumers);
                context.completeAndExit();
                return;
            }
            if (consumerIndex < 0 || consumerIndex >= totalNumConsumers) {
                LOGGER.error("consumerIndex {} is invalid (numConsumers: {})", consumerIndex, totalNumConsumers);
                context.completeAndExit();
                return;
            }
            final List<TopicPartition> topicPartitions = staticPartitionAssignor.assignPartitionsToConsumer(consumerIndex, topicPartitionCounts, totalNumConsumers);
            if (topicPartitions.isEmpty()) {
                LOGGER.error("topic partitions to assign list is empty");
                throw new RuntimeException("static partition assignment is enabled and no topic partitions were assigned, please check numPartitionsPerTopic job param is set correctly and the job has num(kafka consumer) <= num(partition)");
            } else {
                LOGGER.info("Statically assigned topic partitions(): {}", topicPartitions);
                topicPartitions.forEach(tp ->
                                            registry.gauge("staticPartitionAssigned",
                                                           "topic", tp.topic(), "partition", String.valueOf(tp.partition())).set(1.0));
                consumer.assign(topicPartitions);
                // reuse onPartitionsAssigned() so the consumer can seek to checkpoint'ed offset from offset store
                rebalanceListener.onPartitionsAssigned(topicPartitions);
            }
        }
        /**
         * Builds the consumer. Requires context, kafkaSourceConfig and registry to be
         * set, consumerIndex >= 0 and totalNumConsumersForJob > 0.
         *
         * @throws RuntimeException if static assignment is enabled but yields no partitions
         */
        public MantisKafkaConsumer<?> build() {
            Preconditions.checkNotNull(context, "context");
            Preconditions.checkNotNull(kafkaSourceConfig, "kafkaSourceConfig");
            Preconditions.checkNotNull(registry, "registry");
            Preconditions.checkArg(consumerIndex >= 0, "consumerIndex must be greater than or equal to 0");
            Preconditions.checkArg(totalNumConsumersForJob > 0, "total number of consumers for job must be greater than 0");
            final int kafkaConsumerId = consumerId.incrementAndGet();
            Map<String, Object> consumerProps = kafkaSourceConfig.getConsumerConfig().getConsumerProperties();
            // client-id is unique per job/worker/consumer-instance for metric tagging.
            final String clientId = String.format("%s-%d-%d", context.getJobId(), context.getWorkerInfo().getWorkerNumber(), kafkaConsumerId);
            consumerProps.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId);
            // hard-coding key to String type and value to byte[]
            final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(consumerProps);
            final TopicPartitionStateManager partitionStateManager = new TopicPartitionStateManager(registry, clientId, kafkaSourceConfig.getRetryCheckpointCheckDelayMs());
            final ConsumerMetrics metrics = new ConsumerMetrics(registry, kafkaConsumerId, context);
            final CheckpointStrategy<?> strategy = CheckpointStrategyFactory.getNewInstance(context, consumer, kafkaSourceConfig.getCheckpointStrategy(), metrics);
            if (kafkaSourceConfig.getStaticPartitionAssignmentEnabled()) {
                final KafkaConsumerRebalanceListener kafkaConsumerRebalanceListener = new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy);
                kafkaSourceConfig.getTopicPartitionCounts().ifPresent(topicPartitionCounts -> {
                    doStaticPartitionAssignment(consumer, kafkaConsumerRebalanceListener, consumerIndex, totalNumConsumersForJob, topicPartitionCounts, registry);
                });
            } else {
                // With a checkpoint strategy, subscribe with the rebalance listener so the
                // consumer can seek to stored offsets on assignment.
                if (kafkaSourceConfig.getCheckpointStrategy() != CheckpointStrategyOptions.NONE) {
                    consumer.subscribe(kafkaSourceConfig.getTopics(),
                                       new KafkaConsumerRebalanceListener(consumer, partitionStateManager, strategy));
                } else {
                    consumer.subscribe(kafkaSourceConfig.getTopics());
                }
            }
            final CheckpointTrigger trigger = CheckpointTriggerFactory.getNewInstance(kafkaSourceConfig);
            return new MantisKafkaConsumer<>(kafkaConsumerId, consumer, partitionStateManager, strategy, trigger, metrics);
        }
    }
}
| 4,492 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaSourceConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import com.google.common.base.Splitter;
import io.mantisrx.connector.kafka.source.serde.ParserType;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.Parameters;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Immutable view of all Kafka-source job parameters resolved from the Mantis job
 * {@link Context}: topics, consumer counts, poll/checkpoint tuning, parser options
 * and (when static partition assignment is enabled) per-topic partition counts.
 */
public class MantisKafkaSourceConfig {
    private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaSourceConfig.class);
    public static final int DEFAULT_CONSUMER_POLL_TIMEOUT_MS = 100;
    public static final int DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS = 20;
    public static final boolean DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN = false;
    // Per-record bookkeeping overhead added when accounting bytes in processing.
    public static final int CONSUMER_RECORD_OVERHEAD_BYTES = 100;
    public static final int DEFAULT_MAX_BYTES_IN_PROCESSING = 128_000_000;
    public static final int DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER = 1;
    public static final boolean DEFAULT_PARSE_MSG_IN_SOURCE = true;
    private final List<String> topics;
    private final int numConsumerInstances;
    private final int consumerPollTimeoutMs;
    private final int maxBytesInProcessing;
    private final String messageParserType;
    private final String checkpointStrategy;
    private final Boolean parseMessageInSource;
    private final int retryCheckpointCheckDelayMs;
    private final int checkpointIntervalMs;
    private final Boolean staticPartitionAssignmentEnabled;
    // Populated only when static partition assignment is enabled.
    private final Optional<Map<String, Integer>> topicPartitionCounts;
    private final MantisKafkaConsumerConfig consumerConfig;
    /**
     * Reads and validates all source parameters from the job context.
     *
     * @throws RuntimeException if static partition assignment is enabled and the
     *         topic-partition-count parameter is malformed or inconsistent with TOPIC
     */
    public MantisKafkaSourceConfig(Context context) {
        final Parameters parameters = context.getParameters();
        // TOPIC is a comma-separated list; blank entries are dropped.
        final String topicStr = (String) parameters.get(KafkaSourceParameters.TOPIC);
        this.topics = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicStr);
        this.numConsumerInstances = (int) parameters.get(KafkaSourceParameters.NUM_KAFKA_CONSUMER_PER_WORKER, DEFAULT_NUM_KAFKA_CONSUMER_PER_WORKER);
        this.consumerPollTimeoutMs = (int) parameters.get(KafkaSourceParameters.CONSUMER_POLL_TIMEOUT_MS, DEFAULT_CONSUMER_POLL_TIMEOUT_MS);
        this.maxBytesInProcessing = (int) parameters.get(KafkaSourceParameters.MAX_BYTES_IN_PROCESSING, DEFAULT_MAX_BYTES_IN_PROCESSING);
        this.messageParserType = (String) parameters.get(KafkaSourceParameters.PARSER_TYPE, ParserType.SIMPLE_JSON.getPropName());
        this.checkpointStrategy = (String) parameters.get(KafkaSourceParameters.CHECKPOINT_STRATEGY, CheckpointStrategyOptions.NONE);
        this.parseMessageInSource = (boolean) parameters.get(KafkaSourceParameters.PARSE_MSG_IN_SOURCE, DEFAULT_PARSE_MSG_IN_SOURCE);
        this.retryCheckpointCheckDelayMs = (int) parameters.get(KafkaSourceParameters.RETRY_CHECKPOINT_CHECK_DELAY_MS, DEFAULT_RETRY_CHECKPOINT_CHECK_DELAY_MS);
        this.checkpointIntervalMs = (int) parameters.get(KafkaSourceParameters.CHECKPOINT_INTERVAL_MS, MantisKafkaConsumerConfig.DEFAULT_CHECKPOINT_INTERVAL_MS);
        this.staticPartitionAssignmentEnabled = (boolean) parameters.get(KafkaSourceParameters.ENABLE_STATIC_PARTITION_ASSIGN, DEFAULT_ENABLE_STATIC_PARTITION_ASSIGN);
        if (staticPartitionAssignmentEnabled) {
            final String topicPartitionsStr = (String) parameters.get(KafkaSourceParameters.TOPIC_PARTITION_COUNTS, "");
            this.topicPartitionCounts = Optional.ofNullable(getTopicPartitionCounts(topicPartitionsStr, topics));
        } else {
            this.topicPartitionCounts = Optional.empty();
        }
        consumerConfig = new MantisKafkaConsumerConfig(context);
        LOGGER.info("checkpointStrategy: {} numConsumerInstances: {} topics: {} consumerPollTimeoutMs: {} retryCheckpointCheckDelayMs {} consumer config: {}",
                    checkpointStrategy, numConsumerInstances, topics, consumerPollTimeoutMs, retryCheckpointCheckDelayMs, consumerConfig.values().toString());
    }
    /**
     * Parses the "<topic>:<count>,..." parameter into a map and validates that its
     * topic set exactly matches the configured topic list.
     *
     * @param topicPartitionsStr raw comma-separated "topic:count" pairs
     * @param topicList topics configured via the TOPIC parameter
     * @return topic to partition-count map (never null)
     * @throws RuntimeException on malformed pairs or topic-set mismatch
     */
    private Map<String, Integer> getTopicPartitionCounts(String topicPartitionsStr, List<String> topicList) {
        final List<String> topicPartitionCountList = Splitter.on(',').trimResults().omitEmptyStrings().splitToList(topicPartitionsStr);
        final Map<String, Integer> topicPartitionCounts = new HashMap<>();
        // parse topic partition counts only if Static partition assignment is enabled
        for (String tp : topicPartitionCountList) {
            final String[] topicPartitionCount = tp.split(":");
            if (topicPartitionCount.length == 2) {
                final String topic = topicPartitionCount[0];
                if (topicList.contains(topic)) {
                    // NOTE: Integer.parseInt may throw NumberFormatException on a
                    // non-numeric count; it propagates to the caller unwrapped.
                    topicPartitionCounts.put(topic, Integer.parseInt(topicPartitionCount[1]));
                } else {
                    final String errorMsg = String.format("topic %s specified in Job Parameter '%s' does not match topics specified for Job Parameter '%s'",
                                                          topic, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, KafkaSourceParameters.TOPIC);
                    LOGGER.error(errorMsg);
                    throw new RuntimeException(errorMsg);
                }
            } else {
                final String errorMsg = String.format("failed to parse topic partition count string %s", tp);
                LOGGER.error(errorMsg);
                throw new RuntimeException(errorMsg);
            }
        }
        // validate all topics have partition counts specified
        final Set<String> partitionCountTopics = topicPartitionCounts.keySet();
        if (!partitionCountTopics.containsAll(topicList) ||
            !topicList.containsAll(partitionCountTopics)) {
            final String errorMsg = String.format("topics '%s' specified for Job Parameter '%s' don't match topics '%s' specified for Job Parameter '%s'",
                                                  partitionCountTopics, KafkaSourceParameters.TOPIC_PARTITION_COUNTS, topicList, KafkaSourceParameters.TOPIC);
            LOGGER.error(errorMsg);
            throw new RuntimeException(errorMsg);
        }
        LOGGER.info("enableStaticPartitionAssignment: {} [ topic partition counts: {} ]", staticPartitionAssignmentEnabled, topicPartitionCounts);
        return topicPartitionCounts;
    }
    public List<String> getTopics() {
        return topics;
    }
    public int getNumConsumerInstances() {
        return numConsumerInstances;
    }
    public int getConsumerPollTimeoutMs() {
        return consumerPollTimeoutMs;
    }
    public int getMaxBytesInProcessing() {
        return maxBytesInProcessing;
    }
    public String getMessageParserType() {
        return messageParserType;
    }
    public String getCheckpointStrategy() {
        return checkpointStrategy;
    }
    public Boolean getParseMessageInSource() {
        return parseMessageInSource;
    }
    public int getRetryCheckpointCheckDelayMs() {
        return retryCheckpointCheckDelayMs;
    }
    public int getCheckpointIntervalMs() {
        return checkpointIntervalMs;
    }
    public Boolean getStaticPartitionAssignmentEnabled() {
        return staticPartitionAssignmentEnabled;
    }
    public Optional<Map<String, Integer>> getTopicPartitionCounts() {
        return topicPartitionCounts;
    }
    public MantisKafkaConsumerConfig getConsumerConfig() {
        return consumerConfig;
    }
}
| 4,493 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/MantisKafkaConsumerConfig.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import io.mantisrx.connector.kafka.KafkaSourceParameters;
import io.mantisrx.connector.kafka.sink.KafkaSinkJobParameters;
import io.mantisrx.runtime.Context;
import io.mantisrx.runtime.parameter.ParameterDefinition;
import io.mantisrx.runtime.parameter.Parameters;
import io.mantisrx.runtime.parameter.type.StringParameter;
import io.mantisrx.runtime.parameter.validator.Validators;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.RangeAssignor;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utility Class for handling Kafka ConsumerConfig defaults and Job parameter overrides
*/
public class MantisKafkaConsumerConfig extends ConsumerConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(MantisKafkaConsumerConfig.class);
public MantisKafkaConsumerConfig(Map<String, Object> props,
Context context) {
super(applyJobParamOverrides(context, props));
}
public MantisKafkaConsumerConfig(Context context) {
this(defaultProps(), context);
}
public static final String DEFAULT_AUTO_OFFSET_RESET = "latest";
public static final String DEFAULT_AUTO_COMMIT_ENABLED = "false";
public static final String DEFAULT_BOOTSTRAP_SERVERS_CONFIG = "localhost:9092";
public static final int DEFAULT_AUTO_COMMIT_INTERVAL_MS = 5000;
public static final int DEFAULT_HEARTBEAT_INTERVAL_MS = 3000;
public static final int DEFAULT_SESSION_TIMEOUT_MS = 10_000;
public static final int DEFAULT_FETCH_MIN_BYTES = 1024;
public static final int DEFAULT_FETCH_MAX_WAIT_MS = 100;
public static final int DEFAULT_REQUEST_TIMEOUT_MS = 40000;
public static final int DEFAULT_CHECKPOINT_INTERVAL_MS = 5_000;
public static final int DEFAULT_MAX_POLL_INTERVAL_MS = 300_000;
public static final int DEFAULT_MAX_POLL_RECORDS = 500;
public static final int DEFAULT_MAX_PARTITION_FETCH_BYTES = 10_000_000;
public static final int DEFAULT_RECEIVE_BUFFER_BYTES = 32768;
public static final int DEFAULT_SEND_BUFFER_BYTES = 131072;
public static final Class<StringDeserializer> DEFAULT_KEY_DESERIALIZER = StringDeserializer.class;
public static final Class<ByteArrayDeserializer> DEFAULT_VALUE_DESERIALIZER = ByteArrayDeserializer.class;
public static Map<String, Object> defaultProps() {
final Map<String, Object> props = new HashMap<>();
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, DEFAULT_AUTO_COMMIT_ENABLED);
props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_AUTO_COMMIT_INTERVAL_MS));
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, DEFAULT_AUTO_OFFSET_RESET);
props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, String.valueOf(DEFAULT_FETCH_MAX_WAIT_MS));
props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, String.valueOf(DEFAULT_FETCH_MIN_BYTES));
props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_HEARTBEAT_INTERVAL_MS));
props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_SESSION_TIMEOUT_MS));
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, DEFAULT_KEY_DESERIALIZER);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, DEFAULT_VALUE_DESERIALIZER);
props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(DEFAULT_MAX_PARTITION_FETCH_BYTES));
props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, String.valueOf(DEFAULT_RECEIVE_BUFFER_BYTES));
props.put(ConsumerConfig.SEND_BUFFER_CONFIG, String.valueOf(DEFAULT_SEND_BUFFER_BYTES));
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, DEFAULT_BOOTSTRAP_SERVERS_CONFIG);
props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, JmxReporter.class.getName());
props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(DEFAULT_REQUEST_TIMEOUT_MS));
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_RECORDS));
props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, String.valueOf(DEFAULT_MAX_POLL_INTERVAL_MS));
props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RangeAssignor.class.getName());
return props;
}
/**
* Get kafka consumer group ID to use by default.
* @return default group ID to use for kafka consumer based on Mantis Job Id when running in cloud, else default to local consumer id
*/
@VisibleForTesting
static String getGroupId() {
String jobId = System.getenv("JOB_ID");
if (jobId != null && !jobId.isEmpty()) {
LOGGER.info("default consumer groupId to {} if not overridden by job param", "mantis-kafka-source-" + jobId);
return "mantis-kafka-source-" + jobId;
}
return "mantis-kafka-source-fallback-consumer-id";
}
private static Map<String, Object> applyJobParamOverrides(Context context, Map<String, Object> parsedValues) {
final Parameters parameters = context.getParameters();
if (!parsedValues.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
// set consumerGroupId if not already set
final String consumerGroupId = (String) parameters.get(KafkaSourceParameters.PREFIX + ConsumerConfig.GROUP_ID_CONFIG, getGroupId());
parsedValues.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
}
for (String key : configNames()) {
Object value = parameters.get(KafkaSourceParameters.PREFIX + key, null);
if (value != null) {
LOGGER.info("job param override for key {} -> {}", key, value);
parsedValues.put(key, value);
}
}
return parsedValues;
}
/**
* Helper class to get all Kafka Consumer configs as Job Parameters to allow overriding Kafka consumer config settings at Job submit time.
*
* @return
*/
public static List<ParameterDefinition<?>> getJobParameterDefinitions() {
List<ParameterDefinition<?>> params = new ArrayList<>();
Map<String, Object> defaultProps = defaultProps();
for (String key : configNames()) {
ParameterDefinition.Builder<String> builder = new StringParameter()
.name(KafkaSourceParameters.PREFIX + key)
.validator(Validators.alwaysPass())
.description(KafkaSourceParameters.PREFIX + key);
if (defaultProps.containsKey(key)) {
Object value = defaultProps.get(key);
if (value instanceof Class) {
builder = builder.defaultValue(((Class) value).getCanonicalName());
} else {
builder = builder.defaultValue((String) value);
}
}
params.add(builder.build());
}
return params;
}
public String getConsumerConfigStr() {
return values().toString();
}
public Map<String, Object> getConsumerProperties() {
return values().entrySet().stream()
.filter(x -> x.getKey() != null && x.getValue() != null)
.collect(Collectors.toMap(x -> x.getKey(),
x -> (Object) x.getValue()));
}
}
| 4,494 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/metrics/ConsumerMetrics.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.metrics;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import com.netflix.spectator.api.Counter;
import com.netflix.spectator.api.Gauge;
import com.netflix.spectator.api.Id;
import com.netflix.spectator.api.Registry;
import com.netflix.spectator.api.Tag;
import io.mantisrx.runtime.Context;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
/**
 * Spectator-based metrics for a single Mantis Kafka consumer instance.
 *
 * <p>All metrics share a common tag set (worker number/index, job name/id, consumer id).
 * Per-partition offset gauges are registered lazily, at most once per {@link TopicPartition}.
 */
public class ConsumerMetrics {

    private static final String METRICS_PREFIX = "MantisKafkaConsumer_";
    private static final String METRIC_KAFKA_IN_COUNT = "kafkaInCount";
    private static final String METRIC_KAFKA_PROCESSED_COUNT = "kafkaProcessedCount";
    private static final String METRIC_KAFKA_ERROR_COUNT = "kafkaErrorCount";
    private static final String METRIC_KAFKA_WAIT_FOR_DATA_COUNT = "kafkaWaitForDataCount";
    private static final String METRIC_KAFKA_COMMIT_COUNT = "kafkaCommitCount";
    private static final String METRIC_CHECKPOINT_DELAY = "checkpointDelay";
    private static final String METRIC_PARSE_FAILURE_COUNT = "parseFailureCount";
    private static final String METRIC_KAFKA_MSG_VALUE_NULL_COUNT = "kafkaMessageValueNull";
    private static final String METRIC_TIME_SINCE_LAST_POLL_MS = "timeSinceLastPollMs";
    private static final String METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS = "timeSinceLastPollWithDataMs";
    private static final String METRIC_KAFKA_PAUSE_PARTITIONS = "kafkaPausePartitions";
    private static final String METRIC_KAFKA_RESUME_PARTITIONS = "kafkaResumePartitions";

    private final Registry registry;
    private final List<Tag> commonTags;

    private final Counter kafkaInCount;
    private final Counter kafkaProcessedCount;
    private final Counter kafkaErrorCount;
    private final Counter kafkaWaitForDataCount;
    private final Counter kafkaCommitCount;
    private final Counter parseFailureCount;
    private final Counter kafkaPausePartitions;
    private final Counter kafkaResumePartitions;
    private final Counter kafkaMsgValueNullCount;
    private final Gauge checkpointDelay;
    private final Gauge timeSinceLastPollMs;
    private final Gauge timeSinceLastPollWithDataMs;

    // Lazily-registered per-partition gauges, keyed by TopicPartition.
    private final ConcurrentMap<TopicPartition, Gauge> committedOffsets = new ConcurrentHashMap<>();
    private final ConcurrentMap<TopicPartition, Gauge> readOffsets = new ConcurrentHashMap<>();

    /**
     * @param registry   spectator registry to register counters/gauges with
     * @param consumerId id of this consumer instance, added as a common tag
     * @param context    Mantis job context providing worker/job identity for the common tags
     */
    public ConsumerMetrics(final Registry registry, final int consumerId, final Context context) {
        this.registry = registry;
        this.commonTags = createCommonTags(context, consumerId);
        this.kafkaErrorCount = registry.counter(createId(METRIC_KAFKA_ERROR_COUNT));
        this.kafkaInCount = registry.counter(createId(METRIC_KAFKA_IN_COUNT));
        this.kafkaProcessedCount = registry.counter(createId(METRIC_KAFKA_PROCESSED_COUNT));
        this.kafkaWaitForDataCount = registry.counter(createId(METRIC_KAFKA_WAIT_FOR_DATA_COUNT));
        this.kafkaCommitCount = registry.counter(createId(METRIC_KAFKA_COMMIT_COUNT));
        this.checkpointDelay = registry.gauge(createId(METRIC_CHECKPOINT_DELAY));
        this.timeSinceLastPollMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_MS));
        this.timeSinceLastPollWithDataMs = registry.gauge(createId(METRIC_TIME_SINCE_LAST_POLL_WITH_DATA_MS));
        this.parseFailureCount = registry.counter(createId(METRIC_PARSE_FAILURE_COUNT));
        this.kafkaPausePartitions = registry.counter(createId(METRIC_KAFKA_PAUSE_PARTITIONS));
        this.kafkaResumePartitions = registry.counter(createId(METRIC_KAFKA_RESUME_PARTITIONS));
        this.kafkaMsgValueNullCount = registry.counter(createId(METRIC_KAFKA_MSG_VALUE_NULL_COUNT));
    }

    // Tags shared by every metric emitted from this consumer instance.
    private List<Tag> createCommonTags(final Context context, final int consumerId) {
        return Arrays.asList(Tag.of("mantisWorkerNum", Integer.toString(context.getWorkerInfo().getWorkerNumber())),
                             Tag.of("mantisWorkerIndex", Integer.toString(context.getWorkerInfo().getWorkerIndex())),
                             Tag.of("mantisJobName", context.getWorkerInfo().getJobClusterName()),
                             Tag.of("mantisJobId", context.getJobId()),
                             Tag.of("consumerId", String.valueOf(consumerId)));
    }

    private Id createId(final String metricName) {
        return registry.createId(METRICS_PREFIX + metricName, commonTags);
    }

    public void recordCheckpointDelay(final long value) {
        checkpointDelay.set(value);
    }

    public void recordTimeSinceLastPollMs(long value) {
        timeSinceLastPollMs.set(value);
    }

    public void recordTimeSinceLastPollWithDataMs(long value) {
        timeSinceLastPollWithDataMs.set(value);
    }

    public void incrementInCount() {
        kafkaInCount.increment();
    }

    public void incrementProcessedCount() {
        kafkaProcessedCount.increment();
    }

    public void incrementErrorCount() {
        kafkaErrorCount.increment();
    }

    public void incrementWaitForDataCount() {
        kafkaWaitForDataCount.increment();
    }

    public void incrementCommitCount() {
        kafkaCommitCount.increment();
    }

    public void incrementParseFailureCount() {
        parseFailureCount.increment();
    }

    public void incrementPausePartitionCount() {
        kafkaPausePartitions.increment();
    }

    public void incrementResumePartitionCount() {
        kafkaResumePartitions.increment();
    }

    public void incrementKafkaMessageValueNullCount() {
        kafkaMsgValueNullCount.increment();
    }

    /**
     * Records the committed offset for each partition in the given checkpoint.
     *
     * @param checkpoint committed offsets per partition
     */
    public void recordCommittedOffset(final Map<TopicPartition, OffsetAndMetadata> checkpoint) {
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : checkpoint.entrySet()) {
            perPartitionGauge(committedOffsets, "committedOffsets", entry.getKey())
                .set(entry.getValue().offset());
        }
    }

    /**
     * Records the latest read offset for the given partition.
     *
     * @param tp     topic partition being read
     * @param offset offset value to record
     */
    public void recordReadOffset(final TopicPartition tp, final long offset) {
        perPartitionGauge(readOffsets, "minReadOffsets", tp).set(offset);
    }

    /**
     * Returns the gauge for the given partition, registering it on first use.
     * Uses computeIfAbsent to avoid the check-then-act race of containsKey + putIfAbsent
     * (the original could register the same gauge twice under concurrent callers).
     */
    private Gauge perPartitionGauge(final ConcurrentMap<TopicPartition, Gauge> gauges,
                                    final String metricName,
                                    final TopicPartition tp) {
        return gauges.computeIfAbsent(tp, key -> {
            final ArrayList<Tag> tags = new ArrayList<>(commonTags);
            tags.add(Tag.of("topic", key.topic()));
            tags.add(Tag.of("partition", String.valueOf(key.partition())));
            return registry.gauge(metricName, tags);
        });
    }
}
| 4,495 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
/**
 * Decides when the Kafka source should persist a checkpoint, based on state fed in via
 * {@link #update(int)} (and, depending on the implementation, elapsed time).
 */
public interface CheckpointTrigger {
/**
* Indicates whether a checkpoint should be taken now.
*
* @return true to checkpoint now, false otherwise
*/
boolean shouldCheckpoint();
/**
* Updates internal state based on the provided count (typically the current message size in bytes).
*
* @param count amount to accumulate toward the checkpoint threshold
*/
void update(int count);
/**
* Hook to reset all internal state after a checkpoint is persisted.
*/
void reset();
/**
* Indicates whether the trigger is in an active and valid state.
*
* @return true while the trigger is active, false after shutdown
*/
boolean isActive();
/**
* Releases any resources held by the trigger; after this call {@link #isActive()} returns false.
*/
void shutdown();
}
| 4,496 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointingDisabledTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * A no-op {@link CheckpointTrigger} used when checkpointing is disabled:
 * it never requests a checkpoint and ignores all state updates.
 */
public class CheckpointingDisabledTrigger implements CheckpointTrigger {

    // Tracks whether shutdown() has been called; starts active.
    private final AtomicBoolean active = new AtomicBoolean(true);

    /** Checkpointing is disabled, so this never fires. */
    @Override
    public boolean shouldCheckpoint() {
        return false;
    }

    /** No state to accumulate. */
    @Override
    public void update(final int count) {
        // intentionally empty
    }

    /** No state to reset. */
    @Override
    public void reset() {
        // intentionally empty
    }

    @Override
    public boolean isActive() {
        return active.get();
    }

    /** Marks the trigger inactive; the CAS only flips the flag on the first call. */
    @Override
    public void shutdown() {
        active.compareAndSet(true, false);
    }
}
| 4,497 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CheckpointTriggerFactory.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import io.mantisrx.connector.kafka.source.MantisKafkaSourceConfig;
import io.mantisrx.connector.kafka.source.checkpoint.strategy.CheckpointStrategyOptions;
/** Factory for {@link CheckpointTrigger} instances based on the configured checkpoint strategy. */
public final class CheckpointTriggerFactory {

    private CheckpointTriggerFactory() { }

    /**
     * Factory method to create an instance of {@link CheckpointTrigger}.
     *
     * @param kafkaSourceConfig mantis kafka source configuration
     * @return a counting/time trigger for offset-based strategies, otherwise a disabled trigger
     */
    public static CheckpointTrigger getNewInstance(final MantisKafkaSourceConfig kafkaSourceConfig) {
        final String strategy = kafkaSourceConfig.getCheckpointStrategy();
        // Both offset-based strategies share the same count/time trigger; anything else
        // (including NONE) disables checkpointing.
        if (strategy.equals(CheckpointStrategyOptions.OFFSETS_ONLY_DEFAULT)
                || strategy.equals(CheckpointStrategyOptions.FILE_BASED_OFFSET_CHECKPOINTING)) {
            return new CountingCheckpointTrigger(kafkaSourceConfig.getMaxBytesInProcessing(),
                                                kafkaSourceConfig.getCheckpointIntervalMs());
        }
        return new CheckpointingDisabledTrigger();
    }
}
| 4,498 |
0 | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint | Create_ds/mantis-connectors/mantis-connector-kafka/src/main/java/io/mantisrx/connector/kafka/source/checkpoint/trigger/CountingCheckpointTrigger.java | /*
* Copyright 2019 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.mantisrx.connector.kafka.source.checkpoint.trigger;
import rx.Observable;
import rx.Subscription;
import rx.functions.Action1;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Time and count based CheckpointTrigger that triggers a checkpoint either if the accumulated count
 * exceeds the threshold or the time since the last checkpoint exceeds the configured checkpoint
 * trigger interval.
 */
public class CountingCheckpointTrigger implements CheckpointTrigger {

    // Count threshold above which a checkpoint is requested.
    private final int threshold;
    // Running count accumulated via update(); reset after each checkpoint.
    private final AtomicInteger counter;
    // Set true by the timer to force a time-based checkpoint; cleared on reset().
    private final AtomicBoolean checkpoint = new AtomicBoolean(false);
    private final AtomicBoolean isActive;
    // Periodic timer subscription; unsubscribed on shutdown().
    private final Subscription checkpointOffsetsTimer;

    /**
     * @param threshold         count above which {@link #shouldCheckpoint()} returns true
     * @param triggerIntervalMs interval after which a checkpoint is forced regardless of count
     */
    public CountingCheckpointTrigger(final int threshold, final int triggerIntervalMs) {
        this.threshold = threshold;
        this.counter = new AtomicInteger(0);
        this.isActive = new AtomicBoolean(true);
        // arm the time-based trigger periodically (lambda replaces the anonymous Action1)
        checkpointOffsetsTimer = Observable.interval(triggerIntervalMs, TimeUnit.MILLISECONDS)
            .subscribe(tick -> checkpoint.set(true));
    }

    /** @return true when the accumulated count exceeds the threshold or the timer has fired */
    @Override
    public boolean shouldCheckpoint() {
        return (counter.get() > threshold) || checkpoint.get();
    }

    /** Accumulates the given count toward the checkpoint threshold. */
    @Override
    public void update(final int count) {
        counter.addAndGet(count);
    }

    /** Clears both the count and the time-based flag after a checkpoint is persisted. */
    @Override
    public void reset() {
        counter.set(0);
        checkpoint.set(false);
    }

    @Override
    public boolean isActive() {
        return isActive.get();
    }

    /** Stops the timer and marks the trigger inactive; subsequent calls are no-ops. */
    @Override
    public void shutdown() {
        if (isActive()) {
            checkpointOffsetsTimer.unsubscribe();
            reset();
            isActive.compareAndSet(true, false);
        }
    }
}
| 4,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.