index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/EncryptConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import javax.crypto.SecretKey;
import java.util.Map;
/**
 * Configuration values required to encrypt a data file.
 */
@Getter
public final class EncryptConfig extends SimpleFileConfig {
    /**
     * Directory where temporary files are written when encryption needs two passes over the data.
     */
    private final String tempDir;

    /**
     * Cryptographic settings shared by the clean room collaboration.
     */
    private final ClientSettings settings;

    /**
     * Mapping from columns in the input file to columns in the output file.
     */
    private final TableSchema tableSchema;

    /**
     * Cryptographic transforms available.
     *
     * <p>
     * This method will be deprecated in the next major release. See its replacement at
     * {@link Transformer#initTransformers(SecretKey, String, ClientSettings, boolean)}
     */
    @Deprecated
    private final Map<ColumnType, Transformer> transformers;

    /**
     * Builds the configuration used while encrypting data.
     *
     * @param secretKey          Clean room key used to generate sub-keys for HMAC and encryption
     * @param sourceFile         Location of input data
     * @param fileFormat         Format of input data
     * @param targetFile         Where output should be saved
     * @param tempDir            Where to write temporary files if needed
     * @param overwrite          Whether to overwrite the target file if it exists already
     * @param csvInputNullValue  What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue What value should be saved in output to represent {@code null} values for CSV
     * @param salt               Salt that can be publicly known but adds to randomness of cryptographic operations
     * @param settings           Clean room cryptographic settings
     * @param tableSchema        How data in the input file maps to data in the output file
     */
    @Builder
    private EncryptConfig(@NonNull final SecretKey secretKey,
                          @NonNull final String sourceFile,
                          final FileFormat fileFormat,
                          final String targetFile,
                          @NonNull final String tempDir,
                          final boolean overwrite,
                          final String csvInputNullValue,
                          final String csvOutputNullValue,
                          @NonNull final String salt,
                          @NonNull final ClientSettings settings,
                          @NonNull final TableSchema tableSchema) {
        super(secretKey, sourceFile, fileFormat, targetFile, overwrite, csvInputNullValue, csvOutputNullValue, salt);
        // Record encryption-specific state; base-class state is handled by super().
        this.settings = settings;
        this.tableSchema = tableSchema;
        this.tempDir = tempDir;
        this.transformers = Transformer.initTransformers(secretKey, salt, settings, false);
        // Check consistency before touching the file system for the output file.
        validate();
        FileUtil.initFileIfNotExists(getTargetFile());
    }

    /**
     * Confirms the configuration is internally consistent:
     * the temporary directory must be writable, and the schema must not contain
     * cleartext columns when the clean room settings forbid them.
     *
     * @throws C3rIllegalArgumentException If any of the rules are violated
     */
    private void validate() {
        FileUtil.verifyWritableDirectory(tempDir);
        TableSchema.validateSchemaAgainstClientSettings(tableSchema, settings);
    }
}
| 2,600 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/TableSchema.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.internal.Validatable;
import lombok.EqualsAndHashCode;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Description of how columns of data in a CSV cleartext file map to the values in a CSV ciphertext file.
 */
@EqualsAndHashCode
public abstract class TableSchema implements Validatable {
    /**
     * Whether the data source has header values specified.
     *
     * <p>
     * Of note, this does need to be a {@code Boolean} and not a {@code boolean}. Since the latter has a default value of false,
     * it causes different error messages to be returned between {@code PositionalTableSchema} and {@code MappedTableSchema} when
     * the object isn't initialized properly from a JSON file. Different exception types are thrown from different points in the
     * code when {@code boolean} is used so {@code Boolean} provides a better user experience.
     */
    private Boolean headerRow;

    /**
     * Specifications for output columns.
     *
     * @return Descriptions for how each output column should be created
     */
    public abstract List<ColumnSchema> getColumns();

    /**
     * If an input file does not contain column headers, this function will return position-based column headers that
     * can be used in their place.
     *
     * @return Positional names to use for columns in an input file if applicable, else {@code null}
     */
    public abstract List<ColumnHeader> getPositionalColumnHeaders();

    /**
     * Determines if there's a need to run through the source file in order to ensure configuration constraints.
     *
     * <p>
     * For example, {@code allowDuplicates} set to false requires knowing whether any data appears more than once
     * to ensure the restriction is met.
     *
     * @return {@code true} If there are any settings that require preprocessing
     */
    public boolean requiresPreprocessing() {
        return getColumns().stream().anyMatch(ColumnSchema::requiresPreprocessing);
    }

    /**
     * Check schema for valid configuration state.
     * <ul>
     *     <li>There must be at least one column</li>
     *     <li>There can't be more than the number of allowed columns in the output</li>
     *     <li>Each target header name can only be used once</li>
     * </ul>
     *
     * @throws C3rIllegalArgumentException If one of the rules is violated
     */
    @Override
    public void validate() {
        // Make sure we actually have a schema
        if (headerRow == null && getColumns() == null) {
            throw new C3rIllegalArgumentException("Schema was not initialized.");
        }
        // Check that headerRow is valid
        if (headerRow == null) {
            throw new C3rIllegalArgumentException("Schema must specify whether or not data has a header row.");
        }
        // Validate column information now that schema is complete. Cache the result because
        // subclasses may rebuild the list on every getColumns() call.
        final var columns = getColumns();
        if (columns == null || columns.isEmpty()) {
            throw new C3rIllegalArgumentException("At least one data column must be provided in the config file.");
        }
        if (columns.size() > Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX) {
            throw new C3rIllegalArgumentException(
                    "An encrypted table can have at most "
                            + Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX
                            + " columns but this schema specifies "
                            + columns.size()
                            + ".");
        }
        // Verify we have no duplicate target column headers
        // NOTE: target column headers must have already been normalized when checking for duplicates here
        // to ensure we don't get different column headers than end up being the same post-normalization.
        final Set<ColumnHeader> duplicateTargets = columns.stream()
                .collect(Collectors.groupingBy(ColumnSchema::getTargetHeader)).entrySet()
                .stream().filter(e -> e.getValue().size() > 1)
                .map(Map.Entry::getKey)
                .collect(Collectors.toSet());
        if (!duplicateTargets.isEmpty()) {
            final String duplicates = duplicateTargets.stream().map(ColumnHeader::toString)
                    .collect(Collectors.joining(", "));
            throw new C3rIllegalArgumentException("Target header name can only be used once. Duplicates found: " + duplicates);
        }
    }

    /**
     * The set of all column headers named in the schema (i.e., source and target).
     * If a source column name is used more than once or is reused as a target it will only be here once by definition of a set.
     *
     * @return Set of column names used in this schema
     */
    public Set<ColumnHeader> getSourceAndTargetHeaders() {
        return getColumns().stream()
                .flatMap(c -> Stream.of(c.getSourceHeader(), c.getTargetHeader()))
                .collect(Collectors.toSet());
    }

    /**
     * Set whether the table schema has a header row.
     *
     * @param hasHeaderRow {@code true} if the data has a header row
     */
    protected void setHeaderRowFlag(final boolean hasHeaderRow) {
        headerRow = hasHeaderRow;
    }

    /**
     * Get whether the table schema has a header row.
     *
     * @return {@code true} if the data has a header row, or {@code null} if the flag was never set
     */
    public Boolean getHeaderRowFlag() {
        return headerRow;
    }

    /**
     * Verifies that settings are consistent.
     * - If the clean room doesn't allow cleartext columns, verify none are in the schema
     *
     * @param schema   The TableSchema to validate
     * @param settings The ClientSettings to validate the TableSchema against
     * @throws C3rIllegalArgumentException If any of the rules are violated
     */
    public static void validateSchemaAgainstClientSettings(final TableSchema schema, final ClientSettings settings) {
        if (!settings.isAllowCleartext()) {
            final Map<ColumnType, List<ColumnSchema>> typeMap = schema.getColumns().stream()
                    .collect(Collectors.groupingBy(ColumnSchema::getType));
            if (typeMap.containsKey(ColumnType.CLEARTEXT)) {
                final String targetColumns = typeMap.get(ColumnType.CLEARTEXT).stream()
                        .map(column -> column.getTargetHeader().toString())
                        .collect(Collectors.joining("`, `"));
                throw new C3rIllegalArgumentException(
                        "Cleartext columns found in the schema, but allowCleartext is false. Target " +
                                "column names: [`" + targetColumns + "`]");
            }
        }
    }
}
| 2,601 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/ColumnHeader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.internal.Validatable;
import lombok.EqualsAndHashCode;
import lombok.NonNull;
import java.io.Serializable;
import java.util.Locale;
/**
 * A column name (column header) that is normalized and validated by default.
 */
@EqualsAndHashCode
public class ColumnHeader implements Validatable, Serializable {
    /**
     * Default suffix for unspecified sealed target column names.
     */
    public static final String DEFAULT_SEALED_SUFFIX = "_sealed";

    /**
     * Default suffix for unspecified fingerprint column names.
     */
    public static final String DEFAULT_FINGERPRINT_SUFFIX = "_fingerprint";

    /**
     * Whether {@link #header} was normalized.
     */
    private final boolean normalized;

    /**
     * The name of the column.
     */
    private final String header;

    /**
     * Create a column header from the given name, normalizing it if necessary.
     *
     * @param header The name to use (possibly trimmed, made all lowercase)
     */
    public ColumnHeader(final String header) {
        this(header, true);
    }

    /**
     * Construct a header, optionally normalizing it.
     *
     * @param header          Header content
     * @param normalizeHeader Whether to normalize the header
     */
    private ColumnHeader(final String header, final boolean normalizeHeader) {
        this.normalized = normalizeHeader;
        this.header = normalizeHeader ? normalize(header) : header;
        validate();
    }

    /**
     * Creates a default target column header based off of source column header name and cryptographic primitive.
     *
     * @param sourceHeader Name of the source column
     * @param type         Type of cryptographic transform being applied
     * @return Default name for output column
     */
    private static ColumnHeader addDefaultColumnTypeSuffix(@NonNull final ColumnHeader sourceHeader, @NonNull final ColumnType type) {
        switch (type) {
            case SEALED:
                return new ColumnHeader(sourceHeader + DEFAULT_SEALED_SUFFIX);
            case FINGERPRINT:
                return new ColumnHeader(sourceHeader + DEFAULT_FINGERPRINT_SUFFIX);
            default:
                // CLEARTEXT (and any future unsuffixed type) keeps the source name as-is.
                return sourceHeader;
        }
    }

    /**
     * Creates a default target column header based off of source column header name and cryptographic primitive if a specific header was
     * not provided.
     *
     * @param sourceHeader Name of the source column
     * @param targetHeader Name of the target header (if one was provided)
     * @param type         Type of cryptographic transform being applied
     * @return Default name for output column
     */
    public static ColumnHeader deriveTargetColumnHeader(final ColumnHeader sourceHeader,
                                                        final ColumnHeader targetHeader,
                                                        final ColumnType type) {
        if (sourceHeader != null && targetHeader == null && type != null) {
            return addDefaultColumnTypeSuffix(sourceHeader, type);
        } else {
            return targetHeader;
        }
    }

    /**
     * Create a raw column header from a string (i.e., perform no normalization).
     *
     * @param header Raw content to use (unmodified) for the header; cannot be null.
     * @return The unmodified column header
     */
    public static ColumnHeader ofRaw(final String header) {
        return new ColumnHeader(header, false);
    }

    /**
     * Construct the column name from a zero counted array.
     *
     * @param i Index of the column we want a name for
     * @return ColumnHeader based on the index
     * @throws C3rIllegalArgumentException If the index is negative
     */
    public static ColumnHeader of(final int i) {
        if (i < 0) {
            throw new C3rIllegalArgumentException("Column index must be non-negative");
        }
        return new ColumnHeader("_c" + i);
    }

    /**
     * Construct the column name from a zero counted array.
     *
     * @param i Index of the column we want a name for
     * @return ColumnHeader based on the given index
     * @throws C3rIllegalArgumentException If the index is negative
     * @deprecated Use the {@link #of(int)} static factory method.
     */
    @Deprecated
    public static ColumnHeader getColumnHeaderFromIndex(final int i) {
        return ColumnHeader.of(i);
    }

    /**
     * Ensure all headers are turned into comparable strings by removing leading/trailing whitespace and making all headers lowercase.
     *
     * @param header Name to normalize
     * @return Trimmed and lowercase version of name, or {@code null} if the input was {@code null}
     */
    private static String normalize(final String header) {
        if (header == null) {
            return null;
        }
        // Locale.ROOT makes lowercasing deterministic regardless of the JVM's default locale
        // (e.g., avoids the Turkish dotless-i mapping changing header identity).
        return header.trim().toLowerCase(Locale.ROOT);
    }

    /**
     * Get the name this ColumnHeader represents as a String.
     *
     * @return Header name
     */
    @Override
    public String toString() {
        return header;
    }

    /**
     * Make sure the column header meets particular rules.
     * - The header must not be null or blank
     * - The length of the header must be short enough to be accepted by Glue
     * - The name must match the conventions set by Glue
     *
     * @throws C3rIllegalArgumentException If any of the rules are broken
     */
    public void validate() {
        if (header == null || header.isBlank()) {
            throw new C3rIllegalArgumentException("Column header names must not be blank");
        }
        // Length/pattern restrictions only apply to normalized headers; raw headers skip them.
        if (normalized) {
            if (header.length() > Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH) {
                throw new C3rIllegalArgumentException(
                        "Column header names cannot be longer than "
                                + Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH
                                + " characters, but found `"
                                + header
                                + "`.");
            }
            if (!Limits.AWS_CLEAN_ROOMS_HEADER_REGEXP.matcher(header).matches()) {
                throw new C3rIllegalArgumentException(
                        "Column header name `"
                                + header
                                + "` does not match pattern `"
                                + Limits.AWS_CLEAN_ROOMS_HEADER_REGEXP.pattern()
                                + "`.");
            }
        }
    }
}
| 2,602 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/DecryptConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import javax.crypto.SecretKey;
import java.util.Map;
/**
 * Configuration values required to decrypt a data file.
 */
@Getter
public final class DecryptConfig extends SimpleFileConfig {
    /**
     * Cryptographic transforms available.
     *
     * <p>
     * This method will be deprecated in the next major release. See its replacement at
     * {@link Transformer#initTransformers(SecretKey, String, ClientSettings, boolean)}
     */
    @Deprecated
    private final Map<ColumnType, Transformer> transformers;

    /**
     * When {@code true}, encountering a fingerprint column in the data is treated as an error.
     */
    private final boolean failOnFingerprintColumns;

    /**
     * Builds the configuration used while decrypting data.
     *
     * @param secretKey                Clean room key used to generate sub-keys for HMAC and encryption
     * @param sourceFile               Location of input data
     * @param fileFormat               Format of input data
     * @param targetFile               Where output should be saved
     * @param overwrite                Whether to overwrite the target file if it exists already
     * @param csvInputNullValue        What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue       What value should be saved in output to represent {@code null} values for CSV
     * @param salt                     Salt that can be publicly known but adds to randomness of cryptographic operations
     * @param failOnFingerprintColumns Whether to throw an error if a Fingerprint column is seen in the data
     */
    @Builder
    private DecryptConfig(@NonNull final SecretKey secretKey,
                          @NonNull final String sourceFile,
                          final FileFormat fileFormat,
                          final String targetFile,
                          final boolean overwrite,
                          final String csvInputNullValue,
                          final String csvOutputNullValue,
                          @NonNull final String salt,
                          final boolean failOnFingerprintColumns) {
        super(secretKey, sourceFile, fileFormat, targetFile, overwrite, csvInputNullValue, csvOutputNullValue, salt);
        this.failOnFingerprintColumns = failOnFingerprintColumns;
        // Decryption has no clean room settings to enforce, hence the null ClientSettings argument.
        this.transformers = Transformer.initTransformers(secretKey, salt, null, failOnFingerprintColumns);
        FileUtil.initFileIfNotExists(getTargetFile());
    }
}
| 2,603 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/MappedTableSchema.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import lombok.EqualsAndHashCode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
 * This class represents the mapping of named input columns to output columns.
 * An example would be a CSV file with a header row or Parquet file.
 */
@EqualsAndHashCode(callSuper = true)
public class MappedTableSchema extends TableSchema {
    /**
     * Specifications for columns in the output file, exactly as supplied by the caller.
     */
    private final List<ColumnSchema> columns;

    /**
     * Validated specifications, lazily derived from {@link #columns}.
     */
    private transient List<ColumnSchema> validatedColumns;

    /**
     * Creates an instance of {@link TableSchema} for data files with header information.
     *
     * @param columnSchemas Specifications for how output columns should be created from input columns
     * @throws C3rIllegalArgumentException If schema doesn't contain at least one column
     */
    public MappedTableSchema(final List<ColumnSchema> columnSchemas) {
        setHeaderRowFlag(true);
        if (columnSchemas == null) {
            throw new C3rIllegalArgumentException("At least one data column must be provided in the config file.");
        }
        // Defensive copy so later mutation of the caller's list cannot affect this schema.
        columns = new ArrayList<>(columnSchemas);
        validate();
    }

    /**
     * Take the input columns schemas, verify they match mapped schema rules and modify target header if needed.
     *
     * @return Validated and completed schemas
     * @throws C3rIllegalArgumentException If source header is missing
     */
    private List<ColumnSchema> validateAndConfigureColumnSchemas() {
        final ArrayList<ColumnSchema> modifiedSchemas = new ArrayList<>(columns.size());
        for (ColumnSchema cs : columns) {
            if (cs.getSourceHeader() == null) {
                throw new C3rIllegalArgumentException("Source header is required.");
            }
            // Fill in a default target header (source name + type suffix) when none was given.
            final var targetHeader = ColumnHeader.deriveTargetColumnHeader(cs.getSourceHeader(), cs.getTargetHeader(), cs.getType());
            modifiedSchemas.add(ColumnSchema.builder()
                    .sourceHeader(cs.getSourceHeader())
                    .targetHeader(targetHeader)
                    .internalHeader(cs.getInternalHeader())
                    .pad(cs.getPad())
                    .type(cs.getType())
                    .build());
        }
        return Collections.unmodifiableList(modifiedSchemas);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<ColumnSchema> getColumns() {
        if (validatedColumns == null) {
            validatedColumns = validateAndConfigureColumnSchemas();
        }
        // Return a mutable copy so callers cannot alter the cached validated list.
        return new ArrayList<>(validatedColumns);
    }

    /**
     * MappedTableSchemas do not autogenerate any header names so {@code null} is always returned.
     *
     * @return {@code null}
     */
    @Override
    public List<ColumnHeader> getPositionalColumnHeaders() {
        return null;
    }

    /**
     * Validates the remaining requirement that mapped table schemas must have a header row and requirements for schemas overall.
     *
     * @throws C3rIllegalArgumentException If a rule for mapped table schemas is not followed
     */
    @Override
    public void validate() {
        if (getHeaderRowFlag() == null || !getHeaderRowFlag()) {
            throw new C3rIllegalArgumentException("Mapped Table Schemas require a header row in the data.");
        }
        if (getPositionalColumnHeaders() != null) {
            throw new C3rIllegalArgumentException("Mapped schemas should not have any unspecified input headers.");
        }
        if (validatedColumns == null) {
            validatedColumns = validateAndConfigureColumnSchemas();
        }
        super.validate();
    }
}
| 2,604 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/PadType.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import java.io.Serializable;
/**
 * Differentiators for padding types.
 *
 * <p>
 * Used by {@link Pad} to describe how a sealed column's values should be padded.
 */
public enum PadType implements Serializable {
    /**
     * Values are not padded.
     */
    NONE,
    /**
     * Values are padded to a user-specified {@code PAD_LENGTH}.
     */
    FIXED,
    /**
     * Values are padded to {@code MAX_SIZE + PAD_LENGTH} where {@code MAX_SIZE} is the size of the
     * longest value in the column and {@code PAD_LENGTH} is user-specified.
     */
    MAX
} | 2,605 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/Config.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.encryption.keys.DerivedRootEncryptionKey;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import lombok.Getter;
import lombok.NonNull;
import javax.crypto.SecretKey;
import java.io.File;
import java.nio.charset.StandardCharsets;
/**
 * Basic information needed whether encrypting or decrypting data.
 */
@Getter
public abstract class Config {
    /**
     * Location of input data.
     */
    private final String sourceFile;

    /**
     * Data type.
     */
    private final FileFormat fileFormat;

    /**
     * Where output should be saved.
     */
    private final String targetFile;

    /**
     * Whether to overwrite the target file.
     */
    private final boolean overwrite;

    /**
     * What value should be interpreted as {@code null} for CSV files.
     */
    private final String csvInputNullValue;

    /**
     * What value should be saved in output to represent {@code null} values for CSV.
     */
    private final String csvOutputNullValue;

    /**
     * Clean room key used to generate sub-keys for HMAC and encryption.
     */
    private final SecretKey secretKey;

    /**
     * Salt that can be publicly known but adds to randomness of cryptographic operations.
     */
    private final String salt;

    /**
     * Basic configuration information needed for encrypting or decrypting data.
     *
     * @param secretKey          Clean room key used to generate sub-keys for HMAC and encryption
     * @param sourceFile         Location of input data
     * @param fileFormat         Format of input data
     * @param targetFile         Where output should be saved
     * @param overwrite          Whether to overwrite the target file if it exists already
     * @param csvInputNullValue  What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue What value should be saved in output to represent {@code null} values for CSV
     * @param salt               Salt that can be publicly known but adds to randomness of cryptographic operations
     */
    protected Config(@NonNull final SecretKey secretKey, @NonNull final String sourceFile, final FileFormat fileFormat,
                     final String targetFile, final boolean overwrite, final String csvInputNullValue, final String csvOutputNullValue,
                     @NonNull final String salt) {
        this.secretKey = secretKey;
        this.sourceFile = sourceFile;
        // Derive defaults for the format and target location when the caller omitted them.
        this.fileFormat = (fileFormat != null) ? fileFormat : FileFormat.fromFileName(sourceFile);
        this.targetFile = (targetFile != null) ? targetFile : getDefaultTargetFile(sourceFile);
        this.overwrite = overwrite;
        this.csvInputNullValue = csvInputNullValue;
        this.csvOutputNullValue = csvOutputNullValue;
        this.salt = salt;
        validate();
    }

    /**
     * Get a default target file name based on a source file name, maintaining the file extension if one exists.
     *
     * @param sourceFile Name of source file
     * @return Default target name
     */
    static String getDefaultTargetFile(@NonNull final String sourceFile) {
        final String baseName = new File(sourceFile).getName();
        final int dotIndex = baseName.lastIndexOf('.');
        // Insert ".out" before the extension, or append it when there is no extension.
        return (dotIndex < 0)
                ? baseName + ".out"
                : baseName.substring(0, dotIndex) + ".out" + baseName.substring(dotIndex);
    }

    /**
     * Verifies that settings are consistent.
     * - Make sure the shared secret is valid
     *
     * @throws C3rIllegalArgumentException If any of the rules are violated
     */
    private void validate() {
        // Validates that a key can be derived early instead of waiting for Transformer initialization later.
        new DerivedRootEncryptionKey(secretKey, salt.getBytes(StandardCharsets.UTF_8));
    }
}
| 2,606 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/PositionalTableSchema.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import lombok.EqualsAndHashCode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
 * Class that implements {@link TableSchema} for data without named columns.
 * The length of the outer list must be the same as the number of columns in the data. The inner list is the various output columns each
 * input column should map to. If that column is not mapped to anything, the inner list can be empty or {@code null}. The outer list is
 * used in order of columns read, i.e., index 0 is the first column of data, index 1 is the second column of data, etc.
 */
@EqualsAndHashCode(callSuper = true)
public class PositionalTableSchema extends TableSchema {
    /**
     * The schema for each column created in the output file, as specified verbatim by the user.
     */
    private final List<List<ColumnSchema>> columns;

    /**
     * Columns updated to have a source default, positional column name.
     */
    private transient List<ColumnSchema> mappedColumns;

    /**
     * The names of columns in the file.
     */
    private transient List<ColumnHeader> sourceHeaders;

    /**
     * Construct a {@link TableSchema} and validates it for files without a header row.
     *
     * @param positionalColumns Specification for how each input column maps to a list of 0 or more columns that are in the output
     */
    public PositionalTableSchema(final List<List<ColumnSchema>> positionalColumns) {
        setHeaderRowFlag(false);
        columns = (positionalColumns == null) ? null : Collections.unmodifiableList(positionalColumns);
        validate();
    }

    /**
     * Generate positional source headers of the specified length.
     *
     * @param sourceColumnCount Number of positional headers
     * @return The list of positional headers in ascending order
     */
    public static List<ColumnHeader> generatePositionalSourceHeaders(final int sourceColumnCount) {
        return IntStream.range(0, sourceColumnCount)
                .mapToObj(ColumnHeader::of)
                .collect(Collectors.toUnmodifiableList());
    }

    /**
     * Take the positional column schemas and transform them into fully defined mapped column schemas for data processing.
     *
     * @return List of all specified columns
     * @throws C3rIllegalArgumentException If an invalid positional column schema is encountered
     */
    private List<ColumnSchema> mapPositionalColumns() {
        final List<ColumnSchema> localColumns = new ArrayList<>();
        for (int i = 0; i < columns.size(); i++) {
            final var columnI = columns.get(i);
            if (columnI != null && !columnI.isEmpty()) {
                for (ColumnSchema csJ : columnI) {
                    if (csJ == null) {
                        // Error messages use 1-based column numbers for end users.
                        throw new C3rIllegalArgumentException("Invalid empty column specification found for column " + (i + 1));
                    }
                    localColumns.add(validateAndConfigureColumnSchema(i, csJ));
                }
            }
        }
        return Collections.unmodifiableList(localColumns);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public List<ColumnSchema> getColumns() {
        if (mappedColumns == null && columns != null) {
            mappedColumns = mapPositionalColumns();
        }
        return mappedColumns != null ? new ArrayList<>(mappedColumns) : null;
    }

    /**
     * The names we are using for columns without a header value. "Column index" will be used.
     *
     * @return List of created column names
     */
    @Override
    public List<ColumnHeader> getPositionalColumnHeaders() {
        if (sourceHeaders == null) {
            sourceHeaders = generatePositionalSourceHeaders(columns.size());
        }
        return new ArrayList<>(sourceHeaders);
    }

    /**
     * Make sure specification matches requirements for a CSV file without a header row.
     *
     * @param sourceColumnIndex Index of column source content is derived from
     * @param column            Column to validate and finish filling out
     * @return Schema mapping an input column to an output column
     * @throws C3rIllegalArgumentException If the source header has a value or target header does not have a value
     */
    private ColumnSchema validateAndConfigureColumnSchema(final int sourceColumnIndex, final ColumnSchema column) {
        if (column.getSourceHeader() != null) {
            throw new C3rIllegalArgumentException("Positional table schemas cannot have `sourceHeader` properties in column schema, but " +
                    "found one in column " + (sourceColumnIndex + 1) + ".");
        }
        if (column.getTargetHeader() == null) {
            throw new C3rIllegalArgumentException("Positional table schemas must have a target header name for each column schema. " +
                    "Missing target header in column " + (sourceColumnIndex + 1) + ".");
        }
        return ColumnSchema.builder()
                .sourceHeader(ColumnHeader.of(sourceColumnIndex))
                .targetHeader(column.getTargetHeader())
                .pad(column.getPad())
                .type(column.getType())
                .build();
    }

    /**
     * Validates the final requirements of a positional schema (no header row in the data) and checks the rules for schemas overall.
     *
     * @throws C3rIllegalArgumentException If a rule for positional table schemas is not followed
     */
    @Override
    public void validate() {
        if (getHeaderRowFlag() != null && getHeaderRowFlag()) {
            throw new C3rIllegalArgumentException("Positional Table Schemas cannot use data containing a header row");
        }
        if (columns == null || columns.isEmpty()) {
            throw new C3rIllegalArgumentException("At least one data column must be provided in the config file.");
        }
        if (mappedColumns == null) {
            mappedColumns = mapPositionalColumns();
        }
        if (sourceHeaders == null) {
            // Reuse the shared helper instead of duplicating the positional-header generation logic.
            sourceHeaders = generatePositionalSourceHeaders(columns.size());
        }
        super.validate();
    }
} | 2,607 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/Pad.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.internal.PadUtil;
import com.amazonaws.c3r.internal.Validatable;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.io.Serializable;
/**
 * The pad type and pad length (if applicable) that should be used on a {@link ColumnType#SEALED} column.
 */
@EqualsAndHashCode
@Getter
public final class Pad implements Validatable, Serializable {
    /**
     * Default specifications for padding: {@link PadType#NONE} with no length.
     */
    public static final Pad DEFAULT = new Pad(PadType.NONE, null);

    /**
     * User specified padding type.
     *
     * @see PadType
     */
    private final PadType type;

    /**
     * How many bytes should be used for the pad type.
     *
     * @see PadType
     */
    private final Integer length;

    /**
     * How a {@link ColumnType#SEALED} column should be padded.
     *
     * @param type   Type of padding
     * @param length Number of bytes to use with pad type
     * @see PadType
     */
    @Builder
    private Pad(final PadType type, final Integer length) {
        this.type = type;
        this.length = length;
        validate();
    }

    /**
     * Checks if the combination of pad type and length is valid.
     * <ul>
     *     <li>Pad type and length may both be unspecified</li>
     *     <li>If the pad type is {@link PadType#NONE} the length must be unspecified</li>
     *     <li>If the pad type is {@link PadType#FIXED} or {@link PadType#MAX}, the length is between 0 and
     *     {@link PadUtil#MAX_PAD_BYTES}</li>
     * </ul>
     *
     * @throws C3rIllegalArgumentException If the pad type and length do not follow the dependency rules
     */
    public void validate() {
        // If pad doesn't exist (type and length are null), there's nothing to validate
        if (type == null && length == null) {
            return;
        } else if (type == null) {
            throw new C3rIllegalArgumentException("A pad type is required if a pad length is specified but only a pad length was " +
                    "provided.");
        }
        // When a padLength is provided, a valid PadType must be sealed
        switch (type) {
            case NONE:
                if (length != null) {
                    throw new C3rIllegalArgumentException("A pad length was provided with an invalid pad type "
                            + type + ". A pad length is only permitted when the pad type is 'fixed' or 'max'.");
                }
                return;
            case MAX:
            case FIXED:
                if (length == null) {
                    throw new C3rIllegalArgumentException("A pad length must be provided when pad type is not 'none'.");
                } else if (length < 0 || length > PadUtil.MAX_PAD_BYTES) {
                    // Fixed duplicated word in the user-facing error message ("provided provided" -> "provided").
                    throw new C3rIllegalArgumentException(
                            "A pad length of " + length
                                    + " was provided for padded column. A pad length "
                                    + "between 0 and " + PadUtil.MAX_PAD_BYTES
                                    + " must be used when pad type is not 'none'.");
                }
                break;
            default:
                throw new C3rIllegalArgumentException("Unknown padding type " + type + ".");
        }
    }

    /**
     * Whether two passes will be needed to process the data because of the type of padding.
     *
     * @return {@code true} if the padding is {@link PadType#FIXED} or {@link PadType#MAX}
     */
    public boolean requiresPreprocessing() {
        return type != null && type != PadType.NONE;
    }
}
| 2,608 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/ColumnType.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import com.amazonaws.c3r.CleartextTransformer;
import com.amazonaws.c3r.FingerprintTransformer;
import com.amazonaws.c3r.SealedTransformer;
import java.io.Serializable;
/**
* Differentiators for how a column is represented.
*/
public enum ColumnType implements Serializable {
/**
* Encrypted, meant to be used in the SELECT clause of an SQL query.
*/
SEALED("sealed", SealedTransformer.class),
/**
* HMACed, meant to be used in ON clauses of an SQL query.
*/
FINGERPRINT("fingerprint", FingerprintTransformer.class),
/**
* Cleartext, can be used in any clause of an SQL query.
*/
CLEARTEXT("cleartext", CleartextTransformer.class);
/**
* Associated transformer.
*/
private final Class<?> transformerClass;
/**
* Formatted version of name.
*/
private final String name;
/**
* Associates column type with a specific transformer.
*
* @param name How the enum should be displayed when transformed to a string
* @param clazz Name of transformer class
* @see com.amazonaws.c3r.Transformer
*/
ColumnType(final String name, final Class<?> clazz) {
this.name = name;
this.transformerClass = clazz;
}
/**
* Get the type of transformer that should be used for the column type.
*
* @return Corresponding transformer
*/
public Class<?> getTransformerType() {
return transformerClass;
}
/**
* {@inheritDoc}
*/
@Override
public String toString() {
return name;
}
} | 2,609 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/config/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Classes that contain all the information needed to perform cryptographic computations on input data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.config; | 2,610 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/FileFormat.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import java.util.Arrays;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Provides support for file format type information such as file extensions.
 */
public enum FileFormat {
    /**
     * CSV formatted file.
     */
    CSV(".csv"),

    /**
     * Parquet formatted file.
     */
    PARQUET(".parquet");

    /**
     * Lookup table to map string extension to enum representation of extension.
     */
    private static final Map<String, FileFormat> EXTENSIONS = Arrays.stream(FileFormat.values()).collect(
            Collectors.toMap(
                    fmt -> fmt.extension,
                    Function.identity()
            ));

    /**
     * String containing the file extension (lowercase, including the leading dot).
     */
    private final String extension;

    /**
     * Construct a FileFormat with the given file extension.
     *
     * @param extension Supported file type extension to use
     */
    FileFormat(final String extension) {
        this.extension = extension;
    }

    /**
     * Check and see if the input file name has an extension specifying the file type.
     *
     * @param fileName Input file name
     * @return Supported data type or {@code null}
     */
    public static FileFormat fromFileName(final String fileName) {
        final int extensionStart = fileName.lastIndexOf('.');
        if (extensionStart < 0) {
            return null;
        }
        // Locale-independent lowercasing so matching is not affected by the JVM default locale
        // (e.g., the Turkish dotless-i would otherwise break ".CSV" -> ".csv").
        return EXTENSIONS.get(fileName.substring(extensionStart).toLowerCase(Locale.ROOT));
    }
}
| 2,611 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/RowWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.Value;
import lombok.NonNull;
import java.util.Collection;
/**
 * Resource for storing record entries to some store/backend.
 *
 * @param <T> Specific data type that will be written
 */
public interface RowWriter<T extends Value> {
    /**
     * Gets the target file name.
     *
     * @return Name of target file
     */
    String getTargetName();

    /**
     * Gets the headers for the output file.
     *
     * @return The ColumnHeaders of the output file
     */
    Collection<ColumnHeader> getHeaders();

    /**
     * Write a record to the store.
     *
     * @param row Row to write with columns mapped to respective values
     */
    void writeRow(@NonNull Row<T> row);

    /**
     * Flush the write buffer.
     */
    void flush();

    /**
     * Close this record source so the resource is no longer in use.
     */
    void close();
}
| 2,612 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/RowReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.Value;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import lombok.Getter;
import java.util.List;
/**
 * Source from which records can be loaded in a streaming/as-needed fashion.
 *
 * @param <T> Specific data type being read
 */
public abstract class RowReader<T extends Value> {
    /**
     * How many rows have been read so far.
     */
    @Getter
    private long readRowCount = 0;

    /**
     * Gets the headers found in the input file, ordered as they were found.
     *
     * @return The headers of the input file
     */
    public abstract List<ColumnHeader> getHeaders();

    /**
     * Close this record source so the resource is no longer in use.
     */
    public abstract void close();

    /**
     * Called by child classes to initialize/update the {@code Row}
     * returned by {@link #peekNextRow}.
     */
    protected abstract void refreshNextRow();

    /**
     * Return the row populated from the data source by {@code refreshNextRow}.
     *
     * @return The next Row of data without advancing the reader's position
     */
    protected abstract Row<T> peekNextRow();

    /**
     * Whether there is another row remaining to be read from the source.
     *
     * @return {@code true} if there is still more data
     */
    public boolean hasNext() {
        return peekNextRow() != null;
    }

    /**
     * Returns the next row if {@code hasNext() == true}.
     *
     * @return The next row, or {@code null} if none remain
     * @throws C3rRuntimeException If the SQL row count limit is exceeded
     */
    public Row<T> next() {
        final Row<T> currentRow = peekNextRow();
        if (currentRow == null) {
            return null;
        }
        refreshNextRow();
        readRowCount++;
        // Strict comparison allows exactly Limits.ROW_COUNT_MAX rows to be read. The previous `>=`
        // check rejected the final permitted row, contradicting both the error message below and
        // the bound enforced by setReadRowCount.
        if (readRowCount > Limits.ROW_COUNT_MAX) {
            throw new C3rRuntimeException("A table cannot contain more than " + Limits.ROW_COUNT_MAX + " rows.");
        }
        return currentRow;
    }

    /**
     * Set number of already read rows in case this row reader is adding to some already existing set of rows.
     *
     * @param readRowCount How many rows have <i>already</i> been read
     * @throws C3rRuntimeException Table exceeds the maximum number of allowed rows
     */
    void setReadRowCount(final long readRowCount) {
        if (readRowCount > Limits.ROW_COUNT_MAX) {
            throw new C3rRuntimeException("A table cannot contain more than " + Limits.ROW_COUNT_MAX + " rows.");
        }
        this.readRowCount = readRowCount;
    }

    /**
     * Describes where rows are being read from in a human-friendly fashion.
     *
     * @return Name of source being used
     */
    public abstract String getSourceName();
}
| 2,613 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/CsvRowWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.univocity.parsers.csv.CsvWriter;
import com.univocity.parsers.csv.CsvWriterSettings;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
 * Resource for writing record entries to an underlying store for processing.
 */
public final class CsvRowWriter implements RowWriter<CsvValue> {
    /**
     * Where to write CSV data.
     */
    @Getter
    private final String targetName;

    /**
     * Column names for the output CSV file.
     */
    @Getter
    private final List<ColumnHeader> headers;

    /**
     * Interprets and writes CSV data to target.
     */
    private final CsvWriter writer;

    /**
     * Creates a record writer for a CSV file.
     *
     * @param targetName      File name for CSV to be written to
     * @param outputNullValue The value representing NULL in the output file. By default, `,,` will represent NULL
     * @param headers         The headers of the individual columns in the output CSV
     * @param fileCharset     Character set of the file. Defaults to {@code UTF_8} if {@code null}
     * @throws C3rRuntimeException If {@code fileName} cannot be opened for reading
     */
    @Builder
    private CsvRowWriter(
            @NonNull final String targetName,
            final String outputNullValue,
            @NonNull final List<ColumnHeader> headers,
            final Charset fileCharset) {
        this.targetName = targetName;
        this.headers = new ArrayList<>(headers);

        final CsvWriterSettings settings = new CsvWriterSettings();
        settings.setHeaders(this.headers.stream().map(ColumnHeader::toString).toArray(String[]::new));

        // NULL is written as the caller-specified token, or as a blank (`,,`) when unspecified.
        final boolean blankNullEncoding = outputNullValue == null || outputNullValue.isBlank();
        settings.setNullValue(outputNullValue == null ? "" : outputNullValue);
        // When NULL is encoded as a blank, genuinely empty strings are written as `""` so the two
        // remain distinguishable; otherwise empty strings are emitted without quotes like any other
        // quoted value with no whitespace inside.
        settings.setEmptyValue(blankNullEncoding ? "\"\"" : "");

        // Err on the side of safety w.r.t. whitespace: never trim content, and quote any value
        // containing whitespace characters so the output round-trips cleanly.
        settings.trimValues(false);
        settings.setQuotationTriggers(' ', '\t', '\n', '\r', '\f');

        final Charset charset = fileCharset == null ? StandardCharsets.UTF_8 : fileCharset;
        try {
            writer = new CsvWriter(new OutputStreamWriter(new FileOutputStream(targetName), charset), settings);
        } catch (FileNotFoundException e) {
            throw new C3rRuntimeException("Unable to write to output CSV file " + targetName + ".", e);
        }
        writer.writeHeaders();
    }

    /**
     * Write a record to the store.
     *
     * @param row Data to be written (as is)
     */
    @Override
    public void writeRow(@NonNull final Row<CsvValue> row) {
        writer.writeRow(headers.stream()
                .map(header -> row.getValue(header).toString())
                .toArray(String[]::new));
    }

    /**
     * Close the connection to the file.
     */
    @Override
    public void close() {
        writer.close();
    }

    /**
     * Write all pending data to the file.
     */
    @Override
    public void flush() {
        writer.flush();
    }
}
| 2,614 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/SqlRowWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.Value;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.sql.SqlTable;
import com.amazonaws.c3r.io.sql.TableGenerator;
import lombok.Getter;
import lombok.NonNull;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Class for adding rows of data to an existing table. The Connection passed to this class should be grabbed
 * from {@link TableGenerator#initTable}.
 *
 * @param <T> Data type being written to the database.
 */
public class SqlRowWriter<T extends Value> implements RowWriter<T> {
    /**
     * Generate a statement to write the next row to the database.
     */
    private final PreparedStatement insertStatement;

    /**
     * Connection to SQL database for this session.
     */
    private final Connection connection;

    /**
     * A map of columns to their (1-based) positions in the insert statement.
     */
    private final Map<ColumnHeader, Integer> columnStatementPositions = new LinkedHashMap<>();

    /**
     * Maps internal column headers to target column headers. (Keys are internal headers; the nonce
     * header maps to itself.)
     */
    private final Map<ColumnHeader, ColumnHeader> internalToTargetColumnHeaders;

    /**
     * Name of file to write to.
     */
    @Getter
    private final String targetName;

    /**
     * Configures a connection to a SQL database for writing data.
     *
     * @param columnInsights Metadata information about columns being written
     * @param nonceHeader    Name for column where nonces will be stored
     * @param sqlTable       SQL table connection
     * @throws C3rRuntimeException If a connection to the SQL database couldn't be established
     */
    public SqlRowWriter(final Collection<ColumnInsight> columnInsights, final ColumnHeader nonceHeader,
                        @NonNull final SqlTable sqlTable) {
        final List<ColumnInsight> columns = new ArrayList<>(columnInsights);
        for (int i = 0; i < columns.size(); i++) {
            columnStatementPositions.put(columns.get(i).getInternalHeader(), i + 1);
        }
        columnStatementPositions.put(nonceHeader, columns.size() + 1); // Add the nonce column to the end
        internalToTargetColumnHeaders = columnInsights.stream()
                .collect(Collectors.toMap(ColumnSchema::getInternalHeader, ColumnSchema::getTargetHeader));
        internalToTargetColumnHeaders.put(nonceHeader, nonceHeader);
        this.connection = sqlTable.getConnection();
        this.insertStatement = initInsertStatement();
        try {
            this.targetName = connection.getCatalog();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Internal error: failed to connect to local SQL database.", e);
        }
    }

    /**
     * Generates the SQL statement used in the PreparedStatement for inserting each row of data.
     *
     * @param statement                A statement to be used purely for escaping column names
     * @param columnStatementPositions A map of column names to their desired positions in the insert statement
     * @return The SQL statement for inserting a row of data
     * @throws C3rRuntimeException If a column header cannot be escaped
     */
    static String getInsertStatementSql(final Statement statement, final Map<ColumnHeader, Integer> columnStatementPositions) {
        final StringBuilder sb = new StringBuilder();
        sb.append("INSERT INTO ").append(TableGenerator.DEFAULT_TABLE_NAME).append(" (");
        // Ensure columns in insert statement are properly ordered
        final List<ColumnHeader> columnNames = columnStatementPositions.entrySet().stream()
                .sorted(Map.Entry.comparingByValue())
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());
        // Append all the escaped column names
        sb.append(columnNames.stream()
                .map(column -> {
                    try {
                        return statement.enquoteIdentifier(column.toString(), true);
                    } catch (SQLException e) {
                        throw new C3rRuntimeException("Could not prepare internal statement for temporary database. Failed to " +
                                "escape column header: " + column, e);
                    }
                })
                .collect(Collectors.joining(",")));
        sb.append(")\n").append("VALUES (");
        // The map always contains at least the nonce column, so `size() - 1 >= 0` holds here.
        sb.append("?,".repeat(columnNames.size() - 1));
        sb.append("?)");
        return sb.toString();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Collection<ColumnHeader> getHeaders() {
        return columnStatementPositions.keySet();
    }

    /**
     * Generates the PreparedStatement for inserting a row of data based on the columns that were provided in the TableSchema.
     *
     * @return A PreparedStatement ready for inserting into the SQL table
     * @throws C3rRuntimeException If there's an error while setting up write connection to SQL database
     */
    PreparedStatement initInsertStatement() {
        // try-with-resources closes the helper Statement (used strictly to escape column names),
        // which was previously leaked.
        try (Statement statement = connection.createStatement()) {
            final String sql = getInsertStatementSql(statement, columnStatementPositions);
            return connection.prepareStatement(sql);
        } catch (SQLException e) {
            throw new C3rRuntimeException("Could not prepare internal statement for temporary database.", e);
        }
    }

    /**
     * Takes a map of all the column headers to their respective values and adds them to the SQL table.
     * <ul>
     *     <li>Map should include the nonce.</li>
     *     <li>Values not included in the map provided will be inserted as nulls.</li>
     * </ul>
     *
     * @param row A map of column headers to values to be added to the table
     * @throws C3rRuntimeException If the row could not be added to the temporary database
     */
    @Override
    public void writeRow(@NonNull final Row<T> row) {
        try {
            for (Map.Entry<ColumnHeader, Integer> entry : columnStatementPositions.entrySet()) {
                final ColumnHeader targetColumn = internalToTargetColumnHeaders.get(entry.getKey());
                // NOTE(review): assumes row.getValue(...) returns a non-null Value for every mapped
                // column (missing values presumably surface as a Value with null bytes) — confirm.
                insertStatement.setBytes(entry.getValue(), row.getValue(targetColumn).getBytes());
            }
            insertStatement.execute();
            insertStatement.clearParameters();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Unknown SQL error: could not add row to the temporary database. "
                    + "Please review stack traces for more detail.", e);
        }
    }

    /**
     * No op as connection may be in use elsewhere.
     */
    @Override
    public void close() {
        // Nothing to do. Can't close Connection as it may be in use elsewhere.
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void flush() {
        // Nothing to do.
    }
}
| 2,615 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support for reading and writing rows of data for various data sources.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.io; | 2,616 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/SqlRowReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.RowFactory;
import com.amazonaws.c3r.data.Value;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Nonce;
import com.amazonaws.c3r.io.sql.SqlTable;
import com.amazonaws.c3r.io.sql.TableGenerator;
import lombok.Getter;
import lombok.NonNull;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Reads a row of the specified data type out of a SQL database.
 *
 * @param <T> Type of data stored in the database.
 */
public class SqlRowReader<T extends Value> extends RowReader<T> {
    /**
     * Connection to the SQL database for this session.
     */
    private final SqlTable sqlTable;

    /**
     * Generate a statement to query the database for the next row.
     */
    private final PreparedStatement selectStatement;

    /**
     * List of internal column headers.
     */
    @Getter
    private final List<ColumnHeader> headers;

    /**
     * Result of a query to the database.
     */
    private final ResultSet resultSet;

    /**
     * Creates an empty row for the specified data type.
     */
    private final RowFactory<T> rowFactory;

    /**
     * Name of the column that contains the nonce for each row.
     */
    private final ColumnHeader nonceHeader;

    /**
     * Name of the source file.
     */
    @Getter
    private final String sourceName;

    /**
     * Maps internal column headers to target headers.
     */
    private final Map<ColumnHeader, ColumnHeader> internalToTargetColumnHeaders;

    /**
     * Holds the next row of data to be read.
     */
    private Row<T> nextRow;

    /**
     * Configures a connection to a SQL database to read out rows of data.
     *
     * @param columnInsights Metadata about columns
     * @param nonceHeader    Name of the column in the database that contains the nonce value for each row
     * @param rowFactory     Creates empty rows of the specified type
     * @param sqlTable       SQL database instance
     * @throws C3rRuntimeException If the SQL database couldn't be accessed
     */
    public SqlRowReader(@NonNull final Collection<ColumnInsight> columnInsights,
                        @NonNull final ColumnHeader nonceHeader,
                        @NonNull final RowFactory<T> rowFactory,
                        @NonNull final SqlTable sqlTable) {
        this.rowFactory = rowFactory;
        this.sqlTable = sqlTable;
        this.internalToTargetColumnHeaders = columnInsights.stream()
                .collect(Collectors.toMap(ColumnSchema::getInternalHeader, ColumnSchema::getTargetHeader));
        this.headers = columnInsights.stream().map(ColumnSchema::getInternalHeader).collect(Collectors.toList());
        this.nonceHeader = nonceHeader;
        try {
            this.sourceName = this.sqlTable.getConnection().getCatalog();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Failed to connect to local SQL database.", e);
        }
        this.selectStatement = initSelectStatement();
        try {
            resultSet = selectStatement.executeQuery();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Data could not be retrieved from the temporary database.", e);
        }
        refreshNextRow();
    }

    /**
     * Generates the SQL statement used in the PreparedStatement for retrieving all the rows of data.
     *
     * @param statement   A statement to be used purely for escaping column names
     * @param columnNames The columns to be selected
     * @param nonceHeader The column containing the nonce to order by
     * @return The SQL statement for selecting data
     * @throws C3rRuntimeException If columnNames or nonceHeader cannot be properly escaped
     */
    static String getSelectStatementSql(final Statement statement, final List<ColumnHeader> columnNames, final ColumnHeader nonceHeader) {
        final StringBuilder sb = new StringBuilder();
        sb.append("SELECT ");
        // Append all the escaped column names
        sb.append(columnNames.stream().map(column -> {
            try {
                return statement.enquoteIdentifier(column.toString(), true);
            } catch (SQLException e) {
                throw new C3rRuntimeException("Could not prepare internal statement for temporary database. Failed to escape column " +
                        "header: " + column, e);
            }
        }).collect(Collectors.joining(",")));
        try {
            final String nonce = statement.enquoteIdentifier(nonceHeader.toString(), true);
            // Ordering by the nonce column gives a deterministic read order across passes.
            sb.append(",").append(nonce).append(" FROM ").append(TableGenerator.DEFAULT_TABLE_NAME);
            sb.append(" ORDER BY ").append(nonce);
            return sb.toString();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Invalid SQL identifier encountered.", e);
        }
    }

    /**
     * Generates the PreparedStatement for selecting all the data in the table, ordered by the nonce column.
     *
     * @return A PreparedStatement ready for selecting data from the SQL table
     * @throws C3rRuntimeException If there's an error while setting up connection to SQL database
     */
    PreparedStatement initSelectStatement() {
        // try-with-resources closes the helper Statement (used strictly to escape column names),
        // which was previously leaked.
        try (Statement statement = sqlTable.getConnection().createStatement()) {
            final String sql = getSelectStatementSql(statement, headers, nonceHeader);
            return sqlTable.getConnection().prepareStatement(sql);
        } catch (SQLException e) {
            throw new C3rRuntimeException("Could not prepare internal statement for temporary database.", e);
        }
    }

    /**
     * Load the next row from the executed query into the private {@code nextRow} field.
     */
    @Override
    protected void refreshNextRow() {
        try {
            if (resultSet.next()) {
                nextRow = rowFactory.newRow();
                for (ColumnHeader column : headers) {
                    nextRow.putBytes(internalToTargetColumnHeaders.get(column), resultSet.getBytes(column.toString()));
                }
                nextRow.putNonce(nonceHeader, new Nonce(resultSet.getBytes(nonceHeader.toString())));
            } else {
                nextRow = null;
            }
        } catch (SQLException e) {
            throw new C3rRuntimeException("Data could not be retrieved from the temporary database.", e);
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Row<T> peekNextRow() {
        return nextRow != null ? nextRow.clone() : null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        // try-with-resources guarantees the statement is closed even when closing the result set
        // throws; the original code leaked selectStatement in that case.
        try (selectStatement; resultSet) {
            // Resources are closed automatically: resultSet first, then selectStatement.
        } catch (SQLException e) {
            throw new C3rRuntimeException("Could not close results from temporary database.", e);
        }
    }
}
| 2,617 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/CsvRowReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.CsvRow;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.univocity.parsers.common.TextParsingException;
import com.univocity.parsers.common.record.Record;
import com.univocity.parsers.conversions.Conversions;
import com.univocity.parsers.csv.CsvParser;
import com.univocity.parsers.csv.CsvParserSettings;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Specific implementation for reading CSV files in preparation for data marshalling.
*/
public final class CsvRowReader extends RowReader<CsvValue> {
/**
* Maximum number of columns allowed.
*
* <p>
* This is defined at the implementation layer and not the RowReader interface in order to allow tuning among different formats.
*/
static final int MAX_COLUMN_COUNT = 10000;
/**
* Name of the CSV file for error reporting purposes.
*/
@Getter
final String sourceName;
/**
* Source CSV content is read from.
*/
private final Reader reader;
/**
* Charset in use for I/O.
*/
private final Charset fileCharset;
/**
* CSV parser configured to user settings.
*/
private final CsvParser parser;
/**
* Headers for columns in the CSV file.
*/
@Getter
private final List<ColumnHeader> headers;
/**
* Whether headers were normalized.
*/
private final boolean skipHeaderNormalization;
/**
* Whether the reader has been closed.
*/
private boolean closed = false;
/**
* The next row to be returned.
*/
private Row<CsvValue> nextRow;
/**
 * Creates a record reader for a CSV file.
 *
 * <p>
 * For customizing NULL treatment in input data, the `inputNullValue` parameter has the following semantics:
 * <ul>
 *     <li> If `inputNullValue` is `null` then any blank (e.g., `,,`, `, ,`, etc) entry will be interpreted as NULL
 *     and empty quotes (`,"",`) will be interpreted as NULL, </li>
 *     <li> else if `inputNullValue.isBlank()` then any blank (e.g., `,,`, `, ,`, etc) entry will be interpreted
 *     as NULL but no quoted values will be interpreted as NULL. </li>
 *     <li>else if `inputNullValue` is `"\"\""` then _only_ `"\"\""` will be interpreted as NULL</li>
 *     <li>else non-blank values matching `inputNullValue` after being parsed will be considered NULL.</li>
 * </ul>
 *
 * @param sourceName              File name to be read as a CSV file path
 * @param inputNullValue          What should be interpreted as {@code null} in the input
 * @param externalHeaders         Strings to use as column header names if the file itself does not contain a header row
 * @param fileCharset             Character set of the file. Defaults to {@code UTF_8} if {@code null}
 * @param skipHeaderNormalization Whether to skip the normalization of read in headers
 * @throws C3rIllegalArgumentException If number of columns in file doesn't match number of columns in PositionalTableSchema or a parse
 *                                     error occurs
 * @throws C3rRuntimeException         If the file can't be read
 */
@Builder
private CsvRowReader(@NonNull final String sourceName,
                     final String inputNullValue,
                     final List<ColumnHeader> externalHeaders,
                     final Charset fileCharset,
                     final boolean skipHeaderNormalization) {
    this.sourceName = sourceName;
    this.fileCharset = (fileCharset != null) ? fileCharset : StandardCharsets.UTF_8;
    this.skipHeaderNormalization = skipHeaderNormalization;
    this.reader = openReaderForFile(sourceName, this.fileCharset);
    // Build the parser from the collected configuration, then resolve headers according to
    // whether the caller supplied them externally (headerless data) or they come from the file.
    final ParserConfiguration parserConfig = generateParserSettings(externalHeaders, inputNullValue);
    parser = new CsvParser(parserConfig.csvParserSettings);
    this.headers = (externalHeaders == null)
            ? setupForHeaderFileParsing(parserConfig)
            : setupForNoHeaderFileParsing(parserConfig, externalHeaders);
    refreshNextRow();
}
/**
 * Open a reader for the given file and Charset such that explicit errors are thrown if
 * the encountered file contents is not in the expected Charset.
 *
 * @param sourceFile  File to open
 * @param fileCharset Charset to interpret file as, throwing an error if invalid data is encountered.
 * @return A reader for the file
 * @throws C3rIllegalArgumentException If the file is not found
 */
private static InputStreamReader openReaderForFile(@NonNull final String sourceFile,
                                                   @NonNull final Charset fileCharset) {
    final FileInputStream fileStream;
    try {
        fileStream = new FileInputStream(sourceFile);
    } catch (FileNotFoundException e) {
        throw new C3rIllegalArgumentException("Unable to locate file " + sourceFile + ".", e);
    }
    // Using a CharsetDecoder (instead of the Charset itself) makes the reader fail loudly on
    // content that is not valid in the specified Charset.
    return new InputStreamReader(fileStream, fileCharset.newDecoder());
}
/**
* Extracts the header names from the file with no normalization (e.g., whitespace trimming, converting to lowercase, etc).
*
* @param csvFileName File to extract headers from
* @param fileCharset Charset for file
* @return CSV header names sans normalization.
* @throws C3rRuntimeException If an I/O error occurs reading the file
*/
private static String[] extractHeadersWithoutNormalization(@NonNull final String csvFileName,
final Charset fileCharset) {
final Charset charset = fileCharset == null ? StandardCharsets.UTF_8 : fileCharset;
try (var reader = openReaderForFile(csvFileName, charset)) {
final CsvParserSettings csvParserSettings = generateParserSettings(null, null).csvParserSettings;
csvParserSettings.trimValues(false);
final CsvParser parser = new CsvParser(csvParserSettings);
beginParsing(parser, reader, charset);
final String[] headerStrings = executeTextParsing(() -> parser.getRecordMetadata().headers());
if (headerStrings == null) {
throw new C3rRuntimeException("Unable to read headers from " + csvFileName);
}
return headerStrings;
} catch (TextParsingException e) {
throw new C3rRuntimeException("Could not get column count: an error occurred while parsing " + csvFileName, e);
} catch (IOException e) {
throw new C3rRuntimeException("Could not get column count: an I/O error occurred while reading " + csvFileName, e);
}
}
/**
* Parse the first line in a CSV file to extract the column count.
*
* @param csvFileName CSV file to count columns in
* @param fileCharset Character encoding in file (defaults to {@link CsvRowReader} default encoding if {@code null})
* @return The column count for the given file
* @throws C3rRuntimeException If an I/O error occurs reading the file
*/
public static int getCsvColumnCount(@NonNull final String csvFileName, final Charset fileCharset) {
return extractHeadersWithoutNormalization(csvFileName, fileCharset).length;
}
/**
* Takes in information on headers and null values to create settings for parsing.
*
* @param externalHeaders List of external header names for if there are any (positional schemas only use this)
* @param inputNullValue Value to be used for a custom null
* @return All configuration information needed throughout parsing
*/
private static ParserConfiguration generateParserSettings(final List<ColumnHeader> externalHeaders, final String inputNullValue) {
final ParserConfiguration state = new ParserConfiguration();
state.csvParserSettings = new CsvParserSettings();
// configure reader settings
state.csvParserSettings.setLineSeparatorDetectionEnabled(true);
// `setNullValue` sets the value used when no characters appear in an entry (after trimming)
// `setEmptyValue` sets the value used when no characters appear within a quoted entry (`,"",`)
state.toNullConversionRequired = false;
if (inputNullValue == null) {
state.csvParserSettings.setNullValue(null);
state.csvParserSettings.setEmptyValue(null);
} else if (inputNullValue.isBlank()) {
state.csvParserSettings.setNullValue(null);
state.csvParserSettings.setEmptyValue("");
} else if (inputNullValue.trim().equals("\"\"")) {
state.csvParserSettings.setNullValue("");
state.csvParserSettings.setEmptyValue(null);
} else {
state.csvParserSettings.setNullValue("");
state.csvParserSettings.setEmptyValue("");
state.toNullConversionRequired = true;
}
// Set maximum number of supported columns
state.csvParserSettings.setMaxColumns(MAX_COLUMN_COUNT);
// Disable the check for max chars per column. This is enforced by the Transformers as they're processed and may unnecessarily
// restrict user data being used for column types like `fingerprint`.
state.csvParserSettings.setMaxCharsPerColumn(-1);
// Check if this is a positional file and turn off header extraction if it is
state.csvParserSettings.setHeaderExtractionEnabled(externalHeaders == null);
// Save custom null value for when we need it for substitution
state.nullValue = inputNullValue;
// Save the number of columns expected in a positional file
state.numberOfColumns = (externalHeaders == null) ? null : externalHeaders.size();
return state;
}
/**
* Starts iteration style parsing of the CSV file.
*
* @param parser CSV data parser
* @param reader Source to read CSV data from
* @param fileCharset Charset for the contents the reader is reading
* @throws C3rRuntimeException If the file can't be parsed
*/
private static void beginParsing(@NonNull final CsvParser parser,
@NonNull final Reader reader,
@NonNull final Charset fileCharset) {
try {
parser.beginParsing(reader);
} catch (TextParsingException e) {
throw new C3rRuntimeException("Couldn't begin parsing of input file. This is most likely due to the file not being correctly" +
" formatted as " + fileCharset + ". Please review the stack trace for more details.", e);
}
}
/**
* Gets the parser set up to read a CSV file with header rows.
* - Gets headers from file to use for mapping values
* - Sets up null value conversion if needed
*
* @param state Parser configuration
* @return Name of all columns
* @throws C3rRuntimeException If the file can't be read
*/
private List<ColumnHeader> setupForHeaderFileParsing(final ParserConfiguration state) {
// This file has headers, read them from file
beginParsing(parser, reader, fileCharset);
final String[] headerStrings;
final List<ColumnHeader> headers;
if (skipHeaderNormalization) {
headerStrings = extractHeadersWithoutNormalization(sourceName, fileCharset);
headers = Arrays.stream(headerStrings).map(ColumnHeader::ofRaw).collect(Collectors.toList());
} else {
headerStrings = executeTextParsing(() -> parser.getRecordMetadata().headers());
if (headerStrings == null) {
throw new C3rRuntimeException(String.format("Unable to read headers from %s", sourceName));
}
headers = Arrays.stream(headerStrings).map(ColumnHeader::new).collect(Collectors.toList());
}
// Set null value if needed
if (state.toNullConversionRequired) {
executeTextParsing(() -> parser.getRecordMetadata().convertFields(Conversions.toNull(state.nullValue)).set(headerStrings));
}
return headers;
}
/**
* Sets up the parser for reading a CSV file without headers.
*
* @param state Parser configuration
* @param headers Names for unspecified columns
* @return Name of all columns
* @throws C3rRuntimeException If unable to access CSV file or a parse error occurs
*/
private List<ColumnHeader> setupForNoHeaderFileParsing(final ParserConfiguration state, final List<ColumnHeader> headers) {
// Check to make sure CSV column count matches what we think we have
// This uses a unique reader for this to avoid extraneous error handling by using file markers and rewinding the file
try (Reader columnCounter = new InputStreamReader(new FileInputStream(sourceName), ((InputStreamReader) reader).getEncoding())) {
beginParsing(parser, columnCounter, fileCharset);
setNullValueState(state);
final Record record = executeTextParsing(parser::parseNextRecord);
if (record.getValues().length != state.numberOfColumns) {
throw new C3rRuntimeException("Positional table schemas must match the same number of columns as the data. " +
"Expected: " + state.numberOfColumns + ", found: " + record.getValues().length + ".");
}
parser.stopParsing();
} catch (IOException e) {
throw new C3rRuntimeException("Unable to access source file " + sourceName + ".", e);
}
// Change to primary reader and set null value if needed
beginParsing(parser, reader, fileCharset);
setNullValueState(state);
return Collections.unmodifiableList(headers);
}
/**
* Looks at the parser configuration and sets the null value representation for each column.
*
* @param state Parser configuration
*/
private void setNullValueState(final ParserConfiguration state) {
// Set null value if needed
if (state.toNullConversionRequired) {
final ArrayList<Integer> indices = new ArrayList<>();
for (int i = 0; i < state.numberOfColumns; i++) {
indices.add(i);
}
executeTextParsing(() -> parser.getRecordMetadata().convertIndexes(Conversions.toNull(state.nullValue)).set(indices));
}
}
/**
* Stage the next CSV row.
*
* @throws C3rRuntimeException If there's a mismatch between the expected column count and the read column count
*/
protected void refreshNextRow() {
if (closed) {
nextRow = null;
return;
}
final Record record = executeTextParsing(parser::parseNextRecord);
if (record == null) {
nextRow = null;
return;
}
if (record.getValues().length != headers.size()) {
throw new C3rRuntimeException("Column count mismatch at row " + this.getReadRowCount() + " of input file. Expected "
+ headers.size() + " columns, but found " + record.getValues().length + ".");
}
nextRow = new CsvRow();
for (int i = 0; i < headers.size(); i++) {
nextRow.putValue(headers.get(i), new CsvValue(record.getString(i)));
}
}
/**
* Look at the next CSV row.
*
* @return Parsed and normalized CSV values
*/
protected Row<CsvValue> peekNextRow() {
return nextRow;
}
/**
* Close the resources reading the CSV file.
*
* @throws C3rRuntimeException If there's an error closing the file connection
*/
@Override
public void close() {
if (!closed) {
parser.stopParsing();
try {
reader.close();
} catch (IOException e) {
throw new C3rRuntimeException("Error closing connection to CSV file.", e);
}
closed = true;
}
}
/**
* Adds information when the CSV parser encounters an error to give the user more context.
*
* @param executable Call to CsvParser
* @param <T> The type that is returned by the supplier if no text parsing errors occur
* @return A parsed value
* @throws C3rRuntimeException If the data can't be parsed
*/
private static <T> T executeTextParsing(final Supplier<T> executable) {
try {
return executable.get();
} catch (TextParsingException e) {
if (e.getColumnIndex() > MAX_COLUMN_COUNT - 1) {
throw new C3rRuntimeException("Couldn't parse row " + (e.getLineIndex() + 1) + " at column " + (e.getColumnIndex() + 1)
+ " of input file. Please verify that column count does not exceed " + MAX_COLUMN_COUNT, e);
} else {
throw new C3rRuntimeException("An unknown error occurred parsing row " + (e.getLineIndex() + 1) + " at column " +
(e.getColumnIndex() + 1) + " of input file. Please review the stack trace for more details.", e);
}
}
}
    /**
     * Simple class used to hold the CsvParserSettings from the parsing library plus three other settings we need to properly parse.
     */
    private static class ParserConfiguration {
        /**
         * Univocity parser settings (line separators, null/empty substitutions, column limits).
         */
        private CsvParserSettings csvParserSettings;
        /**
         * Whether a custom null value is in use, requiring a to-null conversion to be registered on the parser.
         */
        private boolean toNullConversionRequired;
        /**
         * Custom null value, if any ({@code null} when none was configured).
         */
        private String nullValue;
        /**
         * How many columns are in the table ({@code null} when the count comes from the file's header row).
         */
        private Integer numberOfColumns;
    }
}
| 2,618 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/sql/TableGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.sql;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.NonNull;
import java.io.File;
import java.io.IOException;
import java.nio.file.InvalidPathException;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Set;
/**
 * Creates a SQL table database file.
 *
 * <p>
 * All members are static helpers for building and querying the temporary SQLite table
 * used during data processing.
 */
public abstract class TableGenerator {
    /**
     * Default name for database file.
     */
    public static final String DEFAULT_TABLE_NAME = "c3rTmp";

    /**
     * Creates the temporary SQL table used for data processing in a freshly created
     * temporary database file.
     *
     * @param schema User-provided schema for table to be generated
     * @param nonceHeader The column name used for storing nonces
     * @param tempDir The directory where the temporary SQL table will be created
     * @return A Connection to the table
     * @throws C3rRuntimeException If the temporary database file couldn't be created
     */
    public static SqlTable initTable(final TableSchema schema, final ColumnHeader nonceHeader,
                                     final String tempDir) {
        final File tempDbFile = initTableFile(tempDir);
        try {
            final SqlTable sqlTable = new SqlTable(tempDbFile);
            // try-with-resources guarantees the Statement is closed even when one of the
            // execute calls throws (previously the Statement leaked on failure).
            try (Statement stmt = sqlTable.getConnection().createStatement()) {
                final String sql = getTableSchemaFromConfig(stmt, schema, nonceHeader);
                stmt.execute(sql);
                // Disable journaling to avoid leaving journal files on disk if there is an exception during execution,
                // and set synchronous to OFF for performance.
                // Neither should be an issue since our table is ephemeral and is not intended to live or be
                // used outside a single program execution.
                stmt.execute("PRAGMA synchronous = OFF;");
                stmt.execute("PRAGMA journal_mode = OFF;");
            }
            return sqlTable;
        } catch (SQLException e) {
            throw new C3rRuntimeException("The temporary database used for processing could not be created. File: "
                    + tempDbFile.getAbsolutePath(), e);
        }
    }

    /**
     * Creates a temporary SQL table file used for data processing.
     *
     * @param tempDir The directory used for the temporary SQL table
     * @return Temporary system file to use as database file
     * @throws C3rRuntimeException If the database file could not be created
     */
    static File initTableFile(@NonNull final String tempDir) {
        try {
            final File tempDbFile = File.createTempFile("c3rTmp", ".db", new File(tempDir));
            // Set 600 file permissions so only the owner can read the (potentially sensitive) data.
            FileUtil.setOwnerReadWriteOnlyPermissions(tempDbFile);
            // Ensure file is cleaned up when JVM is closed.
            tempDbFile.deleteOnExit();
            return tempDbFile;
        } catch (InvalidPathException | IOException e) {
            throw new C3rRuntimeException("The temporary database used for processing could not be created in the temp directory. " +
                    "Directory: " + tempDir, e);
        }
    }

    /**
     * Generates the SQL for creating a table based on the columns that were provided in the TableSchema.
     *
     * @param stmt The Statement from the Connection allowing all the columns to be properly escaped if necessary
     * @param schema The schema used to create the SQL table
     * @param nonceHeader The column name for storing nonces
     * @return A SQL string for creating the dynamic table
     * @throws C3rRuntimeException If SQL table could not be created
     */
    static String getTableSchemaFromConfig(final Statement stmt, final TableSchema schema,
                                           final ColumnHeader nonceHeader) {
        try {
            final StringBuilder sb = new StringBuilder();
            // NOTE: we do not declare the nonce column to be a PRIMARY KEY up front to increase performance
            // for large numbers of inserts, and instead we later make a UNIQUE INDEX on the nonce
            // _after_ all the data is loaded. (See `getCoveringIndexStatement` in this file).
            sb.append("CREATE TABLE ").append(DEFAULT_TABLE_NAME)
                    .append(" (\n")
                    .append(stmt.enquoteIdentifier(nonceHeader.toString(), false))
                    .append(" TEXT");
            for (ColumnSchema columnSchema : schema.getColumns()) {
                sb.append(",\n").append(stmt.enquoteIdentifier(columnSchema.getInternalHeader().toString(), true)).append(" TEXT");
            }
            sb.append(")");
            return sb.toString();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Invalid SQL identifier encountered.", e);
        }
    }

    /**
     * Generate the SQL for querying a column for duplicate non-null entries.
     *
     * @param stmt SQL Statement for identifier formatting
     * @param columnHeader Header for column to check for duplicates
     * @return The string-encoded SQL query
     * @throws C3rRuntimeException If an error occurs formatting the column header name
     */
    public static String getDuplicatesInColumnStatement(final Statement stmt, final ColumnHeader columnHeader) {
        try {
            final StringBuilder sb = new StringBuilder();
            final String quotedColumnHeader = stmt.enquoteIdentifier(columnHeader.toString(), true);
            sb.append("SELECT ")
                    .append(quotedColumnHeader)
                    .append(" FROM ")
                    .append(stmt.enquoteIdentifier(TableGenerator.DEFAULT_TABLE_NAME, true))
                    .append(" GROUP BY ")
                    .append(quotedColumnHeader)
                    .append(" HAVING COUNT(")
                    .append(quotedColumnHeader) // using column name here instead of `*` excludes NULL entries
                    .append(") > 1");
            return sb.toString();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Invalid SQL identifier encountered.", e);
        }
    }

    /**
     * Generate a fresh/unused header name.
     *
     * @param usedNames {@code ColumnHeader}s already in use
     * @param nameBase Name to use if available. Serves as base for freshly generated name if already in use
     * @return A unique header name
     */
    public static ColumnHeader generateUniqueHeader(@NonNull final Set<ColumnHeader> usedNames, @NonNull final String nameBase) {
        final ColumnHeader defaultNonceHeader = new ColumnHeader(nameBase);
        // Generate a unique name for the nonce column that does not clash with any other column names,
        // erring on the side of caution by avoiding both source and target headers (so we are guaranteed
        // uniqueness regardless of how data is transformed).
        // Appending an incrementing suffix guarantees termination and uniqueness.
        ColumnHeader nonceHeader = defaultNonceHeader;
        int n = 0;
        while (usedNames.contains(nonceHeader)) {
            nonceHeader = new ColumnHeader(defaultNonceHeader.toString() + n);
            n++;
        }
        return nonceHeader;
    }

    /**
     * Generate an SQL statement to create a covering UNIQUE INDEX for each column, leading with the nonce on the left,
     * so it is the primary key of the index.
     *
     * @param stmt Statement for escaping identifiers
     * @param schema Original table schema (used to guarantee freshness of index name)
     * @param nonceHeader Which column has the nonces
     * @return A `CREATE UNIQUE INDEX ...` statement for the nonce column
     * @throws C3rRuntimeException If an error is encountered using the Statement to enquote IDs
     */
    public static String getCoveringIndexStatement(final Statement stmt, final TableSchema schema, final ColumnHeader nonceHeader) {
        final Set<ColumnHeader> usedHeaders = schema.getSourceAndTargetHeaders();
        usedHeaders.add(nonceHeader);
        final ColumnHeader nonceIndexHeader = TableGenerator.generateUniqueHeader(usedHeaders, "row_nonce_idx");
        try {
            // NOTE: We lead with the nonce column on the left-most side of the index, since per
            // https://www.sqlite.org/queryplanner.html "The left-most column is the primary key used for ordering
            // the rows in the index."
            final StringBuilder sb = new StringBuilder();
            sb.append("CREATE UNIQUE INDEX ")
                    .append(stmt.enquoteIdentifier(nonceIndexHeader.toString(), true))
                    .append(" ON ")
                    .append(stmt.enquoteIdentifier(TableGenerator.DEFAULT_TABLE_NAME, true))
                    .append("(")
                    .append(stmt.enquoteIdentifier(nonceHeader.toString(), true));
            // We include the other columns in the index since (a) it does not affect the ordering (the nonce is
            // both unique and leftmost, so the other columns will never affect the ordering) and (b) this
            // increases the performance of fetching the entire rows in order, since all the content is
            // co-located in the index and fewer database fetches have to be performed than when _only_
            // indexing on the nonce.
            for (ColumnSchema columnSchema : schema.getColumns()) {
                sb.append(", ").append(stmt.enquoteIdentifier(columnSchema.getTargetHeader().toString(), true));
            }
            sb.append(");");
            return sb.toString();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Invalid SQL identifier encountered.", e);
        }
    }
}
| 2,619 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/sql/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support classes for managing row-based access to a SQL database.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*
* @see com.amazonaws.c3r.io.SqlRowReader
* @see com.amazonaws.c3r.io.SqlRowWriter
*/
package com.amazonaws.c3r.io.sql; | 2,620 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/io/sql/SqlTable.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.sql;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import lombok.Getter;
import java.io.File;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
/**
 * A SQL connection and the underlying database file.
 */
@Getter
public class SqlTable {
    /**
     * Connection to the SQL database for this session.
     */
    private final Connection connection;

    /**
     * SQL database file.
     */
    private final File databaseFile;

    /**
     * Creates a connection to a SQL database using the file as the database source.
     *
     * @param databaseFile File to use as a SQL database
     * @throws C3rRuntimeException If there's an error accessing file
     */
    public SqlTable(final File databaseFile) {
        this.databaseFile = databaseFile;
        // SQLite's JDBC driver takes the database file path directly in the URL.
        final String jdbcUrl = "jdbc:sqlite:" + databaseFile.getAbsolutePath();
        try {
            this.connection = DriverManager.getConnection(jdbcUrl);
        } catch (SQLException e) {
            throw new C3rRuntimeException("Could not access SQL database.", e);
        }
    }
}
| 2,621 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/InitializationVector.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
/**
 * The initialization data that is unique to the data field instance.
 */
public class InitializationVector {
    /**
     * Length of IVs, in bytes.
     */
    public static final int IV_BYTE_LENGTH = 12;

    /**
     * The IV value.
     */
    private final byte[] bytes;

    /**
     * Set the initialization vector to the specified value.
     *
     * @param bytes Value to use as IV
     */
    public InitializationVector(final byte[] bytes) {
        // Defensive copy so callers can't mutate the stored IV afterwards.
        this.bytes = (bytes == null) ? null : bytes.clone();
        validate();
    }

    /**
     * Derives the IV for a given column and nonce.
     *
     * @param label The label of the column the IV is being generated for
     * @param nonce The nonce for the row the IV is being generated for
     * @return an IV unique to the cell to be encrypted
     * @throws C3rIllegalArgumentException If the data provided was incomplete
     * @throws C3rRuntimeException If the hash algorithm is not available on this system
     */
    public static InitializationVector deriveIv(final String label, final Nonce nonce) {
        if (label == null || label.isBlank()) {
            throw new C3rIllegalArgumentException("A column label must be provided when generating an IV, but was null or empty.");
        }
        if (nonce == null) {
            throw new C3rIllegalArgumentException("A nonce must be provided when generating an IV, but was null.");
        }
        final MessageDigest digest;
        try {
            digest = MessageDigest.getInstance(KeyUtil.HASH_ALG);
        } catch (NoSuchAlgorithmException e) {
            throw new C3rRuntimeException("Requested algorithm `" + KeyUtil.HASH_ALG + "` is not available!", e);
        }
        // The IV is derived from hash(label || nonce) so each (column, row) pair gets a distinct IV.
        final byte[] labelBytes = label.getBytes(StandardCharsets.UTF_8);
        final byte[] nonceBytes = nonce.getBytes();
        final ByteBuffer hashInput = ByteBuffer.allocate(labelBytes.length + nonceBytes.length);
        hashInput.put(labelBytes).put(nonceBytes);
        final byte[] hash = digest.digest(hashInput.array());
        // Truncate the digest to the IV length.
        final byte[] iv = new byte[InitializationVector.IV_BYTE_LENGTH];
        if (hash != null) {
            ByteBuffer.wrap(hash).get(iv);
        }
        return new InitializationVector(iv);
    }

    /**
     * Get a copy of the IV.
     *
     * @return IV value
     */
    public byte[] getBytes() {
        return bytes.clone();
    }

    /**
     * Verifies an IV was specified, and it is of the required length.
     *
     * @throws C3rIllegalArgumentException If the data provided for IV is invalid
     */
    private void validate() {
        if (bytes == null) {
            throw new C3rIllegalArgumentException("An IV may not be null.");
        }
        if (bytes.length != IV_BYTE_LENGTH) {
            throw new C3rIllegalArgumentException(
                    "An IV must be " + IV_BYTE_LENGTH + " bytes in length, but was " + bytes.length + " bytes.");
        }
    }
}
| 2,622 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/AdditionalAuthenticatedData.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
/**
 * Stores AAD for use during operation to confirm origin and authenticity of data.
 */
public class AdditionalAuthenticatedData {
    /**
     * AAD value ({@code null} when none was supplied).
     */
    private final byte[] bytes;

    /**
     * Stores a value used to verify data is from the expected source.
     *
     * @param bytes Value to identify your data as authentic
     */
    public AdditionalAuthenticatedData(final byte[] bytes) {
        // Defensive copy so later mutation of the caller's array can't change the stored AAD.
        this.bytes = (bytes == null) ? null : bytes.clone();
    }

    /**
     * Get the AAD value.
     *
     * @return A copy of the AAD bytes, or {@code null} if none was set
     */
    public byte[] getBytes() {
        return (bytes == null) ? null : bytes.clone();
    }
}
| 2,623 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/Validatable.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
/**
 * To be implemented by any class that requires a validation step after being constructed/deserialized.
 *
 * <p>
 * A post-construction check lets GSON-deserialized instances be verified against the same rules as
 * Java-constructed ones, since deserialization bypasses constructor validation.
 */
public interface Validatable {
    /**
     * Checks type instance for validity. Used to ensure consistent construction of data types between GSON and Java generated instances.
     */
    void validate();
}
| 2,624 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/Nonce.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import java.security.SecureRandom;
/**
 * Random number used for various cryptographic operations.
 */
public class Nonce {
    /**
     * Length of nonce in bytes.
     */
    public static final int NONCE_BYTE_LENGTH = 32;

    /**
     * Use the most secure CSPRNG available on the system to generate random numbers.
     */
    private static final SecureRandom NONCE_GENERATOR = new SecureRandom();

    /**
     * Random number.
     */
    private byte[] bytes;

    /**
     * Store a copy of an already existing nonce.
     *
     * @param bytes Random value of exactly {@value #NONCE_BYTE_LENGTH} bytes
     * @throws C3rIllegalArgumentException If {@code bytes} is {@code null} or the wrong length
     */
    public Nonce(final byte[] bytes) {
        if (bytes != null) {
            // Defensive copy so callers can't mutate the stored nonce afterwards.
            this.bytes = bytes.clone();
        }
        validate();
    }

    /**
     * Generates a cryptographically secure nonce of {@value Nonce#NONCE_BYTE_LENGTH} bytes.
     *
     * @return A nonce consisting of random bytes
     */
    public static Nonce nextNonce() {
        final byte[] nonceBytes = new byte[Nonce.NONCE_BYTE_LENGTH];
        NONCE_GENERATOR.nextBytes(nonceBytes);
        return new Nonce(nonceBytes);
    }

    /**
     * Get a copy of the value.
     *
     * @return Unique copy of nonce
     */
    public byte[] getBytes() {
        return bytes.clone();
    }

    /**
     * Validates that the nonce is not {@code null} and is of the required length {@value #NONCE_BYTE_LENGTH} bytes.
     *
     * @throws C3rIllegalArgumentException If the nonce does not meet requirements
     */
    private void validate() {
        if (bytes == null) {
            throw new C3rIllegalArgumentException("A Nonce may not be null.");
        } else if (bytes.length != NONCE_BYTE_LENGTH) {
            // Message grammar fixed (previously read "An Nonce must be ...").
            throw new C3rIllegalArgumentException("A Nonce must be " + NONCE_BYTE_LENGTH + " bytes in length, but was " + bytes.length
                    + " bytes.");
        }
    }
}
| 2,625 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/Limits.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
import java.util.regex.Pattern;
/**
 * Contains limits required for cryptographic guarantees
 * and other global correctness properties in a single place
 * for auditing.
 */
public final class Limits {
    /**
     * Max number of columns allowed in an output encrypted table.
     */
    public static final int ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX = 1600;
    /**
     * Max number of encrypted rows across all tables from all providers (2^41).
     */
    public static final long ROW_COUNT_MAX = 2199023255552L;
    /**
     * Limit on header length (restricted by Glue).
     *
     * @deprecated This constant is no longer used - see {@link Limits#AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH} instead.
     */
    @Deprecated
    public static final int GLUE_MAX_HEADER_UTF8_BYTE_LENGTH = 255;
    /**
     * Valid characters used for headers in Glue.
     *
     * @deprecated This constant is no longer used - see {@link Limits#AWS_CLEAN_ROOMS_HEADER_REGEXP} instead.
     */
    @Deprecated
    // Checkstyle doesn't like the escape characters in this string, but it is verbatim from the GLUE docs
    // and so it seems valuable to keep it as-is, so it's a 1-to-1 match.
    // CHECKSTYLE:OFF
    public static final String GLUE_VALID_HEADER_REGEXP = "[\u0020-\uD7FF\uE000-\uFFFD\uD800\uDC00-\uDBFF\uDFFF\t]*";
    // CHECKSTYLE:ON
    /**
     * Limit on header length (restricted by AWS Clean Rooms).
     *
     * @see <a href="https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_Column.html">AWS Clean Rooms API Reference</a>
     */
    public static final int AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH = 128;
    /**
     * Valid pattern for an AWS Clean Rooms header: must start with a lowercase letter, digit,
     * or underscore, optionally followed by hyphen-separated segments of the same characters plus spaces.
     *
     * @see <a href="https://docs.aws.amazon.com/clean-rooms/latest/apireference/API_Column.html">AWS Clean Rooms API Reference</a>
     */
    public static final Pattern AWS_CLEAN_ROOMS_HEADER_REGEXP =
            Pattern.compile("[a-z0-9_](([a-z0-9_ ]+-)*([a-z0-9_ ]+))?");

    /**
     * Hidden utility class constructor (this class is constants-only and should never be instantiated).
     */
    private Limits() {
    }
}
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/PadUtil.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.internal;
import com.amazonaws.c3r.encryption.EncryptionContext;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import java.nio.ByteBuffer;
import java.util.Arrays;
/**
 * Utility class for managing message padding.
 */
public abstract class PadUtil {
    /**
     * Max size of any cleartext bytes, post-padding, if being encrypted.
     */
    public static final int MAX_PADDED_CLEARTEXT_BYTES = 12000;

    /**
     * Max pad length.
     */
    public static final int MAX_PAD_BYTES = 10000;

    /**
     * Number of bytes used to encode the size of the padding, even if there is no padding.
     */
    static final int PAD_LENGTH_BYTES = Short.BYTES;

    /**
     * Uses the EncryptionContext to append a pad to a message
     * between 0 and {@value PadUtil#MAX_PAD_BYTES} bytes long.
     *
     * <p>
     * The final padded message may not be longer than {@value PadUtil#MAX_PADDED_CLEARTEXT_BYTES}
     *
     * <p>
     * The final padded message will always have {@value PadUtil#PAD_LENGTH_BYTES} appended to it in order to store the length of the pad.
     * These extra bytes do not count towards the {@value PadUtil#MAX_PADDED_CLEARTEXT_BYTES}.
     *
     * <p>
     * Padding types:
     * <ul>
     *     <li>MAX - pads message to the total length of {@link EncryptionContext#getMaxValueLength} plus
     *     {@link EncryptionContext#getPadLength}</li>
     *     <li>FIXED - pads message to a specified length of {@link EncryptionContext#getPadLength}</li>
     *     <li>NONE - do not append any padding</li>
     * </ul>
     *
     * @param encryptionContext The EncryptionContext for the column
     * @param message The message to be padded
     * @return The message with the pad appended, followed by {@value PadUtil#PAD_LENGTH_BYTES} bytes encoding the pad length
     * @throws C3rIllegalArgumentException If the padding could not be created with provided data and info
     */
    public static byte[] padMessage(final byte[] message, final EncryptionContext encryptionContext) {
        if (encryptionContext == null) {
            throw new C3rIllegalArgumentException("An EncryptionContext must be provided when padding.");
        }
        // Treat a null message as empty so padding/length accounting below is uniform.
        final byte[] nullSafeMessage = (message == null) ? new byte[0] : message;
        final int paddingLength;
        switch (encryptionContext.getPadType()) {
            // MAX and FIXED use the same logic here, as the EncryptionContext
            // reasons differently in `getTargetPaddedLength` on how much to pad.
            case MAX:
            case FIXED:
                paddingLength = encryptionContext.getTargetPaddedLength() - nullSafeMessage.length;
                final String baseError = "Error padding values for target column `" + encryptionContext.getColumnLabel() + "`:";
                if (paddingLength < 0) {
                    // The message to be padded doesn't have the room to be padded to the fixed length
                    throw new C3rIllegalArgumentException(
                            baseError + " No room for padding! Target padding length is "
                                    + encryptionContext.getTargetPaddedLength()
                                    + " bytes but message is already " + nullSafeMessage.length + " bytes long.");
                }
                if (encryptionContext.getTargetPaddedLength() > MAX_PADDED_CLEARTEXT_BYTES) {
                    // The target message size exceeds the maximum
                    throw new C3rIllegalArgumentException(
                            baseError + " No room for padding! Target padding length is "
                                    + encryptionContext.getTargetPaddedLength()
                                    + " bytes but maximum padded size is " + (MAX_PADDED_CLEARTEXT_BYTES) + " bytes long.");
                }
                if (encryptionContext.getPadLength() < 0 || encryptionContext.getPadLength() > MAX_PAD_BYTES) {
                    // The target padding size exceeds the maximum
                    throw new C3rIllegalArgumentException(
                            baseError + " Padding length invalid! Padding length is "
                                    + encryptionContext.getPadLength()
                                    + " bytes but must be within the range of 0 to " + MAX_PAD_BYTES + " bytes long.");
                }
                break;
            case NONE:
            default:
                paddingLength = 0;
        }
        final byte[] pad = generatePad(paddingLength);
        // Layout: [message][pad][2-byte pad length]
        final ByteBuffer paddedMessage = ByteBuffer.allocate(nullSafeMessage.length + pad.length + PAD_LENGTH_BYTES);
        paddedMessage.put(nullSafeMessage);
        paddedMessage.put(pad);
        paddedMessage.putShort((short) paddingLength);
        return paddedMessage.array();
    }

    /**
     * Creates a pad of the specified length. The pad contents are constant (all zero bytes): this is safe
     * because the padded message is subsequently encrypted, which hides the pad contents.
     *
     * @param padLength The size of the pad to be created
     * @return A zero-filled pad of length padLength
     */
    static byte[] generatePad(final int padLength) {
        // A freshly allocated byte array is already zero-initialized per the Java Language
        // Specification, so no explicit Arrays.fill is required.
        return new byte[padLength];
    }

    /**
     * Removes the pad from a padded message.
     *
     * <p>
     * NOTE(review): assumes the input is a well-formed padded message produced by {@link #padMessage}
     * (i.e., at least {@value PadUtil#PAD_LENGTH_BYTES} bytes long with a valid trailing pad length).
     *
     * @param paddedMessage The message with padding
     * @return The unpadded message
     */
    public static byte[] removePadding(final byte[] paddedMessage) {
        final ByteBuffer message = ByteBuffer.wrap(paddedMessage);
        // Last 2 bytes contain the length of the padding
        final int padLength = message.getShort(paddedMessage.length - PAD_LENGTH_BYTES);
        // Cleartext is the message up until the padding
        final byte[] cleartext = new byte[paddedMessage.length - PAD_LENGTH_BYTES - padLength];
        message.get(cleartext);
        return cleartext;
    }
}
| 2,627 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/internal/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Classes that are only used internally in C3R. These are in support of prepping data for a cryptographic operation or
* contain values used to execute those operations.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.internal; | 2,628 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/utils/FileUtil.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import lombok.NonNull;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.InvalidPathException;
import java.nio.file.Path;
import java.nio.file.attribute.AclEntry;
import java.nio.file.attribute.AclEntryPermission;
import java.nio.file.attribute.AclEntryType;
import java.nio.file.attribute.AclFileAttributeView;
import java.nio.file.attribute.UserPrincipal;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
/**
 * Utility class for file related I/O actions.
 */
public final class FileUtil {
    /**
     * Directory executable is running from.
     */
    public static final String CURRENT_DIR = System.getProperty("user.dir");

    /**
     * System default temporary directory location.
     */
    public static final String TEMP_DIR = System.getProperty("java.io.tmpdir");

    /**
     * Hidden utility class constructor.
     */
    private FileUtil() {
    }

    /**
     * Reads bytes from a source file into a String.
     *
     * @param file source filename
     * @return source file content
     * @throws C3rIllegalArgumentException If the filepath is invalid
     * @throws C3rRuntimeException If there's an error while reading file
     */
    public static String readBytes(final String file) {
        final Path p;
        try {
            p = FileSystems.getDefault().getPath(file);
        } catch (InvalidPathException e) {
            throw new C3rIllegalArgumentException("Failed to open file: " + file + ".");
        }
        final String bytes;
        try {
            // Note: Do not replace with `Files.readString()` as your IDE may suggest. Our encrypted values frequently involve
            // padding (`=` characters at the end of the string) to make the binary values be even blocks of four. `Files.readString()`
            // tries to convert the entire string, including the padding into UTF-8 which generates a malformed input exception.
            // The constructor for the String class takes into account padding and removes it (specifically, it always replaces
            // malformed input and unmappable character sequences with this charset's default replacement string). This means it always
            // decodes the value, regardless of padding. If you do change to `Files.readString()`, unit tests will fail due to the errors.
            bytes = new String(Files.readAllBytes(p), StandardCharsets.UTF_8);
        } catch (IOException e) {
            // Preserve the underlying cause so the original I/O failure is not lost.
            throw new C3rRuntimeException("Failed to read file: " + file + ".", e);
        }
        return bytes;
    }

    /**
     * Checks if a location is readable and a file.
     *
     * @param location Filepath
     * @throws C3rIllegalArgumentException If the filepath is empty, points to an invalid location or is not readable
     */
    public static void verifyReadableFile(final String location) {
        if (location.isBlank()) {
            throw new C3rIllegalArgumentException("File path is empty.");
        }
        // Check that it's a file, not a directory, in a readable location
        final Path inFile = Path.of(location);
        if (Files.isDirectory(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from file `" + location + "`. File is a directory.");
        } else if (Files.notExists(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from file `" + location + "`. File does not exist.");
        } else if (!Files.isReadable(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from file `" + location + "`. Permission denied.");
        }
    }

    /**
     * Checks if a location is readable and a directory.
     *
     * @param location Filepath
     * @throws C3rIllegalArgumentException If the filepath is empty, points to an invalid location or is not readable
     */
    public static void verifyReadableDirectory(final String location) {
        if (location.isBlank()) {
            throw new C3rIllegalArgumentException("File path is empty.");
        }
        // Check that it's a directory in a readable location
        final Path inFile = Path.of(location);
        if (!Files.isDirectory(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from directory `" + location + "`. File is not a directory.");
        } else if (Files.notExists(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from directory `" + location + "`. File does not exist.");
        } else if (!Files.isReadable(inFile)) {
            throw new C3rIllegalArgumentException("Cannot read from directory `" + location + "`. Permission denied.");
        }
    }

    /**
     * Checks if a location is writable. If the file already exists, the location is only considered writable if the
     * {@code override} flag is {@code true}. Directories are not considered files.
     *
     * @param location Filepath to check
     * @param overwrite Indicates if we can overwrite an existing file
     * @throws C3rIllegalArgumentException If the filepath is empty, points to an invalid location or is not a writable location
     */
    public static void verifyWritableFile(final String location, final boolean overwrite) {
        if (location.isBlank()) {
            throw new C3rIllegalArgumentException("File path is empty.");
        }
        // Check if file exists and can be written to
        final Path outFile = Path.of(location);
        if (Files.exists(outFile) && !overwrite) {
            throw new C3rIllegalArgumentException(
                    "Cannot write to file `" + location + "`. File already exists and overwrite flag is false.");
        } else if (Files.isDirectory(outFile)) {
            throw new C3rIllegalArgumentException("Cannot write to file `" + location + "`. File is a directory.");
        } else if (Files.exists(outFile) && !Files.isWritable(outFile)) {
            throw new C3rIllegalArgumentException("Cannot write to file `" + location + "`. Permission denied.");
        }
    }

    /**
     * Checks if a location is a writable directory. Directory must already exist.
     *
     * @param location Filepath to check
     * @throws C3rIllegalArgumentException If the filepath is empty, not a directory or is not a writable location
     */
    public static void verifyWritableDirectory(final String location) {
        verifyWritableDirectory(location, true);
    }

    /**
     * Checks if a location is a writable directory. Directory must already exist.
     *
     * @param location Filepath to check
     * @param overwrite Indicates if we can overwrite an existing path
     * @throws C3rIllegalArgumentException If the filepath is empty, not a directory or is not a writable location
     */
    public static void verifyWritableDirectory(final String location, final boolean overwrite) {
        if (location.isBlank()) {
            throw new C3rIllegalArgumentException("File path is empty.");
        }
        // Check that it's a writeable directory
        final Path outFileDirectory = Path.of(location);
        if (Files.exists(outFileDirectory) && !overwrite) {
            throw new C3rIllegalArgumentException(
                    "Cannot write to path `" + location + "`. path already exists and overwrite flag is false.");
        } else if (Files.exists(outFileDirectory) && !Files.isDirectory(outFileDirectory)) {
            throw new C3rIllegalArgumentException("Cannot write to path `" + location + "`. Path is not a directory.");
        } else if (Files.exists(outFileDirectory) && !Files.isWritable(outFileDirectory)) {
            throw new C3rIllegalArgumentException("Cannot write to path `" + location + "`. Permission denied.");
        }
    }

    /**
     * Creates the file passed in if it does not exist and sets RW permissions only for the owner.
     *
     * @param fileName the file to be created
     * @throws C3rRuntimeException If the file could not be initialized
     */
    public static void initFileIfNotExists(final String fileName) {
        final Path file = Path.of(fileName);
        if (!Files.exists(file)) {
            // Create the target file and restrict it to owner read/write permissions
            try {
                Files.createFile(file);
                setOwnerReadWriteOnlyPermissions(file.toFile());
            } catch (IOException e) {
                throw new C3rRuntimeException("Cannot initialize file: " + fileName, e);
            }
        }
    }

    /**
     * Creates the directory passed in if it does not exist.
     *
     * <p>
     * Note: unlike {@link #initFileIfNotExists}, no permissions are changed on the created directory.
     *
     * @param directoryName the directory to be created
     * @throws C3rRuntimeException If the directory could not be initialized
     */
    public static void initDirectoryIfNotExists(final String directoryName) {
        final Path file = Path.of(directoryName);
        if (!Files.exists(file)) {
            // Create the target directory (permissions are left at system defaults)
            try {
                Files.createDirectory(file);
            } catch (IOException e) {
                throw new C3rRuntimeException("Cannot initialize directory: " + directoryName, e);
            }
        }
    }

    /**
     * Sets permissions to RW for the owner only on the passed file.
     *
     * @param file Change file permissions at this location
     * @throws C3rRuntimeException If the file permissions couldn't be set
     */
    public static void setOwnerReadWriteOnlyPermissions(@NonNull final File file) {
        if (isWindows()) {
            // POSIX-style permission bits don't apply on Windows; use an ACL entry instead.
            setWindowsFilePermissions(file.toPath(), AclEntryType.ALLOW, Set.of(AclEntryPermission.READ_DATA,
                    AclEntryPermission.WRITE_DATA));
        } else {
            // Clear each permission for everyone, then re-grant read/write to the owner only.
            boolean success = file.setWritable(false, false);
            success &= file.setWritable(true, true);
            success &= file.setReadable(false, false);
            success &= file.setReadable(true, true);
            success &= file.setExecutable(false, false);
            if (!success) {
                throw new C3rRuntimeException("Unable to set permissions on file: " + file.getPath());
            }
        }
    }

    /**
     * Set the file to the specified permissions on a computer running the Windows operating system.
     *
     * @param path Change file permissions at this location
     * @param type Access control list entry type
     * @param permissions Requested permissions
     * @throws C3rRuntimeException If the file permissions couldn't be set
     */
    static void setWindowsFilePermissions(final Path path, final AclEntryType type, final Set<AclEntryPermission> permissions) {
        try {
            final UserPrincipal owner = Files.getOwner(path);
            final AclEntry entry = AclEntry.newBuilder()
                    .setType(type)
                    .setPrincipal(owner)
                    .setPermissions(permissions)
                    .build();
            final AclFileAttributeView view = Files.getFileAttributeView(path, AclFileAttributeView.class);
            final List<AclEntry> acl = view.getAcl() == null ? new ArrayList<>() : view.getAcl();
            // Prepend so the new entry takes precedence over any existing entries.
            acl.add(0, entry);
            Files.setAttribute(path, "acl:acl", acl);
        } catch (IOException e) {
            throw new C3rRuntimeException("Could not set file permissions for file: " + path, e);
        }
    }

    /**
     * Checks if the system is running the Windows operating system.
     *
     * @return {@code true} if the OS is Windows
     */
    public static boolean isWindows() {
        return System.getProperty("os.name").startsWith("Windows");
    }
}
| 2,629 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/utils/C3rSdkProperties.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import software.amazon.awssdk.core.ApiName;
/**
 * Utility class holding constants shared across the C3R SDK.
 */
public final class C3rSdkProperties {
    /**
     * C3R version.
     */
    public static final String VERSION = "2.0.0";

    /**
     * C3R SDK user agent.
     */
    public static final ApiName API_NAME =
            ApiName.builder()
                    .name("c3r-sdk")
                    .version(VERSION)
                    .build();

    /**
     * Hidden utility class constructor.
     */
    private C3rSdkProperties() {
    }
}
| 2,630 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/utils/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Utility classes that contain commonly used functionality across components.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.utils; | 2,631 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/action/CsvRowMarshaller.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.CsvRowFactory;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.io.CsvRowWriter;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.RowReader;
import com.amazonaws.c3r.io.RowWriter;
import lombok.Builder;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Used to instantiate an instance of {@link RowMarshaller} that handles CSV data. {@link RowMarshaller} provides all the functionality
 * except for creating the CSV file reader ({@link CsvRowReader}), writer ({@link CsvRowWriter}) and {@link CsvRowFactory} which is done
 * here.
 */
@Slf4j
public final class CsvRowMarshaller {
    /**
     * Utility class, hide default constructor.
     */
    private CsvRowMarshaller() {
    }

    /**
     * Creates an instance of the marshaller based off of an {@link EncryptConfig}. Verifies the input file appears to contain CSV data
     * before continuing.
     *
     * @param config Configuration information on how data will be transformed, file locations, etc.
     * @return CSV data marshaller
     * @throws C3rIllegalArgumentException If non-CSV data was found to be in the file
     * @see EncryptConfig
     */
    public static RowMarshaller<CsvValue> newInstance(@NonNull final EncryptConfig config) {
        if (config.getFileFormat() != FileFormat.CSV) {
            throw new C3rIllegalArgumentException("Expected a CSV encryption configuration, but found "
                    + config.getFileFormat() + " encryption configuration instead.");
        }
        return CsvRowMarshaller.builder()
                .sourceFile(config.getSourceFile())
                .targetFile(config.getTargetFile())
                .tempDir(config.getTempDir())
                .inputNullValue(config.getCsvInputNullValue())
                .outputNullValue(config.getCsvOutputNullValue())
                .settings(config.getSettings())
                .schema(config.getTableSchema())
                .transforms(Transformer.initTransformers(config))
                .build();
    }

    /**
     * Creates an instance of the marshaller where each setting is specified individually.
     *
     * @param sourceFile Input CSV data file location
     * @param targetFile Where to write CSV data
     * @param tempDir Where to write temporary files if needed
     * @param inputNullValue What the CSV input file uses to indicate {@code null}
     * @param outputNullValue What the output CSV file should use to indicate {@code null}
     * @param settings Cryptographic settings for the clean room
     * @param schema Specification of how data in the input file will be transformed into encrypted data in the output file
     * @param transforms Cryptographic transforms that are possible to use
     * @return CSV data marshaller
     */
    @Builder
    private static RowMarshaller<CsvValue> newInstance(
            @NonNull final String sourceFile,
            @NonNull final String targetFile,
            @NonNull final String tempDir,
            final String inputNullValue,
            final String outputNullValue,
            @NonNull final ClientSettings settings,
            @NonNull final TableSchema schema,
            @NonNull final Map<ColumnType, Transformer> transforms) {
        final RowReader<CsvValue> reader = CsvRowReader.builder().sourceName(sourceFile).inputNullValue(inputNullValue)
                .externalHeaders(schema.getPositionalColumnHeaders()).build();
        // Output columns are the schema's target headers, in schema order.
        final List<ColumnHeader> targetHeaders = schema.getColumns().stream().map(ColumnSchema::getTargetHeader)
                .collect(Collectors.toList());
        final RowWriter<CsvValue> writer = CsvRowWriter.builder()
                .targetName(targetFile)
                .outputNullValue(outputNullValue)
                .headers(targetHeaders)
                .build();
        validate(outputNullValue, schema);
        return RowMarshaller.<CsvValue>builder()
                .settings(settings)
                .schema(schema)
                .tempDir(tempDir)
                .inputReader(reader)
                .rowFactory(new CsvRowFactory())
                .outputWriter(writer)
                .transformers(transforms)
                .build();
    }

    /**
     * Verifies that settings are consistent.
     * - Make sure that if a custom CSV output null value is set, at least one cleartext target column exists
     *
     * @param outputNullValue What the output CSV file should use to indicate {@code null}
     * @param schema Specification of how data in the input file will be transformed into encrypted data in the output file
     */
    static void validate(final String outputNullValue, @NonNull final TableSchema schema) {
        if (outputNullValue == null) {
            return; // No custom null value was set
        }
        final boolean cleartextColumnExists = schema.getColumns().stream()
                .anyMatch(columnSchema -> columnSchema.getType() == ColumnType.CLEARTEXT);
        if (!cleartextColumnExists) {
            // Use SLF4J parameterized logging so the message is only assembled when WARN is enabled.
            log.warn("Received a custom output null value `{}`, but no cleartext columns were found. It will be ignored.",
                    outputNullValue);
        }
    }
}
| 2,632 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/action/CsvRowUnmarshaller.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.data.CsvRowFactory;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.io.CsvRowWriter;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.RowReader;
import com.amazonaws.c3r.io.RowWriter;
import lombok.Builder;
import lombok.NonNull;
import java.util.Map;
/**
 * Used to instantiate an instance of {@link RowUnmarshaller} that handles CSV data. {@link RowUnmarshaller} provides all the functionality
 * except for creating the CSV file reader ({@link CsvRowReader}), writer ({@link CsvRowWriter}) and {@link CsvRowFactory} which is done
 * here.
 */
public final class CsvRowUnmarshaller {
    /**
     * Utility class, hide default constructor.
     */
    private CsvRowUnmarshaller() {
    }

    /**
     * Creates an instance of the unmarshaller based off of a {@link DecryptConfig}. Verifies the input file appears to contain CSV data
     * before continuing.
     *
     * @param config Configuration information on how data will be transformed, file locations, etc.
     * @return CSV data unmarshaller
     * @throws C3rIllegalArgumentException If non-CSV data was found to be in the file
     * @see DecryptConfig
     */
    public static RowUnmarshaller<CsvValue> newInstance(@NonNull final DecryptConfig config) {
        final FileFormat format = config.getFileFormat();
        if (format != FileFormat.CSV) {
            throw new C3rIllegalArgumentException("Expected a CSV decryption configuration, but found "
                    + format + " decryption configuration instead.");
        }
        return CsvRowUnmarshaller.builder()
                .sourceFile(config.getSourceFile())
                .targetFile(config.getTargetFile())
                .csvInputNullValue(config.getCsvInputNullValue())
                .csvOutputNullValue(config.getCsvOutputNullValue())
                .transformers(Transformer.initTransformers(config))
                .build();
    }

    /**
     * Creates an instance of a CSV row unmarshaller based off of individually specified settings.
     *
     * @param sourceFile Input CSV file location
     * @param targetFile Where to write CSV data
     * @param csvInputNullValue What the CSV input file uses to indicate {@code null}
     * @param csvOutputNullValue What the output CSV file should use to indicate {@code null}
     * @param transformers Cryptographic transforms that are possible to use
     * @return CSV data unmarshaller
     */
    @Builder
    private static RowUnmarshaller<CsvValue> newInstance(
            @NonNull final String sourceFile,
            @NonNull final String targetFile,
            final String csvInputNullValue,
            final String csvOutputNullValue,
            @NonNull final Map<ColumnType, Transformer> transformers) {
        // Headers are not normalized on decrypt since the marshalled file already contains target headers.
        final RowReader<CsvValue> csvReader = CsvRowReader.builder()
                .sourceName(sourceFile)
                .inputNullValue(csvInputNullValue)
                .skipHeaderNormalization(true)
                .build();
        // The output file mirrors the input file's header layout.
        final RowWriter<CsvValue> csvWriter = CsvRowWriter.builder()
                .targetName(targetFile)
                .outputNullValue(csvOutputNullValue)
                .headers(csvReader.getHeaders())
                .build();
        return RowUnmarshaller.<CsvValue>builder()
                .inputReader(csvReader)
                .outputWriter(csvWriter)
                .rowFactory(new CsvRowFactory())
                .transformers(transformers)
                .build();
    }
}
| 2,633 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/action/RowUnmarshaller.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.RowFactory;
import com.amazonaws.c3r.data.Value;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.RowReader;
import com.amazonaws.c3r.io.RowWriter;
import lombok.AccessLevel;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
import java.util.Map;
/**
 * Interface for loading and processing records for decryption.
 *
 * @param <T> Data format
 * @see RowReader
 * @see RowWriter
 */
@Slf4j
public final class RowUnmarshaller<T extends Value> {
    /**
     * Used for reading a row of data.
     */
    @Getter
    private final RowReader<T> inputReader;

    /**
     * Used for writing a row of data.
     */
    @Getter
    private final RowWriter<T> outputWriter;

    /**
     * Creates an empty row to be filled with data.
     */
    private final RowFactory<T> rowFactory;

    /**
     * Cryptographic computation transformers in use for data processing.
     */
    private final Map<ColumnType, Transformer> transformers;

    /**
     * Creates a {@code RowUnmarshaller}. See {@link #unmarshal} for primary functionality.
     *
     * @param rowFactory Creates new rows for writing out unmarshalled data
     * @param inputReader Where marshalled records are read from
     * @param outputWriter Where unmarshalled records are written to
     * @param transformers The transformers for unmarshalling data
     */
    @Builder(access = AccessLevel.PACKAGE)
    private RowUnmarshaller(@NonNull final RowFactory<T> rowFactory,
                            @NonNull final RowReader<T> inputReader,
                            @NonNull final RowWriter<T> outputWriter,
                            @NonNull final Map<ColumnType, Transformer> transformers) {
        this.rowFactory = rowFactory;
        this.inputReader = inputReader;
        this.outputWriter = outputWriter;
        // Wrapped so the transform table cannot be mutated after construction.
        this.transformers = Collections.unmodifiableMap(transformers);
    }

    /**
     * Reads in records/rows from the given source and outputs it to the given target after applying cryptographic transforms.
     *
     * @throws C3rRuntimeException If there's an error during decryption
     */
    public void unmarshal() {
        while (inputReader.hasNext()) {
            final Row<T> inputRow = inputReader.next();
            final Row<T> outputRow = rowFactory.newRow();
            for (ColumnHeader header : inputRow.getHeaders()) {
                final byte[] valueBytes = inputRow.getValue(header).getBytes();
                final Transformer transformer = selectTransformer(valueBytes);
                try {
                    outputRow.putBytes(header, transformer.unmarshal(valueBytes));
                } catch (Exception e) {
                    throw new C3rRuntimeException("Failed while unmarshalling data for column `"
                            + header + "` on row " + inputReader.getReadRowCount() + ". Error message received: " + e.getMessage(), e);
                }
            }
            outputWriter.writeRow(outputRow);
            if (inputReader.getReadRowCount() % RowMarshaller.LOG_ROW_UPDATE_FREQUENCY == 0) {
                log.info("{} rows decrypted.", inputReader.getReadRowCount());
            }
        }
        outputWriter.flush();
    }

    /**
     * Chooses the transformer whose descriptor matches the marshalled bytes, checking sealed
     * before fingerprint and falling back to cleartext pass-through when neither matches.
     *
     * @param valueBytes The marshalled value bytes to inspect
     * @return The transformer to use for unmarshalling this value
     */
    private Transformer selectTransformer(final byte[] valueBytes) {
        final Transformer sealedTransformer = transformers.get(ColumnType.SEALED);
        if (Transformer.hasDescriptor(sealedTransformer, valueBytes)) {
            return sealedTransformer;
        }
        final Transformer fingerprintTransformer = transformers.get(ColumnType.FINGERPRINT);
        if (Transformer.hasDescriptor(fingerprintTransformer, valueBytes)) {
            return fingerprintTransformer;
        }
        // No recognized descriptor: treat the value as cleartext pass-through.
        return transformers.get(ColumnType.CLEARTEXT);
    }

    /**
     * Closes connections to input source and output target.
     */
    public void close() {
        inputReader.close();
        outputWriter.close();
    }
}
| 2,634 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/action/RowMarshaller.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.RowFactory;
import com.amazonaws.c3r.data.Value;
import com.amazonaws.c3r.data.ValueConverter;
import com.amazonaws.c3r.encryption.EncryptionContext;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Nonce;
import com.amazonaws.c3r.io.RowReader;
import com.amazonaws.c3r.io.RowWriter;
import com.amazonaws.c3r.io.SqlRowReader;
import com.amazonaws.c3r.io.SqlRowWriter;
import com.amazonaws.c3r.io.sql.SqlTable;
import com.amazonaws.c3r.io.sql.TableGenerator;
import lombok.AccessLevel;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.io.IOException;
import java.nio.file.Files;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Interface for loading and processing records for encryption.
*
* @param <T> Data format
* @see RowReader
* @see RowWriter
*/
@Slf4j
public final class RowMarshaller<T extends Value> {
/**
* The number of rows to be read in at a time in order to help protect any columns marked with {@link PadType#MAX} on first pass.
**/
private static final int ROW_BATCH_SIZE = 100;
/**
* Number of inserts into the SQL database should be performed before committing the transaction.
*/
static final int INSERTS_PER_COMMIT = 100 * ROW_BATCH_SIZE;
/**
* Number of rows to process before logging an update on progress.
*/
static final int LOG_ROW_UPDATE_FREQUENCY = 10 * INSERTS_PER_COMMIT;
/**
* Cryptographic settings for the clean room.
*/
private final ClientSettings settings;
/**
* Name of nonce column.
*/
@Getter
private final ColumnHeader nonceHeader;
/**
* Stores information about data processed so far for each input column.
*/
private final Map<ColumnHeader, List<ColumnInsight>> sourceMappedColumnInsights;
/**
* Stores information about data processed so far for each output column.
*/
private final Map<ColumnHeader, ColumnInsight> targetMappedColumnInsights;
/**
* Underlying storage location for information about processed data so {@code sourceMappedColumnInsights} and
* {@code targetMappedColumnInsights} don't duplicate information or get out of sync.
*/
@Getter
private final Collection<ColumnInsight> columnInsights;
/**
* Used for reading a row of data.
*/
@Getter
private final RowReader<T> inputReader;
/**
* Used for writing a row of data.
*/
@Getter
private final RowWriter<T> outputWriter;
/**
* SQL table used for intermediate processing of data when two passes need to be done across the data.
*/
@Getter
private final SqlTable sqlTable;
/**
* Creates an empty row to be filled with data.
*/
private final RowFactory<T> rowFactory;
/**
* Cryptographic computation transformers in use for data processing.
*/
private final Map<ColumnType, Transformer> transformers;
/**
* Description of how input columns are transformed into output columns.
*/
private final TableSchema schema;
    /**
     * Creates a {@code RowMarshaller} for encrypting data. See {@link #marshal} for primary functionality.
     *
     * @param settings     User-provided clean room settings
     * @param schema       User-provided table-specific schema
     * @param rowFactory   Creates new rows for the marshalled data
     * @param inputReader  Where records are derived from
     * @param outputWriter Where records are stored after being marshalled
     * @param tempDir      Location where temp files may be created while processing the input
     * @param transformers Transformers for each type of column
     * @throws C3rRuntimeException If any schema source column cannot be matched to a column in the input (see {@link #validate()})
     */
    @Builder(access = AccessLevel.PACKAGE)
    private RowMarshaller(@NonNull final ClientSettings settings,
                          @NonNull final TableSchema schema,
                          @NonNull final RowFactory<T> rowFactory,
                          @NonNull final RowReader<T> inputReader,
                          @NonNull final RowWriter<T> outputWriter,
                          @NonNull final String tempDir,
                          @NonNull final Map<ColumnType, Transformer> transformers) {
        this.settings = settings;
        // One insight per output column; the two maps below are views over this same collection.
        this.columnInsights = schema.getColumns().stream().map(ColumnInsight::new)
                .collect(Collectors.toList());
        // A single source column may feed several target columns, hence grouping by source header.
        this.sourceMappedColumnInsights = this.columnInsights.stream()
                .collect(Collectors.groupingBy(ColumnSchema::getSourceHeader));
        this.targetMappedColumnInsights = this.columnInsights.stream()
                .collect(Collectors.toMap(ColumnSchema::getTargetHeader, Function.identity()));
        this.inputReader = inputReader;
        // Nonce column name must not collide with any source/target header in the schema.
        this.nonceHeader = TableGenerator.generateUniqueHeader(schema.getSourceAndTargetHeaders(), "row_nonce");
        this.rowFactory = rowFactory;
        this.outputWriter = outputWriter;
        // Source positions must be populated before validate() checks for unmatched columns.
        populateColumnSpecPositions();
        this.sqlTable = TableGenerator.initTable(schema, this.nonceHeader, tempDir);
        this.transformers = Collections.unmodifiableMap(transformers);
        this.schema = schema;
        validate();
    }
/**
* Update {@link ColumnSchema}s with their source and target column positions.
*
* <p>
* Missing columns will cause an error to be thrown.
*/
private void populateColumnSpecPositions() {
// Maps source column headers to location in data file
final Map<ColumnHeader, Integer> columnPositions = new LinkedHashMap<>();
final List<ColumnHeader> headers = inputReader.getHeaders();
for (int i = 0; i < headers.size(); i++) {
columnPositions.put(headers.get(i), i);
}
columnInsights.forEach(ci -> {
if (columnPositions.containsKey(ci.getSourceHeader())) {
ci.setSourceColumnPosition(columnPositions.get(ci.getSourceHeader()));
}
});
}
    /**
     * Reads in records/rows from the given source and outputs it to the given target after applying cryptographic transforms.
     *
     * <p>
     * Two passes are made over the data: {@link #loadInput()} marshals rows into a temporary SQL table
     * (gathering per-column statistics along the way), then {@link #marshalOutput()} emits them in
     * nonce-sorted (effectively random) order.
     */
    public void marshal() {
        loadInput();
        marshalOutput();
    }
/**
* Check to see if more than one {@code null} value was seen and whether that should produce an error.
*
* @param insight Information about the data in a particular column
* @param value Check to see if this value is {@code null} and if it violates clean room settings
* @throws C3rRuntimeException If more than one {@code null} entry was found and repeated fingerprint values are not allowed
*/
private void checkForInvalidNullDuplicates(final ColumnInsight insight, final Value value) {
// Manually check for disallowed NULL duplicates because SQLite and many other
// database engines consider NULLs distinct in a UNIQUE column.
if (!settings.isAllowDuplicates()
&& !settings.isPreserveNulls()
&& insight.getType() == ColumnType.FINGERPRINT
&& value.isNull()
&& insight.hasSeenNull()) {
throw new C3rRuntimeException("Source column " + (insight.getSourceColumnPosition() + 1)
+ " cannot be used to construct the target fingerprint column `" + insight.getTargetHeader().toString() + "` because"
+ " the column contains more than one NULL entry"
+ " and the `allowDuplicates` setting is false.");
}
}
    /**
     * Reads in records/rows from the given source and stages them in the temporary SQL table.
     *
     * <p>
     * Rows are inserted in batches of {@code ROW_BATCH_SIZE} and committed every
     * {@code INSERTS_PER_COMMIT} inserts to amortize transaction overhead.
     * NOTE(review): if an exception escapes mid-load, auto-commit is not restored on the
     * connection before the wrapper exception is thrown — confirm callers always {@code close()} afterwards.
     *
     * @throws C3rRuntimeException If there's an error accessing the SQL database
     */
    void loadInput() {
        try {
            log.debug("Loading data from {}.", inputReader.getSourceName());
            // Countdown of inserts remaining before the next explicit commit.
            long commitFuel = INSERTS_PER_COMMIT;
            final long startTime = System.currentTimeMillis();
            final RowWriter<T> sqlRowWriter = new SqlRowWriter<>(columnInsights, nonceHeader, sqlTable);
            List<Row<T>> batchedRows = new ArrayList<>();
            // For bulk operations, we want to explicitly commit the transaction less often for performance.
            sqlTable.getConnection().setAutoCommit(false);
            while (inputReader.hasNext()) {
                final Row<T> sourceRow = inputReader.next();
                sourceRow.forEach((column, value) -> {
                    // Observe and validate current row (NULL-duplicate check, per-column statistics)
                    if (sourceMappedColumnInsights.containsKey(column)) {
                        for (var columnInsight : sourceMappedColumnInsights.get(column)) {
                            checkForInvalidNullDuplicates(columnInsight, value);
                            columnInsight.observe(value);
                        }
                    }
                });
                batchedRows.add(sourceRow);
                // If batch size or end of input is met, write to SQL and reset batch.
                if (batchedRows.size() == ROW_BATCH_SIZE || !inputReader.hasNext()) {
                    writeInputBatchToSql(sqlRowWriter, batchedRows);
                    commitFuel = commitFuel - batchedRows.size();
                    batchedRows = new ArrayList<>();
                    if (commitFuel <= 0) {
                        sqlTable.getConnection().commit();
                        commitFuel = INSERTS_PER_COMMIT;
                    }
                }
                if (inputReader.getReadRowCount() % LOG_ROW_UPDATE_FREQUENCY == 0) {
                    log.info("{} rows loaded.", inputReader.getReadRowCount());
                }
            }
            // Final commit picks up any partial batch written since the last fuel-triggered commit.
            sqlTable.getConnection().commit();
            // We've completed our bulk insert, so turn autocommit back on
            // so any future one-off commands execute immediately.
            sqlTable.getConnection().setAutoCommit(true);
            final long endTime = System.currentTimeMillis();
            log.debug("Done loading {} rows in {} seconds.", inputReader.getReadRowCount(),
                    TimeUnit.MILLISECONDS.toSeconds(endTime - startTime));
            checkForInvalidDuplicates();
        } catch (SQLException e) {
            throw new C3rRuntimeException("Error accessing the SQL database.", e);
        }
    }
    /**
     * Writes a batch of rows to SQL, marshalling each value with its column's transformer.
     *
     * <p>
     * Each row gets a fresh nonce which is stored alongside it for use during the output pass.
     *
     * @param sqlRowWriter The writer for the SQL database
     * @param batchedRows  The rows to be written to the database
     * @throws C3rRuntimeException If the transform couldn't be applied to a value or while writing to the database
     */
    private void writeInputBatchToSql(final RowWriter<T> sqlRowWriter, final List<Row<T>> batchedRows) {
        for (Row<T> sourceRow : batchedRows) {
            final Row<T> targetRow = rowFactory.newRow();
            final Nonce nonce = Nonce.nextNonce();
            sourceRow.forEach((column, value) -> {
                // Map source values to each target.
                if (sourceMappedColumnInsights.containsKey(column)) {
                    for (var columnInsight : sourceMappedColumnInsights.get(column)) {
                        // Marshal sensitive data. Note that PadType.MAX may not be correct at this stage. It will require decrypting and
                        // re-encrypting when being sent to the final output. In the interim, it is based on the running max byte length,
                        // sampled in batches.
                        final Transformer transformer = transformers.get(columnInsight.getType());
                        final byte[] bytesToMarshall = ValueConverter.getBytesForColumn(value, columnInsight.getType());
                        // Fall back to STRING when the column's client data type has not been determined.
                        final ClientDataType finalType = Objects.requireNonNullElse(columnInsight.getClientDataType(),
                                ClientDataType.STRING);
                        final var encryptionContext = new EncryptionContext(columnInsight, nonce, finalType);
                        try {
                            targetRow.putBytes(
                                    columnInsight.getTargetHeader(),
                                    transformer.marshal(bytesToMarshall, encryptionContext));
                        } catch (Exception e) {
                            throw new C3rRuntimeException("Failed while marshalling data for target column `"
                                    + encryptionContext.getColumnLabel() + "` on row " + inputReader.getReadRowCount() + ". Error message: "
                                    + e.getMessage(), e);
                        }
                    }
                }
            });
            targetRow.putNonce(nonceHeader, nonce);
            try {
                sqlRowWriter.writeRow(targetRow);
            } catch (Exception e) {
                throw new C3rRuntimeException("Failed while marshalling data for row " + inputReader.getReadRowCount() + ". Error message: "
                        + e.getMessage(), e);
            }
        }
    }
/**
* Check the SQL database for duplicate values not allowed by client settings.
*
* @throws C3rRuntimeException If there are SQL exceptions or if invalid duplicates are found
*/
private void checkForInvalidDuplicates() {
final List<ColumnHeader> fingerprintColumns = columnInsights.stream()
.filter(ci -> ci.getType() == ColumnType.FINGERPRINT)
.map(ColumnInsight::getInternalHeader)
.collect(Collectors.toList());
if (settings.isAllowDuplicates() || fingerprintColumns.isEmpty()) {
return;
}
log.debug("Checking for duplicates in {} {} columns.", fingerprintColumns.size(), ColumnType.FINGERPRINT);
final long startTime = System.currentTimeMillis();
final List<String> columnsWithDuplicates = new ArrayList<>();
try (Statement stmt = this.sqlTable.getConnection().createStatement()) {
for (var columnHeader : fingerprintColumns) {
final ResultSet duplicates = stmt.executeQuery(TableGenerator.getDuplicatesInColumnStatement(stmt, columnHeader));
if (duplicates.next()) {
columnsWithDuplicates.add(columnHeader.toString());
}
duplicates.close();
}
} catch (SQLException e) {
throw new C3rRuntimeException("An SQL exception occurred during marshalling.", e);
}
final long endTime = System.currentTimeMillis();
log.debug("Finished checking for duplicates in {} {} columns in {} seconds.", fingerprintColumns.size(), ColumnType.FINGERPRINT,
TimeUnit.MILLISECONDS.toSeconds(endTime - startTime));
if (!columnsWithDuplicates.isEmpty()) {
throw new C3rRuntimeException("Duplicate entries found in the following " + ColumnType.FINGERPRINT + " columns "
+ "but the allowDuplicates setting for cryptographic computing is false: "
+ "[" + String.join(", ", columnsWithDuplicates) + "]");
}
}
/**
* Writes out records/rows to the given target. Must be called after {@link #loadInput()} or no data will be output.
*
* @throws C3rRuntimeException If an error is encountered while using the SQL database
*/
void marshalOutput() {
log.debug("Randomizing data order.");
long startTime = System.currentTimeMillis();
// Create a covering index for all rows to improve our ORDER BY performance
// to sort the table by nonce and induce a random order.
try {
final var stmt = this.sqlTable.getConnection().createStatement();
stmt.execute(TableGenerator.getCoveringIndexStatement(stmt, schema, nonceHeader));
} catch (SQLException e) {
throw new C3rRuntimeException("An SQL exception occurred during marshalling.", e);
}
long endTime = System.currentTimeMillis();
log.debug("Done randomizing data order in {} seconds.", TimeUnit.MILLISECONDS.toSeconds(endTime - startTime));
log.debug("Emitting encrypted data.");
startTime = System.currentTimeMillis();
final RowReader<T> sqlRowReader = new SqlRowReader<>(columnInsights, nonceHeader, rowFactory, sqlTable);
while (sqlRowReader.hasNext()) {
final Row<T> rowOut = sqlRowReader.next();
final Row<T> marshalledRow = rowFactory.newRow();
final Nonce nonce = new Nonce(rowOut.getValue(nonceHeader).getBytes());
// Nonces don't get written to final output
rowOut.removeColumn(nonceHeader);
rowOut.forEach((column, value) -> {
final var columnInsight = targetMappedColumnInsights.get(column);
final Transformer transformer = transformers.get(columnInsight.getType());
byte[] marshalledBytes = value.getBytes();
// Replace bytes for columns marked with PadType.MAX now that we know the longest value length.
// All other values are already marshalled correctly.
if (columnInsight.getPad() != null && columnInsight.getPad().getType() == PadType.MAX) {
final EncryptionContext encryptionContext = new EncryptionContext(columnInsight, nonce, value.getClientDataType());
final byte[] unmarshalledMaxColumnBytes = transformer.unmarshal(marshalledBytes);
marshalledBytes = transformer.marshal(unmarshalledMaxColumnBytes, encryptionContext);
}
marshalledRow.putBytes(column, marshalledBytes);
});
outputWriter.writeRow(marshalledRow);
if (sqlRowReader.getReadRowCount() % LOG_ROW_UPDATE_FREQUENCY == 0) {
log.info("{} rows emitted.", sqlRowReader.getReadRowCount());
}
}
outputWriter.flush();
endTime = System.currentTimeMillis();
log.debug("Done emitting {} encrypted rows in {} seconds.", sqlRowReader.getReadRowCount(),
TimeUnit.MILLISECONDS.toSeconds(endTime - startTime));
}
    /**
     * Closes the connections to the input source, output target and SQL database as well as deleting the database.
     *
     * <p>
     * NOTE(review): the closes run sequentially, so a failure closing the input reader leaves the
     * output writer and SQL connection open and the temp database file on disk — confirm callers
     * treat {@code close()} as best-effort.
     *
     * @throws C3rRuntimeException If there's an error closing connections to the input file, output file, SQL database,
     *                             or the SQL database file.
     */
    public void close() {
        try {
            inputReader.close();
            outputWriter.close();
            if (sqlTable.getConnection() != null && !sqlTable.getConnection().isClosed()) {
                sqlTable.getConnection().close();
            }
            // Remove the temporary database file now that processing is complete.
            Files.delete(sqlTable.getDatabaseFile().toPath());
        } catch (IOException e) {
            throw new C3rRuntimeException("Unable to close file connection.", e);
        } catch (SQLException e) {
            throw new C3rRuntimeException("Access error while attempting to close the SQL database.", e);
        }
    }
/**
* Looks for any columns in the schema that are missing from the file.
*
* @throws C3rRuntimeException If specified input columns are missing from the file
*/
private void validate() {
final Set<String> missingHeaders = columnInsights.stream()
.filter(ci -> ci.getSourceColumnPosition() < 0)
.map(column -> column.getTargetHeader().toString())
.collect(Collectors.toSet());
if (!missingHeaders.isEmpty()) {
throw new C3rRuntimeException("Target column(s) ["
+ String.join(", ", missingHeaders)
+ "] could not be matched to the corresponding source columns in the input file.");
}
}
}
| 2,635 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/action/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
 * This package contains classes used to marshall (encrypt) and unmarshall (decrypt) data to and from the clean room for the various supported
* data types. {@link com.amazonaws.c3r.action.RowMarshaller} handles the logic of marshalling data outside of anything having to do with
* the actual data format and {@link com.amazonaws.c3r.action.RowUnmarshaller} does the same for unmarshalling. Each format specific class
* handles file I/O and value creation only for that particular data type.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.action; | 2,636 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/ValidationTypeAdapterFactory.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Validatable;
import com.google.gson.Gson;
import com.google.gson.TypeAdapter;
import com.google.gson.TypeAdapterFactory;
import com.google.gson.reflect.TypeToken;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
/**
 * Ensures {@link Validatable} objects are checked for validity after being deserialized.
 */
public class ValidationTypeAdapterFactory implements TypeAdapterFactory {
    /**
     * Creates an instance of an object that implements the {@link Validatable} interface and calls validate to verify correct
     * construction of object according to C3R type rules. A number of classes implement this interface and are constructed automatically
     * or via a custom {@link TypeAdapter}, then pass through this factory where the constructed object is checked for correctness. This
     * particular factory only changes the read process, nothing is done during the write step on top of the class's normal write call.
     *
     * @param <T>  Specific type being constructed or written
     * @param gson JSON parser with customized adapters
     * @param type Higher level type
     * @return Type factory that calls {@link Validatable#validate()} on all implementing classes when reading in JSON
     */
    @Override
    public <T> TypeAdapter<T> create(final Gson gson, final TypeToken<T> type) {
        final TypeAdapter<T> delegate = gson.getDelegateAdapter(this, type);
        return new TypeAdapter<>() {
            /**
             * Writes via the delegate adapter unchanged.
             *
             * @param out   Stream of values written so far
             * @param value The object to serialize
             * @throws C3rRuntimeException If the delegate fails to write
             */
            @Override
            public void write(final JsonWriter out, final T value) {
                try {
                    delegate.write(out, value);
                } catch (IOException e) {
                    throw new C3rRuntimeException("Unable to write to output.", e);
                }
            }

            /**
             * Reads via the delegate adapter, then validates the result when it implements {@link Validatable}.
             *
             * @param in Stream of tokenized JSON values
             * @return The deserialized (and validated) value
             * @throws C3rRuntimeException If the delegate fails to read
             */
            @Override
            public T read(final JsonReader in) {
                try {
                    final T value = delegate.read(in);
                    if (value instanceof Validatable) {
                        ((Validatable) value).validate();
                    }
                    return value;
                } catch (IOException e) {
                    throw new C3rRuntimeException("Unable to read from the input.", e);
                }
            }
        };
    }
}
| 2,637 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/GsonUtil.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
/**
 * Provides an interface from loading all supported input types as JSON.
 */
public final class GsonUtil {
    /**
     * Shared, pretty-printing Gson instance wired with every C3R custom adapter.
     */
    private static final Gson SERIALIZER = new GsonBuilder()
            .setPrettyPrinting()
            .registerTypeAdapter(PadType.class, new PadTypeTypeAdapter())
            .registerTypeAdapter(ColumnType.class, new ColumnTypeTypeAdapter())
            .registerTypeAdapter(ColumnHeader.class, new ColumnHeaderTypeAdapter())
            .registerTypeAdapter(TableSchema.class, new TableSchemaTypeAdapter())
            .registerTypeAdapterFactory(new ValidationTypeAdapterFactory())
            .create();

    /** Hidden utility constructor. */
    private GsonUtil() {
    }

    /**
     * Attempt to parse a string of JSON values to specified class.
     *
     * @param json     String containing formatted JSON values
     * @param classOfT Type to parse string as
     * @param <T>      Specific class you want from the JSON
     * @return Constructed value (possibly null)
     * @throws C3rIllegalArgumentException If the string can't be parsed as the requested class type
     */
    public static <T> T fromJson(final String json, final Class<T> classOfT) {
        try {
            return SERIALIZER.fromJson(json, classOfT);
        } catch (Exception e) {
            throw new C3rIllegalArgumentException("Unable to parse JSON " + classOfT + ".", e);
        }
    }

    /**
     * Converts an object to its representation as a formatted JSON string without specifying a specific class to interpret it as.
     *
     * @param src Object to convert to JSON
     * @return String representing object in pretty printed JSON format
     */
    public static String toJson(final Object src) {
        return toJson(src, null);
    }

    /**
     * Converts an object to its representation as a formatted JSON string with a specific class to interpret it as.
     *
     * @param src      Object to convert to JSON
     * @param classOfT Specific class to interpret object as (may be {@code null} for no specific class)
     * @param <T>      Specific class to interpret object as
     * @return String representing the object in pretty printed JSON format
     * @throws C3rIllegalArgumentException If the class could not be serialized to JSON
     */
    public static <T> String toJson(final Object src, final Class<T> classOfT) {
        try {
            return (classOfT == null) ? SERIALIZER.toJson(src) : SERIALIZER.toJson(src, classOfT);
        } catch (Exception e) {
            throw new C3rIllegalArgumentException("Unable to write " + classOfT + " as JSON.", e);
        }
    }
}
| 2,638 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/ColumnTypeTypeAdapter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.google.gson.TypeAdapter;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonToken;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
/**
 * Handles the serialization/deserialization of ColumnTypes. Allows for case-insensitivity.
 */
public class ColumnTypeTypeAdapter extends TypeAdapter<ColumnType> {
    /**
     * Serialize {@link ColumnType} object to a string and send to {@code out}.
     *
     * @param out   Stream of values written so far
     * @param value the Java object to write
     * @throws C3rRuntimeException If there's an error writing to output
     */
    @Override
    public void write(final JsonWriter out, final ColumnType value) {
        try {
            if (value == null) {
                out.nullValue();
            } else {
                out.value(value.toString());
            }
        } catch (IOException e) {
            throw new C3rRuntimeException("Error writing to output.", e);
        }
    }

    /**
     * Read in a JSON value and attempt to deserialize it as a {@link ColumnType}.
     *
     * @param in Stream of tokenized JSON values
     * @return Type of column transform to use
     * @throws C3rRuntimeException If there's an error reading from source
     */
    @Override
    public ColumnType read(final JsonReader in) {
        try {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            } else {
                // Locale.ROOT keeps the case mapping locale-independent; the default locale
                // (e.g. Turkish dotted/dotless I) could yield a name no enum constant matches.
                return ColumnType.valueOf(in.nextString().trim().toUpperCase(java.util.Locale.ROOT));
            }
        } catch (IOException e) {
            throw new C3rRuntimeException("Error reading from input.", e);
        }
    }
}
| 2,639 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/ColumnHeaderTypeAdapter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.google.gson.TypeAdapter;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonToken;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
/**
 * Handles the serialization/deserialization of ColumnHeader names. Allows for case-insensitivity.
 */
public class ColumnHeaderTypeAdapter extends TypeAdapter<ColumnHeader> {
    /**
     * Emit a {@link ColumnHeader} as a JSON string, or a JSON null when absent.
     *
     * @param out   Formatted JSON output to add to
     * @param value The column header name to write
     * @throws C3rRuntimeException If there's an error writing to output
     */
    @Override
    public void write(final JsonWriter out, final ColumnHeader value) {
        try {
            if (value != null) {
                out.value(value.toString());
            } else {
                out.nullValue();
            }
        } catch (IOException e) {
            throw new C3rRuntimeException("Error writing to output", e);
        }
    }

    /**
     * Parse the next JSON token as a {@link ColumnHeader}, returning {@code null} for a JSON null.
     *
     * @param in Source to read value from
     * @return The value parsed as a header name
     * @throws C3rRuntimeException If there's an error reading from source
     */
    @Override
    public ColumnHeader read(final JsonReader in) {
        // Lenient parsing tolerates slightly malformed JSON for header names.
        in.setLenient(true);
        try {
            if (in.peek() != JsonToken.NULL) {
                return new ColumnHeader(in.nextString());
            }
            in.nextNull();
            return null;
        } catch (IOException e) {
            throw new C3rRuntimeException("Error reading from input", e);
        }
    }
}
| 2,640 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/TableSchemaTypeAdapter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.google.gson.JsonDeserializationContext;
import com.google.gson.JsonDeserializer;
import com.google.gson.JsonElement;
import com.google.gson.JsonObject;
import com.google.gson.JsonSerializationContext;
import com.google.gson.JsonSerializer;
import java.lang.reflect.Type;
/**
 * Helper class to determine if JSON being serialized/deserialized is a Mapped or Positional TableSchema.
 */
public class TableSchemaTypeAdapter implements JsonDeserializer<TableSchema>, JsonSerializer<TableSchema> {
    /**
     * Confirms the requested class type matches a type supported by this adapter.
     *
     * @param typeOfT Requested object type
     * @throws C3rIllegalArgumentException If {@code typeOfT} is not support by this adapter
     */
    private void checkClassesMatch(final Type typeOfT) {
        // Only the TableSchema family is handled by this adapter.
        final boolean supported = typeOfT == TableSchema.class
                || typeOfT == MappedTableSchema.class
                || typeOfT == PositionalTableSchema.class;
        if (!supported) {
            throw new C3rIllegalArgumentException("Expected class type " + typeOfT.getTypeName() + " is not supported by TableSchema.");
        }
    }

    /**
     * Ensures proper format of the object and gets the {@code headerRow} field from the object.
     *
     * @param jsonObject {@code TableSchema} or child being deserialized
     * @return Boolean value stored in {@code JsonObject}
     * @throws C3rRuntimeException If headerRow is null or headerRow is not a boolean
     */
    private static boolean getHasHeaderRow(final JsonObject jsonObject) {
        final JsonElement headerRow = jsonObject.get("headerRow");
        final boolean isBoolean = headerRow != null
                && headerRow.isJsonPrimitive()
                && headerRow.getAsJsonPrimitive().isBoolean();
        if (!isBoolean) {
            throw new C3rRuntimeException("JSON object should contain boolean value headerRow");
        }
        return headerRow.getAsBoolean();
    }

    /**
     * Read in a JSON value and attempt to deserialize it as a {@link TableSchema} child class.
     *
     * <p>
     * The {@code headerRow} flag selects which concrete implementation is constructed.
     *
     * @param json    The Json data being deserialized
     * @param typeOfT Class to deserialize to (should be {@code TableSchema})
     * @param context Helper for deserializing fields and subclasses
     * @return A {@code TableSchema} that is backed by either a {@code MappedTableSchema} or {@code PositionalTableSchema}
     * @throws C3rRuntimeException If json is not in the expected format of a {@code TableSchema} child class
     */
    @Override
    public TableSchema deserialize(final JsonElement json, final Type typeOfT, final JsonDeserializationContext context) {
        checkClassesMatch(typeOfT);
        if (!json.isJsonObject()) {
            throw new C3rRuntimeException("TableSchema expects a JSON Object at this point of deserialization.");
        }
        // headerRow == true means columns are matched by name (mapped), otherwise by position.
        final Class<? extends TableSchema> concreteType = getHasHeaderRow(json.getAsJsonObject())
                ? MappedTableSchema.class
                : PositionalTableSchema.class;
        return context.deserialize(json, concreteType);
    }

    /**
     * Serialize {@link TableSchema} object into a {@link JsonElement}.
     *
     * <p>
     * This gets called when we specify the class while calling serialize, otherwise JSON will move to the auto-generated
     * serializers for the child implementations.
     *
     * @param src       the object that needs to be converted to Json
     * @param typeOfSrc the actual type (fully generalized version) of the source object
     * @param context   Serialization context
     * @return a {@code JsonElement} corresponding to the specified object.
     * @throws C3rIllegalArgumentException If the object passed in is not a child class of a {@code TableSchema}
     */
    @Override
    public JsonElement serialize(final TableSchema src, final Type typeOfSrc, final JsonSerializationContext context) {
        checkClassesMatch(typeOfSrc);
        final Class<?> concrete = src.getClass();
        if (concrete == MappedTableSchema.class || concrete == PositionalTableSchema.class) {
            return context.serialize(src, concrete);
        }
        // Paranoia check: a new TableSchema subclass was added without routing it here.
        throw new C3rIllegalArgumentException("Expected child class of TableSchema but found " + typeOfSrc.getTypeName() + ".");
    }
}
| 2,641 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/PadTypeTypeAdapter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.json;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.google.gson.TypeAdapter;
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonToken;
import com.google.gson.stream.JsonWriter;
import java.io.IOException;
/**
 * Handles the serialization/deserialization of PadTypes. Allows for case-insensitivity.
 */
public class PadTypeTypeAdapter extends TypeAdapter<PadType> {
    /**
     * Serialize {@link PadType} object to a string and send to {@code out}.
     *
     * @param out   Output stream of formatted JSON data
     * @param value Pad type
     * @throws C3rRuntimeException If there's an error writing to output
     */
    @Override
    public void write(final JsonWriter out, final PadType value) {
        try {
            if (value == null) {
                out.nullValue();
            } else {
                out.value(value.name());
            }
        } catch (IOException e) {
            throw new C3rRuntimeException("Unable to write to output.", e);
        }
    }

    /**
     * Read in a JSON value and attempt to deserialize it as a {@link PadType}.
     *
     * @param in Input stream of tokenized JSON
     * @return Pad type specified
     * @throws C3rRuntimeException If there's an error reading from source
     */
    @Override
    public PadType read(final JsonReader in) {
        try {
            if (in.peek() == JsonToken.NULL) {
                in.nextNull();
                return null;
            } else {
                // trim() and Locale.ROOT match ColumnTypeTypeAdapter: tolerate surrounding
                // whitespace and keep the case mapping locale-independent (Turkish dotted-I).
                return PadType.valueOf(in.nextString().trim().toUpperCase(java.util.Locale.ROOT));
            }
        } catch (IOException e) {
            throw new C3rRuntimeException("Error reading from input.", e);
        }
    }
}
| 2,642 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/json/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Custom adapters and interfaces for importing schema information from JSON into the runtime environment. These include adapters for
* types that can't be automatically serialized and deserialized plus interfaces to run standard steps on created objects.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.json; | 2,643 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/cleanrooms/CleanRoomsDao.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cleanrooms;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Getter;
import lombok.NoArgsConstructor;
import lombok.With;
import lombok.extern.slf4j.Slf4j;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider;
import software.amazon.awssdk.auth.credentials.ProfileCredentialsProvider;
import software.amazon.awssdk.awscore.AwsRequestOverrideConfiguration;
import software.amazon.awssdk.core.ApiName;
import software.amazon.awssdk.core.exception.SdkException;
import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.regions.providers.DefaultAwsRegionProviderChain;
import software.amazon.awssdk.services.cleanrooms.CleanRoomsClient;
import software.amazon.awssdk.services.cleanrooms.model.AccessDeniedException;
import software.amazon.awssdk.services.cleanrooms.model.DataEncryptionMetadata;
import software.amazon.awssdk.services.cleanrooms.model.GetCollaborationRequest;
import software.amazon.awssdk.services.cleanrooms.model.GetCollaborationResponse;
import software.amazon.awssdk.services.cleanrooms.model.ResourceNotFoundException;
import software.amazon.awssdk.services.cleanrooms.model.ThrottlingException;
import software.amazon.awssdk.services.cleanrooms.model.ValidationException;
import java.util.function.Function;
/**
* Create a connection to AWS Clean Rooms to get collaboration information.
*/
@Slf4j
@Getter
@NoArgsConstructor(force = true)
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public final class CleanRoomsDao {
    /**
     * Connection to AWS Clean Rooms; created lazily by {@code getClient()}.
     */
    private CleanRoomsClient client;
    /**
     * AWS CLI named profile to use with the AWS SDK; {@code null} selects the default credentials provider.
     */
    @With
    private final String profile;
    /**
     * AWS region to use with the AWS SDK; {@code null} selects the default region provider chain.
     */
    @With
    private final String region;
    /**
     * Custom user agent for the application's API calls.
     */
    @With
    private final ApiName apiName;
/**
* Construct an CleanRoomsDao with default specified settings.
*
* @param profile AWS CLI named profile to use with the AWS SDK
* @param region AWS region to use with the AWS SDK
* @param apiName Custom user agent content for the application's API calls.
*/
@Builder
private CleanRoomsDao(final String profile, final String region, final ApiName apiName) {
this.profile = profile;
this.region = region;
this.apiName = apiName != null ? apiName : C3rSdkProperties.API_NAME;
}
/**
* Get the {@link AwsCredentialsProvider} to use for connecting with AWS Clean Rooms,
* based on a specified named profile or the default provider.
*
* @return A {@link AwsCredentialsProvider} based on the specified named profile (if any).
*/
AwsCredentialsProvider initAwsCredentialsProvider() {
if (profile == null) {
return DefaultCredentialsProvider.builder().build();
} else {
return ProfileCredentialsProvider.builder().profileName(profile).build();
}
}
/**
* Get the {@link Region} to use for connecting with AWS Clean Rooms,
* based on a specified region or the default provider chain.
*
* @return A specified {@link Region} or the default.
*/
Region initRegion() {
if (region == null) {
return DefaultAwsRegionProviderChain.builder().build().getRegion();
} else {
return Region.of(region);
}
}
/**
* Get the {@link CleanRoomsClient} for this instance, initializing it if it is not yet created.
*
* @return The instances {@link CleanRoomsClient}
* @throws C3rRuntimeException If an SDK error occurs setting up the {@link CleanRoomsClient}
*/
CleanRoomsClient getClient() {
if (client != null) {
return client;
}
try {
client = CleanRoomsClient.builder()
.region(initRegion())
.credentialsProvider(initAwsCredentialsProvider())
.build();
return client;
} catch (SdkException e) {
throw new C3rRuntimeException("Unable to connect to AWS Clean Rooms: " + e.getMessage(), e);
}
}
/**
* Get the cryptographic rules governing a particular collaboration.
*
* @param collaborationId Clean Room Collaboration Identification number
* @return Cryptographic settings in use for the collaboration
* @throws C3rRuntimeException If DataEncryptionMetadata cannot be retrieved from AWS Clean Rooms
*/
public ClientSettings getCollaborationDataEncryptionMetadata(final String collaborationId) {
final AwsRequestOverrideConfiguration overrideConfiguration = AwsRequestOverrideConfiguration.builder()
.addApiName(apiName)
.build();
final GetCollaborationRequest request = GetCollaborationRequest.builder()
.collaborationIdentifier(collaborationId)
.overrideConfiguration(overrideConfiguration)
.build();
final String baseError = "Unable to retrieve the collaboration configuration for CollaborationID: `" + collaborationId + "`.";
final String endError = "Please verify that the CollaborationID is correct and try again.";
final GetCollaborationResponse response;
try {
response = getClient().getCollaboration(request);
} catch (ResourceNotFoundException e) {
throw new C3rRuntimeException(baseError + " No collaboration found. " + endError, e);
} catch (AccessDeniedException e) {
throw new C3rRuntimeException(baseError + " Access denied. " + endError, e);
} catch (ThrottlingException e) {
throw new C3rRuntimeException(baseError + " Throttling. Please wait a moment before trying again.", e);
} catch (ValidationException e) {
throw new C3rRuntimeException(baseError + " CollaborationID could not be validated. " + endError, e);
} catch (SdkException e) {
throw new C3rRuntimeException(baseError + " Unknown error: " + e.getMessage(), e);
}
final DataEncryptionMetadata metadata = response.collaboration().dataEncryptionMetadata();
if (metadata == null) {
throw new C3rRuntimeException(
"The collaboration with CollaborationID `" + collaborationId + "` was not created for use with " +
"C3R! C3R must be enabled on the collaboration when it's created in order to continue.");
}
final var settings = ClientSettings.builder()
.allowJoinsOnColumnsWithDifferentNames(metadata.allowJoinsOnColumnsWithDifferentNames())
.allowCleartext(metadata.allowCleartext())
.allowDuplicates(metadata.allowDuplicates())
.preserveNulls(metadata.preserveNulls())
.build();
final Function<Boolean, String> boolToYesOrNo = (b) -> b ? "yes" : "no";
log.debug("Cryptographic computing parameters found for collaboration {}:", collaborationId);
log.debug(" * Allow cleartext columns = {}",
boolToYesOrNo.apply(settings.isAllowCleartext()));
log.debug(" * Allow duplicates = {}",
boolToYesOrNo.apply(settings.isAllowDuplicates()));
log.debug(" * Allow JOIN of columns with different names = {}",
boolToYesOrNo.apply(settings.isAllowJoinsOnColumnsWithDifferentNames()));
log.debug(" * Preserve NULL values = {}",
boolToYesOrNo.apply(settings.isPreserveNulls()));
return settings;
}
}
| 2,644 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/cleanrooms/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* This package interfaces with AWS Clean Rooms to get collaboration information.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
@DefaultAnnotation(NonNull.class)
package com.amazonaws.c3r.cleanrooms;
import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
import lombok.NonNull; | 2,645 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/ValueConverter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import edu.umd.cs.findbugs.annotations.Nullable;
import edu.umd.cs.findbugs.annotations.UnknownNullness;
import lombok.NonNull;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import static com.amazonaws.c3r.data.ClientDataType.INT_BYTE_SIZE;
/**
 * Utility functions to convert values from one type to another based off of column specifications.
 *
 * <p>
 * Each nested utility class converts one C3R client data type to and from its
 * big-endian byte representation. All {@code fromBytes}/{@code toBytes} helpers
 * are null-passthrough: a {@code null} input yields a {@code null} output.
 */
public final class ValueConverter {
    /**
     * Private utility class constructor.
     */
    private ValueConverter() {
    }
    /**
     * Find the equivalence class super type for the value and return the value represented in that byte format.
     *
     * @param value Value to convert to equivalence class super type
     * @return byte representation of value in super class
     */
    private static byte[] getBytesForFingerprint(@NonNull final Value value) {
        final ClientDataType superType = value.getClientDataType().getRepresentativeType();
        return value.getBytesAs(superType);
    }
    /**
     * Gets the byte representation of the value according to the column type and any specified conversions.
     *
     * <p>
     * Fingerprint columns normalize to the equivalence-class representative type so
     * equal values of related types (e.g. INT and BIGINT) hash identically; all other
     * column types use the value's raw bytes.
     *
     * @param value Value to get bytes from
     * @param columnType Type of column being written
     * @return byte representation of value in the form of the desired {@code ClientDataType}
     */
    public static byte[] getBytesForColumn(@NonNull final Value value, @NonNull final ColumnType columnType) {
        if (columnType == ColumnType.FINGERPRINT) {
            return getBytesForFingerprint(value);
        } else {
            return value.getBytes();
        }
    }
    /**
     * Gets the target data type of the column.
     *
     * @param value Input value
     * @param columnType Type of column being written
     * @return {@code ClientDataType} contained in column output
     */
    public static ClientDataType getClientDataTypeForColumn(@NonNull final Value value, @NonNull final ColumnType columnType) {
        if (columnType == ColumnType.FINGERPRINT) {
            return value.getClientDataType().getRepresentativeType();
        } else {
            return value.getClientDataType();
        }
    }
    /**
     * Utility functions for converting a C3R BigInt to and from byte representation.
     */
    public static final class BigInt {
        /**
         * Convert a big-endian formatted byte array to its long value.
         * Byte array must be {@value Long#BYTES} or less in length.
         * Shorter arrays are sign-extended (two's complement, per {@link BigInteger}).
         *
         * @param bytes Big-endian formatted byte array
         * @return Corresponding long value
         * @throws C3rRuntimeException If the byte array is more than the max length
         */
        public static Long fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            } else if (bytes.length > ClientDataType.BIGINT_BYTE_SIZE) {
                throw new C3rRuntimeException("BigInt values must be " + ClientDataType.BIGINT_BYTE_SIZE + " bytes or less.");
            }
            return new BigInteger(bytes).longValue();
        }
        /**
         * Convert an int value to a long big-endian byte representation.
         *
         * @param value Integer
         * @return Big-endian byte encoding of value
         */
        public static byte[] toBytes(final Integer value) {
            if (value == null) {
                return null;
            }
            return toBytes(value.longValue());
        }
        /**
         * Convert a long value to its big-endian byte representation.
         *
         * @param value Long
         * @return Big-endian byte encoding of value
         */
        static byte[] toBytes(final Long value) {
            if (value == null) {
                return null;
            }
            return ByteBuffer.allocate(ClientDataType.BIGINT_BYTE_SIZE).putLong(value).array();
        }
    }
    /**
     * Utility functions for converting a C3R boolean to and from byte representation.
     */
    public static final class Boolean {
        /**
         * Convert value to boolean.
         * Any array length is accepted; an empty array decodes to {@code false}.
         *
         * @param bytes byte encoded value
         * @return {@code true} if value is non-zero or {@code false}
         */
        @UnknownNullness
        public static java.lang.Boolean fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            }
            boolean nonZero = false;
            for (var b : bytes) {
                nonZero |= (b != 0);
            }
            return nonZero;
        }
        /**
         * Take a boolean value and convert it to a 1 byte long byte array.
         *
         * @param value {@code true}, {@code false} or {@code null}
         * @return {@code 1}, {@code 0} or {@code null}
         */
        public static byte[] toBytes(final java.lang.Boolean value) {
            if (value == null) {
                return null;
            } else if (value) {
                return new byte[]{(byte) 1};
            } else {
                return new byte[]{(byte) 0};
            }
        }
    }
    /**
     * Utility functions for converting a C3R Date to and from byte representation.
     */
    public static final class Date {
        /**
         * Get the number of day ticks since epoch.
         *
         * @param bytes Byte representation of the ticks
         * @return Number of days since epoch
         * @throws C3rRuntimeException If the byte array is not the expected length
         */
        public static Integer fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            }
            if (bytes.length != INT_BYTE_SIZE) {
                throw new C3rRuntimeException("DATE should be " + INT_BYTE_SIZE + " in length but " + bytes.length + " found.");
            }
            return ByteBuffer.wrap(bytes).getInt();
        }
        /**
         * Converts the number of ticks since epoch to its byte representation.
         * Delegates to {@link Int#toBytes(Integer)} since dates are stored as 32-bit day counts.
         *
         * @param value Number of ticks since epoch
         * @return Byte representation of number
         */
        public static byte[] toBytes(final Integer value) {
            return Int.toBytes(value);
        }
    }
    /**
     * Utility functions for converting a C3R Double to and from byte representation.
     */
    public static final class Double {
        /**
         * Converts a big-endian formatted byte array to its double value.
         * Number of bytes must be {@value java.lang.Double#BYTES}.
         *
         * @param bytes Bytes in big-endian format
         * @return Corresponding float value
         * @throws C3rRuntimeException If the byte array is not the expected length
         */
        static java.lang.Double fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            } else if (bytes.length != java.lang.Double.BYTES) {
                throw new C3rRuntimeException("Double values may only be " + java.lang.Double.BYTES + " bytes long.");
            }
            return ByteBuffer.wrap(bytes).getDouble();
        }
        /**
         * Convert a double value to its big-endian byte representation.
         *
         * @param value Double
         * @return Big-endian encoding of value
         */
        static byte[] toBytes(final java.lang.Double value) {
            if (value == null) {
                return null;
            }
            return ByteBuffer.allocate(java.lang.Double.BYTES).putDouble(value).array();
        }
    }
    /**
     * Utility functions for converting a C3R float to and from byte representation.
     */
    public static final class Float {
        /**
         * Converts big-endian formatted bytes to float value.
         * Number of bytes must be {@value java.lang.Float#BYTES}.
         *
         * @param bytes Bytes in big-endian format
         * @return Corresponding float value
         * @throws C3rRuntimeException If the byte array is not the expected length
         */
        public static java.lang.Float fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            } else if (bytes.length != java.lang.Float.BYTES) {
                throw new C3rRuntimeException("Float values may only be " + java.lang.Float.BYTES + " bytes long.");
            }
            return ByteBuffer.wrap(bytes).getFloat();
        }
        /**
         * Convert a float value to its big-endian byte representation.
         *
         * @param value Float
         * @return Big-endian encoding of value
         */
        public static byte[] toBytes(final java.lang.Float value) {
            if (value == null) {
                return null;
            }
            return ByteBuffer.allocate(java.lang.Float.BYTES).putFloat(value).array();
        }
    }
    /**
     * Utility functions for converting a C3R Int to and from byte representation.
     */
    public static final class Int {
        /**
         * Convert a big-endian formatted byte array to its integer value.
         * Byte array must be {@value Integer#BYTES} or less in length.
         * Shorter arrays are sign-extended (two's complement, per {@link BigInteger}).
         *
         * @param bytes Big-endian formatted byte array
         * @return Corresponding integer value
         * @throws C3rRuntimeException If the byte array is more than the max length
         */
        public static Integer fromBytes(@Nullable final byte[] bytes) {
            if (bytes == null) {
                return null;
            } else if (bytes.length > INT_BYTE_SIZE) {
                throw new C3rRuntimeException("Integer values must be " + INT_BYTE_SIZE + " bytes or less.");
            }
            return new BigInteger(bytes).intValue();
        }
        /**
         * Convert an integer value to its big-endian byte representation.
         *
         * @param value Integer
         * @return Big-endian byte encoding of value
         */
        public static byte[] toBytes(final Integer value) {
            if (value == null) {
                return null;
            }
            return ByteBuffer.allocate(INT_BYTE_SIZE).putInt(value).array();
        }
    }
    /**
     * Utility functions for converting a C3R String to and from byte representation.
     */
    public static final class String {
        /**
         * Convert the byte array to a UTF-8 String.
         *
         * @param bytes Bytes representing string value
         * @return UTF-8 string generated from bytes
         */
        public static java.lang.String fromBytes(final byte[] bytes) {
            if (bytes == null) {
                return null;
            }
            return StandardCharsets.UTF_8.decode(ByteBuffer.wrap(bytes)).toString();
        }
        /**
         * Convert a string to the UTF-8 bytes that represent its value.
         *
         * @param value String to convert to bytes
         * @return UTF-8 byte representation
         */
        public static byte[] toBytes(final java.lang.String value) {
            if (value == null) {
                return null;
            }
            return value.getBytes(StandardCharsets.UTF_8);
        }
    }
}
| 2,646 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/CsvRowFactory.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
/**
* Factory for creating empty CSV rows.
*/
public class CsvRowFactory implements RowFactory<CsvValue> {
/**
* Creates an empty CSV row for storing data.
*
* @return Empty CSV value row
*/
public CsvRow newRow() {
return new CsvRow();
}
}
| 2,647 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/Row.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Nonce;
import lombok.EqualsAndHashCode;
import lombok.NonNull;
import lombok.ToString;
import java.util.Collection;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.stream.Collectors;
/**
 * A single row/entry in user-provided data, parameterized by the data representation
 * of the user input.
 *
 * <p>
 * Entries are kept in a {@link LinkedHashMap}, so column iteration order matches
 * insertion order.
 *
 * @param <T> Data format type
 */
@EqualsAndHashCode
@ToString
public abstract class Row<T extends Value> implements Cloneable {
    /**
     * Entries (and their corresponding header value) in a row of data.
     * LinkedHashMap preserves column insertion order for output.
     */
    private final Map<ColumnHeader, T> entries = new LinkedHashMap<>();
    /**
     * Get the length of the row.
     *
     * @return Length of row
     */
    public int size() {
        return entries.size();
    }
    /**
     * Get the names of the columns.
     *
     * @return Set of unique column names (unmodifiable view, in insertion order)
     */
    public Collection<ColumnHeader> getHeaders() {
        return Collections.unmodifiableSet(entries.keySet());
    }
    /**
     * Applies a function to each header/entry pair without producing any output.
     *
     * @param action Function to apply to entry
     */
    public void forEach(final BiConsumer<? super ColumnHeader, ? super T> action) {
        entries.forEach(action);
    }
    /**
     * Adds a value to the list of row entries, replacing any previous value for the column.
     *
     * @param column Name of the column the data belongs to
     * @param value Row entry
     */
    public void putValue(@NonNull final ColumnHeader column, @NonNull final T value) {
        entries.put(column, value);
    }
    /**
     * Associate the given byte-encoded value with the specified column.
     *
     * @param column Header to add nonce for
     * @param encodedValue The value (encoded as bytes) to be associated with the column
     */
    public abstract void putBytes(@NonNull ColumnHeader column, byte[] encodedValue);
    /**
     * Add a nonce value to the row. (Note: nonce values do not appear in any input/output files and may not correspond
     * to a particular format's encoding of data. They are only used internally during data marshalling.)
     *
     * @param nonceColumn The name of the column for nonce values
     * @param nonce The nonce value to add
     */
    public abstract void putNonce(@NonNull ColumnHeader nonceColumn, Nonce nonce);
    /**
     * Checks if the entry for a column is in the row.
     *
     * @param column Column name to look for
     * @return {@code true} if row has an entry under that name
     */
    public boolean hasColumn(@NonNull final ColumnHeader column) {
        return entries.containsKey(column);
    }
    /**
     * Retrieve a value for a column in this row. If the column has no entry, an error is raised.
     *
     * @param column Name of the column to look up
     * @return Value found in column for this row
     * @throws C3rRuntimeException If there's an error looking up value for specified column in this row
     */
    public T getValue(@NonNull final ColumnHeader column) {
        final var val = entries.get(column);
        if (val == null) {
            // Include the known columns in the message to make lookup failures diagnosable.
            throw new C3rRuntimeException("Row lookup error: column `" + column
                    + "` not found in known row columns ["
                    + entries.keySet().stream()
                    .map(header -> "`" + header + "`")
                    .collect(Collectors.joining(", "))
                    + "].");
        } else {
            return val;
        }
    }
    /**
     * Remove the entry for named column in the row.
     *
     * @param column Name of column to drop
     */
    public void removeColumn(final ColumnHeader column) {
        this.entries.remove(column);
    }
    /**
     * Clones the Row.
     *
     * @return A cloned copy of the row
     */
    public abstract Row<T> clone();
}
| 2,648 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/CsvRow.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.internal.Nonce;
import lombok.NonNull;
/**
 * A {@link Row} holding CSV-formatted values.
 */
public final class CsvRow extends Row<CsvValue> {
    /**
     * Construct a row with no entries.
     */
    public CsvRow() {
    }

    /**
     * Construct a row containing the same column/value pairs as another row.
     *
     * @param other Row whose entries are copied into this one
     */
    public CsvRow(final Row<CsvValue> other) {
        other.forEach((header, value) -> putValue(header, value));
    }

    /**
     * Wraps the encoded bytes in a {@link CsvValue} and stores it under the column.
     *
     * {@inheritDoc}
     */
    @Override
    public void putBytes(@NonNull final ColumnHeader column, final byte[] encodedValue) {
        final CsvValue wrapped = new CsvValue(encodedValue);
        putValue(column, wrapped);
    }

    /**
     * Stores the nonce's raw bytes under the given column.
     *
     * {@inheritDoc}
     */
    @Override
    public void putNonce(@NonNull final ColumnHeader nonceColumn, final Nonce nonce) {
        putBytes(nonceColumn, nonce.getBytes());
    }

    /**
     * Produces a shallow copy: values are shared, entry order is preserved.
     *
     * {@inheritDoc}
     */
    @Override
    public Row<CsvValue> clone() {
        final CsvRow copy = new CsvRow();
        for (final ColumnHeader header : getHeaders()) {
            copy.putValue(header, getValue(header));
        }
        return copy;
    }
}
| 2,649 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/ClientDataType.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import lombok.Getter;
import java.util.Arrays;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * The various underlying data types that may be encountered during execution.
 *
 * <p>
 * Each type carries a fixed byte index used when encoding type information
 * alongside encrypted values (see the {@code BITS}-bit packing constants below).
 */
public enum ClientDataType {
    /**
     * Used for uninterpreted byte values.
     */
    UNKNOWN((byte) 0),
    /**
     * UTF8-encoded string.
     */
    STRING((byte) 1),
    /**
     * Signed 64-bit integer.
     */
    BIGINT((byte) 2),
    /**
     * Logical boolean ({@code true}/{@code false}).
     */
    BOOLEAN((byte) 3),
    /**
     * A fixed-length UTF8-encoded string.
     */
    CHAR((byte) 4),
    /**
     * Calendar date (year, month, day).
     */
    DATE((byte) 5),
    /**
     * Exact numeric of selectable precision.
     */
    DECIMAL((byte) 6),
    /**
     * Double precision floating-point number.
     */
    DOUBLE((byte) 7),
    /**
     * Single precision floating-point number.
     */
    FLOAT((byte) 8),
    /**
     * Signed 32-bit integer.
     */
    INT((byte) 9),
    /**
     * Signed 16-bit integer.
     */
    SMALLINT((byte) 10),
    /**
     * Date and time (without time zone).
     */
    TIMESTAMP((byte) 11),
    /**
     * A variable-length character string with a user defined limit on length.
     */
    VARCHAR((byte) 12);
    /**
     * How many bits are reserved for encoding ClientDataType.
     */
    public static final int BITS = 7;
    /**
     * Max number of types representable via the fixed-width bitwise encoding of data.
     */
    public static final int MAX_DATATYPE_COUNT = (1 << ClientDataType.BITS);
    /**
     * Number of bits in a SmallInt.
     */
    public static final Integer SMALLINT_BIT_SIZE = 16;
    /**
     * Number of bytes in a SmallInt.
     */
    public static final Integer SMALLINT_BYTE_SIZE = SMALLINT_BIT_SIZE / Byte.SIZE;
    /**
     * Number of bits in an Int.
     */
    public static final Integer INT_BIT_SIZE = 32;
    /**
     * Number of bytes in an Int.
     */
    public static final Integer INT_BYTE_SIZE = INT_BIT_SIZE / Byte.SIZE;
    /**
     * Number of bits in a BigInt.
     */
    public static final Integer BIGINT_BIT_SIZE = 64;
    /**
     * Number of bytes in a BigInt.
     */
    public static final Integer BIGINT_BYTE_SIZE = BIGINT_BIT_SIZE / Byte.SIZE;
    /**
     * Map of the {@code ClientDataType} enum indices to the corresponding {@code ClientDataType}.
     */
    private static final Map<Byte, ClientDataType> INDEX_DATA_TYPE_MAP = Arrays.stream(ClientDataType.values())
            .collect(Collectors.toMap(ClientDataType::getIndex, Function.identity()));
    /**
     * Get the index for this particular instance.
     */
    @Getter
    private final byte index;
    /**
     * Create an enum based off of the index.
     *
     * @param index Index of the {@code ClientDataType}
     */
    ClientDataType(final byte index) {
        this.index = index;
    }
    /**
     * Look up data type by enum index.
     *
     * @param index Index of the {@link ClientDataType}
     * @return The type corresponding to {@code index}
     * @throws C3rIllegalArgumentException If an unknown data type encountered
     */
    public static ClientDataType fromIndex(final byte index) {
        final var type = INDEX_DATA_TYPE_MAP.get(index);
        if (type == null) {
            throw new C3rIllegalArgumentException("Unknown data type index: " + index);
        }
        return type;
    }
    /**
     * Get the representative type for an equivalence class that a type belongs to if the class supports fingerprint columns.
     * If the type isn't in a supported equivalence class, it will throw an exception.
     *
     * <ul>
     *     <li> `BOOLEAN` equivalence class:
     *     <ul>
     *         <li>Representative data type: `BOOLEAN`</li>
     *         <li>Containing data types: `BOOLEAN`</li>
     *     </ul>
     *     <li> `DATE` equivalence class:
     *     <ul>
     *         <li>Representative data type: `DATE`</li>
     *         <li>Containing data types: `DATE`</li>
     *     </ul>
     *     <li> `INTEGRAL` equivalence class:
     *     <ul>
     *         <li>Representative data type: `BIGINT`</li>
     *         <li>Containing data types: `BIGINT`, `INT`, `SMALLINT`</li>
     *     </ul>
     *     <li> `STRING` equivalence class:
     *     <ul>
     *         <li>Representative data type: `STRING`</li>
     *         <li>Containing data types: `CHAR`, `STRING`, `VARCHAR`</li>
     *     </ul>
     *     <li> `TIMESTAMP` equivalence class:
     *     <ul>
     *         <li>Representative data type: `TIMESTAMP` (in nanoseconds)</li>
     *         <li>Containing data types: `TIMESTAMP` (in milliseconds, microseconds and nanoseconds)</li>
     *     </ul>
     * </ul>
     * Types not supported by equivalence classes: {@code DECIMAL}, {@code DOUBLE}, {@code FLOAT}, {@code UNKNOWN}.
     *
     * @return The super type for the equivalence class (if one exists for this `ClientDataType`).
     * @throws C3rRuntimeException ClientDataType is unknown or is not part of a supported equivalence class
     */
    public ClientDataType getRepresentativeType() {
        switch (this) {
            case CHAR:
            case STRING:
            case VARCHAR:
                return STRING;
            case SMALLINT:
            case INT:
            case BIGINT:
                return BIGINT;
            case BOOLEAN:
                return BOOLEAN;
            case DATE:
                return DATE;
            case TIMESTAMP:
            case DECIMAL:
            case DOUBLE:
            case FLOAT:
            case UNKNOWN:
                throw new C3rRuntimeException(this + " data type is not supported in Fingerprint Columns.");
            default:
                throw new C3rRuntimeException("Unknown ClientDataType: " + this);
        }
    }
    /**
     * Checks if this data type supports fingerprint columns.
     *
     * @return {@code true} if the type can be used in a fingerprint column
     */
    public boolean supportsFingerprintColumns() {
        try {
            this.getRepresentativeType();
            return true;
        } catch (C3rRuntimeException e) {
            return false;
        }
    }
    /**
     * Check if this data type is the parent type for an equivalence class.
     *
     * @return {@code true} if this is the parent type
     */
    public boolean isEquivalenceClassRepresentativeType() {
        try {
            // A ClientDataType is an equivalence class exactly when
            // it is its own equivalence class.
            return this == this.getRepresentativeType();
        } catch (C3rRuntimeException e) {
            return false;
        }
    }
}
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/Value.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import edu.umd.cs.findbugs.annotations.Nullable;
import edu.umd.cs.findbugs.annotations.UnknownNullness;
/**
 * Common interface for user-provided data entries (i.e., the values that populate
 * each column).
 */
public abstract class Value {
    /**
     * Null-safe helper that byte-encodes a possibly-absent {@code Value}.
     *
     * @param value The value to encode.
     * @return The value encoded as a byte array, or {@code null} if {@code value == null}.
     */
    public static byte[] getBytes(@Nullable final Value value) {
        return (value == null) ? null : value.getBytes();
    }

    /**
     * Whether the value is equivalent to {@code null}.
     *
     * @return {@code true} if the value represents a {@code null}
     */
    public abstract boolean isNull();

    /**
     * Get the type of the entry.
     *
     * @return Data type
     * @see ClientDataType
     */
    public abstract ClientDataType getClientDataType();

    /**
     * {@inheritDoc}
     */
    @Override
    public abstract boolean equals(Object other);

    /**
     * {@inheritDoc}
     */
    @Override
    public abstract int hashCode();

    /**
     * Get data type for entry.
     *
     * <p>
     * {@inheritDoc}
     */
    @Override
    @UnknownNullness
    public abstract String toString();

    /**
     * Convert the value to the specified {@code ClientDataType} if possible.
     *
     * @param type Type to format bytes as
     * @return byte representation of value converted to specified type
     */
    public abstract byte[] getBytesAs(ClientDataType type);

    /**
     * Encode a value as plaintext bytes.
     *
     * @return The underlying byte[]
     */
    public abstract byte[] getBytes();

    /**
     * Length of the value when byte encoded.
     *
     * @return The length of the underlying byte[]
     */
    public abstract int byteLength();
}
| 2,651 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/RowFactory.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
/**
 * Used to create rows for a particular data format.
 *
 * <p>
 * Implementations (e.g. a CSV row factory) produce empty {@link Row} instances
 * that callers then populate with values.
 *
 * @param <T> Data format type stored in the rows this factory creates
 */
public interface RowFactory<T extends Value> {
    /**
     * Create an empty row to be populated by the callee.
     *
     * @return An empty Row for storing data in
     */
    Row<T> newRow();
}
| 2,652 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/CsvValue.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import java.nio.charset.StandardCharsets;
/**
 * Implementation of {@link Value} for the CSV data format.
 *
 * <p>
 * CSV entries are untyped text, so every value is treated as a UTF-8 string
 * ({@link ClientDataType#STRING}); a {@code null} payload represents SQL {@code NULL}.
 */
@EqualsAndHashCode(callSuper = false)
public class CsvValue extends Value {
    /**
     * Data type used for CSV values.
     */
    public static final ClientDataType CLIENT_DATA_TYPE = ClientDataType.STRING;

    /**
     * Data stored as binary, or {@code null} for SQL {@code NULL}.
     * NOTE(review): the Lombok getter exposes the internal array without copying;
     * callers are assumed not to mutate it — confirm.
     */
    @Getter
    private final byte[] bytes;

    /**
     * Creates a CSV value containing the given String or `null`.
     *
     * @param content Data in String format (or {@code null} if the content is equivalent to SQL {@code NULL})
     */
    public CsvValue(final String content) {
        this.bytes = (content == null) ? null : content.getBytes(StandardCharsets.UTF_8);
    }

    /**
     * Creates a CSV value containing a defensive copy of the given bytes.
     *
     * @param content Data in binary format (or {@code null} if the content is equivalent to SQL {@code NULL})
     */
    public CsvValue(final byte[] content) {
        this.bytes = (content == null) ? null : content.clone();
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public int byteLength() {
        return (bytes == null) ? 0 : bytes.length;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public boolean isNull() {
        return bytes == null;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public ClientDataType getClientDataType() {
        return CLIENT_DATA_TYPE;
    }

    /**
     * Original string representation of the content.
     *
     * @return String representation of the value, or {@code null} for SQL {@code NULL}
     */
    @Override
    public String toString() {
        return (bytes == null) ? null : new String(bytes, StandardCharsets.UTF_8);
    }

    /**
     * {@inheritDoc}
     *
     * <p>
     * CSV values only support {@link ClientDataType#STRING}; a copy of the raw
     * bytes is returned so the caller cannot mutate internal state.
     *
     * @throws C3rRuntimeException If a conversion to any non-STRING type is requested
     */
    @Override
    public byte[] getBytesAs(final ClientDataType type) {
        if (type == ClientDataType.STRING) {
            return (bytes == null) ? null : bytes.clone();
        }
        // Fixed typo in the original message ("convered" -> "converted").
        throw new C3rRuntimeException("CsvValue could not be converted to type " + type);
    }
}
| 2,653 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/ClientDataInfo.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import lombok.Builder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
/**
 * Stores type and value metadata for encrypted values.
 *
 * <p>
 * Packed into a single byte: the lowest {@code FLAG_COUNT} bit(s) hold flags
 * (currently just the null indicator) and the remaining upper bits hold the
 * {@link ClientDataType} index.
 */
@EqualsAndHashCode
@Getter
public final class ClientDataInfo {
    /**
     * How many bytes are used to store data info.
     */
    static final int BYTE_LENGTH = 1;
    /**
     * Number of metadata flags.
     */
    static final byte FLAG_COUNT = 1;
    /**
     * Flag to indicate {@code null} value (lowest bit of the encoded byte).
     */
    private static final byte IS_NULL_FLAG = 0b00000001;
    /**
     * Data type being stored.
     */
    private final ClientDataType type;
    /**
     * Whether this is an encrypted {@code null} value.
     */
    private final boolean isNull;
    /**
     * Constructor for creating a byte from type information and null value status.
     *
     * @param type {@code ClientDataType} describing this value's type information
     * @param isNull Indicates if the particular value in this row is null or not (if applicable)
     */
    @Builder
    private ClientDataInfo(final ClientDataType type, final boolean isNull) {
        this.type = type;
        this.isNull = isNull;
    }
    /**
     * Extract encoded type and value metadata from a byte value.
     *
     * @param bits Byte with type and value information
     * @return The decoded {@link ClientDataInfo}
     */
    public static ClientDataInfo decode(final byte bits) {
        final boolean isNull = (bits & IS_NULL_FLAG) != 0;
        // After extracting flags from lower bits, shift to the right
        // and get the ClientDataType in the remaining bits.
        final ClientDataType type = ClientDataType.fromIndex((byte) (bits >> FLAG_COUNT));
        return new ClientDataInfo(type, isNull);
    }
    /**
     * Combine type information and flags into a single byte for encryption.
     *
     * @return 1-byte value that contains type information for the field and an indicator if the value is {@code null}
     */
    public byte encode() {
        // Type index occupies the upper bits; flag bit(s) fill the low end.
        byte bits = (byte) (type.getIndex() << FLAG_COUNT);
        if (isNull) {
            bits |= IS_NULL_FLAG;
        }
        return bits;
    }
}
| 2,654 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/data/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Implementation of {@link com.amazonaws.c3r.data.Value}, {@link com.amazonaws.c3r.data.Row}, and
* {@link com.amazonaws.c3r.data.RowFactory} for each supported data format. Also contains metadata classes to store additional context for
* cryptographic operations on values.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.data; | 2,655 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/exception/C3rRuntimeException.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.exception;
/**
 * Unchecked exception whose message is guaranteed safe to print and log.
 *
 * <p>
 * Behaves exactly like {@link RuntimeException}; the subtype only signals that the
 * message text contains no sensitive data.
 */
public class C3rRuntimeException extends RuntimeException {
    /**
     * Creates an exception carrying only a log-safe message.
     *
     * @param message Safe error message for printing and logging
     */
    public C3rRuntimeException(final String message) {
        super(message);
    }

    /**
     * Creates an exception carrying a log-safe message plus the underlying cause.
     *
     * @param message Safe error message for printing and logging
     * @param cause Original error, which may not be safe to print or log unless the
     *              user has opted into a higher level of logging
     */
    public C3rRuntimeException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 2,656 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/exception/C3rIllegalArgumentException.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.exception;
/**
 * Unchecked exception for invalid method arguments whose message is guaranteed safe
 * to print and log.
 *
 * <p>
 * Plays the role of {@link IllegalArgumentException} within the C3R exception family.
 *
 * @see C3rRuntimeException
 */
public class C3rIllegalArgumentException extends C3rRuntimeException {
    /**
     * Creates an exception for an invalid parameter value.
     *
     * @param message Safe error message text for printing and logging
     */
    public C3rIllegalArgumentException(final String message) {
        super(message);
    }

    /**
     * Creates an exception for an invalid parameter value with an underlying cause.
     *
     * @param message Safe error message text for printing and logging
     * @param cause Original error, which may not be safe to print or log unless the
     *              user has opted into a higher level of logging
     */
    public C3rIllegalArgumentException(final String message, final Throwable cause) {
        super(message, cause);
    }
}
| 2,657 |
0 | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-core/src/main/java/com/amazonaws/c3r/exception/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Contains exceptions used by C3R that will only have information in the message portion that is safe for printing and logging.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.exception; | 2,658 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io/ParquetTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Utility functions for reading Parquet data out of files.
 */
public final class ParquetTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private ParquetTestUtility() {
    }

    /**
     * Converts one row of Parquet values into a string array ordered by column index.
     *
     * @param row Parquet values looked up by name
     * @param indices Mapping of column index to name
     * @return Ordered Parquet values converted to strings ({@code null} renders as "")
     */
    private static String[] rowToStringArray(final Row<ParquetValue> row, final Map<Integer, ColumnHeader> indices) {
        final String[] result = new String[row.size()];
        for (int col = 0; col < result.length; col++) {
            final String text = row.getValue(indices.get(col)).toString();
            result[col] = (text != null) ? text : "";
        }
        return result;
    }

    /**
     * Reads a Parquet file into a list of ordered string values.
     *
     * @param filePath Location of the file to read
     * @return Contents of the file as a list of rows, each row being its string values
     */
    public static List<String[]> readContentAsStringArrays(final String filePath) {
        final ParquetRowReader reader = ParquetRowReader.builder().sourceName(filePath).build();
        final ParquetSchema schema = reader.getParquetSchema();
        // Invert the schema's header list into an index -> header lookup table.
        final Map<Integer, ColumnHeader> headersByIndex = schema.getHeaders().stream()
                .collect(Collectors.toMap(schema::getColumnIndex, Function.identity()));
        return readAllRows(reader).stream()
                .map(row -> rowToStringArray(row, headersByIndex))
                .collect(Collectors.toList());
    }

    /**
     * Drains a reader, collecting every remaining row in file order.
     *
     * @param reader Reads a particular Parquet file
     * @return Contents of the file as a list of rows with Parquet values
     */
    public static List<Row<ParquetValue>> readAllRows(final ParquetRowReader reader) {
        final List<Row<ParquetValue>> rows = new ArrayList<>();
        while (reader.hasNext()) {
            rows.add(reader.next());
        }
        return rows;
    }
}
| 2,659 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io/CsvTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.univocity.parsers.csv.CsvParser;
import com.univocity.parsers.csv.CsvParserSettings;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Utility functions for common CSV data manipulation needed during testing.
 */
public final class CsvTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CsvTestUtility() {
    }

    /**
     * Creates a simple CSV parser for the specified columns that will read out {@code maxColumns}.
     *
     * @param fileName Location of the file to read
     * @param maxColumns Maximum number of columns expected from file
     * @return Parser for getting file contents
     * @throws C3rRuntimeException If the CSV file is not found
     */
    public static CsvParser getCsvParser(final String fileName, final Integer maxColumns) {
        try {
            final CsvParserSettings settings = getBasicParserSettings(maxColumns, false);
            final CsvParser parser = new CsvParser(settings);
            final InputStreamReader reader = new InputStreamReader(new FileInputStream(fileName), StandardCharsets.UTF_8);
            parser.beginParsing(reader);
            return parser;
        } catch (FileNotFoundException e) {
            // Use the project's log-safe exception type with context instead of a
            // raw RuntimeException (still catchable as RuntimeException by callers).
            throw new C3rRuntimeException("CSV file not found: " + fileName, e);
        }
    }

    /**
     * Create basic parser settings that don't modify/NULL any values
     * aside from the default whitespace trimming.
     *
     * @param maxColumns Most columns allowed in the CSV file
     * @param keepQuotes If quotes should be kept as part of the string read in or not
     * @return Settings to bring up a simple CSV parser
     */
    private static CsvParserSettings getBasicParserSettings(final Integer maxColumns, final boolean keepQuotes) {
        final CsvParserSettings settings = new CsvParserSettings();
        settings.setLineSeparatorDetectionEnabled(true);
        // Unquoted empty fields parse as "", quoted empties parse as "\"\"" so tests
        // can tell the two apart.
        settings.setNullValue("");
        settings.setEmptyValue("\"\"");
        settings.setKeepQuotes(keepQuotes);
        if (maxColumns != null) {
            settings.setMaxColumns(maxColumns);
        }
        return settings;
    }

    /**
     * Read the contents of the CSV file as rows, mapping column names to content.
     *
     * <p>
     * The column names are normalized per the C3R's normalizing (lower-cased and whitespace trimmed).
     *
     * @param fileName File to read
     * @return Rows read in the order they appear
     * @throws C3rIllegalArgumentException If the file does not have the same number of entries in each row
     */
    public static List<Map<String, String>> readRows(final String fileName) {
        final CsvParserSettings settings = getBasicParserSettings(null, true);
        settings.setHeaderExtractionEnabled(true);
        final CsvParser parser = new CsvParser(settings);
        return parser.parseAllRecords(new File(fileName)).stream().map(r -> r.toFieldMap()).collect(Collectors.toList());
    }

    /**
     * Read the file content with rows as arrays. There is no mapping to column headers, if any, in the file.
     *
     * @param fileName Location of file to read
     * @param keepQuotes If quotes should be kept as part of the string read in or not
     * @return List of rows where each row is an array of values
     */
    public static List<String[]> readContentAsArrays(final String fileName, final boolean keepQuotes) {
        final CsvParserSettings settings = getBasicParserSettings(null, keepQuotes);
        return new CsvParser(settings).parseAll(new File(fileName), StandardCharsets.UTF_8);
    }
}
| 2,660 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io/schema/ParquetSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class ParquetSchemaGeneratorTest {
private ParquetSchemaGenerator getTestSchemaGenerator(final String file) throws IOException {
final String output = FileTestUtility.resolve("schema.json").toString();
return ParquetSchemaGenerator.builder()
.inputParquetFile(file)
.targetJsonFile(output)
.overwrite(true)
.build();
}
@Test
public void getSourceHeadersTest() throws IOException {
assertEquals(
GeneralTestUtility.DATA_SAMPLE_HEADERS,
getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceHeaders());
}
@Test
public void getSourceColumnCountTest() throws IOException {
assertEquals(
Collections.nCopies(GeneralTestUtility.DATA_SAMPLE_HEADERS.size(), ClientDataType.STRING),
getTestSchemaGenerator("../samples/parquet/data_sample.parquet").getSourceColumnTypes());
}
@Test
public void getSourceColumnTypesTest() throws IOException {
assertEquals(
List.of(ClientDataType.BOOLEAN,
ClientDataType.STRING,
ClientDataType.UNKNOWN,
ClientDataType.SMALLINT,
ClientDataType.INT,
ClientDataType.BIGINT,
ClientDataType.FLOAT,
ClientDataType.DOUBLE,
ClientDataType.TIMESTAMP),
getTestSchemaGenerator("../samples/parquet/rows_100_groups_10_prim_data.parquet").getSourceColumnTypes());
}
@Test
public void emptyFileTest() throws IOException {
final String emptyParquetFile = FileTestUtility.createTempFile("empty", ".parquet").toString();
assertThrows(C3rRuntimeException.class, () ->
getTestSchemaGenerator(emptyParquetFile));
}
} | 2,661 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/io/schema/InteractiveSchemaGeneratorTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class InteractiveSchemaGeneratorTest {
    // Source column headers shared by most tests (already in normalized form).
    private final List<ColumnHeader> headers = Stream.of(
            "header1",
            "header2",
            "header3"
    ).map(ColumnHeader::new)
            .collect(Collectors.toList());
    // One ClientDataType per header: all STRING (supports crypto) vs. all UNKNOWN (cleartext only).
    private final List<ClientDataType> stringColumnTypes = Collections.nCopies(headers.size(), ClientDataType.STRING);
    private final List<ClientDataType> unknownColumnTypes = Collections.nCopies(headers.size(), ClientDataType.UNKNOWN);
    // Expected mapped-style schema JSON: header2 fanned out to sealed/fingerprint/cleartext
    // targets, header3 sealed with MAX padding; header1 omitted.
    private final String exampleMappedSchemaString =
            String.join("\n",
                    "{",
                    "  \"headerRow\": true,",
                    "  \"columns\": [",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_sealed\",",
                    "      \"type\": \"sealed\",",
                    "      \"pad\": {",
                    "        \"type\": \"NONE\"",
                    "      }",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_fingerprint\",",
                    "      \"type\": \"fingerprint\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2\",",
                    "      \"type\": \"cleartext\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header3\",",
                    "      \"targetHeader\": \"header3\",",
                    "      \"type\": \"sealed\",",
                    "      \"pad\": {",
                    "        \"type\": \"MAX\",",
                    "        \"length\": \"0\"",
                    "      }",
                    "    }",
                    "  ]",
                    "}");
    // Same mapped schema when cleartext is disallowed: note the "targetheader2" column
    // appears as FINGERPRINT here instead of cleartext, and types are upper-cased.
    private final String exampleMappedSchemaNoCleartextString =
            String.join("\n",
                    "{",
                    "  \"headerRow\": true,",
                    "  \"columns\": [",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_sealed\",",
                    "      \"type\": \"SEALED\",",
                    "      \"pad\": {",
                    "        \"type\": \"NONE\"",
                    "      }",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_fingerprint\",",
                    "      \"type\": \"FINGERPRINT\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2\",",
                    "      \"type\": \"FINGERPRINT\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header3\",",
                    "      \"targetHeader\": \"header3\",",
                    "      \"type\": \"SEALED\",",
                    "      \"pad\": {",
                    "        \"type\": \"MAX\",",
                    "        \"length\": \"0\"",
                    "      }",
                    "    }",
                    "  ]",
                    "}");
    // Positional-style equivalent of the mapped schema: no source headers, columns are
    // arrays indexed by position (column 1 is skipped via the empty array).
    private final String examplePositionalSchemaString =
            String.join("\n",
                    "{",
                    "  \"headerRow\": false,",
                    "  \"columns\": [",
                    "    [],",
                    "    [",
                    "      {",
                    "        \"type\": \"sealed\",",
                    "        \"pad\": {",
                    "          \"type\": \"NONE\"",
                    "        },",
                    "        \"targetHeader\": \"targetheader2_sealed\"",
                    "      },",
                    "      {",
                    "        \"type\": \"fingerprint\",",
                    "        \"targetHeader\": \"targetheader2_fingerprint\"",
                    "      },",
                    "      {",
                    "        \"type\": \"cleartext\",",
                    "        \"targetHeader\": \"targetheader2\"",
                    "      }",
                    "    ],",
                    "    [",
                    "      {",
                    "        \"type\": \"sealed\",",
                    "        \"pad\": {",
                    "          \"type\": \"MAX\",",
                    "          \"length\": 0",
                    "        },",
                    "        \"targetHeader\": \"targetheader3\"",
                    "      }",
                    "    ]",
                    "  ]",
                    "}");
    // Mapped schema where every retained column is cleartext (header2 duplicated thrice).
    private final String exampleMappedSchemaAllCleartextString =
            String.join("\n",
                    "{",
                    "  \"headerRow\": true,",
                    "  \"columns\": [",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_1\",",
                    "      \"type\": \"cleartext\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_2\",",
                    "      \"type\": \"cleartext\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header2\",",
                    "      \"targetHeader\": \"targetheader2_3\",",
                    "      \"type\": \"cleartext\"",
                    "    },",
                    "    {",
                    "      \"sourceHeader\": \"header3\",",
                    "      \"targetHeader\": \"header3\",",
                    "      \"type\": \"cleartext\"",
                    "    }",
                    "  ]",
                    "}");
    // Positional-style equivalent of the all-cleartext schema above.
    private final String examplePositionalSchemaAllCleartextString =
            String.join("\n",
                    "{",
                    "  \"headerRow\": false,",
                    "  \"columns\": [",
                    "    [],",
                    "    [",
                    "      {",
                    "        \"type\": \"cleartext\",",
                    "        \"targetHeader\": \"targetheader2_1\"",
                    "      },",
                    "      {",
                    "        \"type\": \"cleartext\",",
                    "        \"targetHeader\": \"targetheader2_2\"",
                    "      },",
                    "      {",
                    "        \"type\": \"cleartext\",",
                    "        \"targetHeader\": \"targetheader2_3\"",
                    "      }",
                    "    ],",
                    "    [",
                    "      {",
                    "        \"type\": \"cleartext\",",
                    "        \"targetHeader\": \"targetheader3\"",
                    "      }",
                    "    ]",
                    "  ]",
                    "}");
    // Generator under test, rebuilt by createInteractiveSchemaGenerator() in each test.
    private InteractiveSchemaGenerator schemaGen;
    // Scratch path the generated schema JSON is written to (fresh per test via setup()).
    private Path targetSchema;
    // Captures everything the generator prints, so tests can scan for prompts/errors.
    private ByteArrayOutputStream consoleOutput;
    @BeforeEach
    public void setup() throws IOException {
        // Fresh output path per test so generated schema files never collide.
        targetSchema = FileTestUtility.resolve("schema.json");
    }
    /**
     * Builds the generator under test, feeding {@code simulatedUserInput} (plus a trailing
     * newline) as console input and capturing console output in {@link #consoleOutput}.
     *
     * @param simulatedUserInput Newline-separated answers for the interactive prompts
     * @param headers Source column headers, or {@code null} for headerless input
     * @param types One source data type per column
     * @param clientSettings Clean room settings, or {@code null} for unrestricted prompting
     */
    private void createInteractiveSchemaGenerator(final String simulatedUserInput,
                                                  final List<ColumnHeader> headers,
                                                  final List<ClientDataType> types,
                                                  final ClientSettings clientSettings) {
        final var userInput = new BufferedReader(new StringReader(simulatedUserInput + "\n"));
        consoleOutput = new ByteArrayOutputStream();
        schemaGen = InteractiveSchemaGenerator.builder()
                .sourceHeaders(headers)
                .sourceColumnTypes(types)
                .targetJsonFile(targetSchema.toString())
                .consoleInput(userInput)
                .consoleOutput(new PrintStream(consoleOutput, true, StandardCharsets.UTF_8))
                .clientSettings(clientSettings)
                .build();
    }
    @Test
    public void validateErrorWithMismatchedColumnCounts() {
        // Three headers paired with an empty type list must be rejected at construction.
        assertThrows(C3rIllegalArgumentException.class, () ->
                createInteractiveSchemaGenerator("", headers, List.of(), null));
    }
    @Test
    public void validateUnexpectedUserInputEndError() {
        // Input ending before all three columns are answered must raise; a complete
        // three-answer script ("0\n0\n0") must finish cleanly.
        final List<String> incompleteUserInputs = List.of("", "0", "0\n", "0\n0", "0\n0\n");
        final Consumer<String> schemaGenRunner = (userInput) ->
                InteractiveSchemaGenerator.builder()
                        .sourceHeaders(headers)
                        .sourceColumnTypes(stringColumnTypes)
                        .targetJsonFile(targetSchema.toString())
                        .consoleInput(new BufferedReader(new StringReader(userInput)))
                        .consoleOutput(new PrintStream(new ByteArrayOutputStream(), true, StandardCharsets.UTF_8))
                        .clientSettings(null)
                        .build()
                        .run();
        for (var input : incompleteUserInputs) {
            assertThrows(C3rRuntimeException.class, () -> schemaGenRunner.accept(input));
        }
        assertDoesNotThrow(() -> schemaGenRunner.accept("0\n0\n0"));
    }
    @Test
    public void promptNonnegativeIntValidTest() {
        // In-range numeric input is returned as-is and no "expected" complaint is printed.
        final List<String> validInputs = List.of("42", "0", "100");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertEquals(
                    Integer.valueOf(input),
                    schemaGen.promptNonNegativeInt("", null, 100));
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }
@Test
public void promptNonnegativeIntInvalidTest() {
final List<String> validInputs = List.of("", "NotANumber", "-1", "101");
for (var input : validInputs) {
createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
assertNull(schemaGen.promptNonNegativeInt("", null, 100));
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
}
}
    @Test
    public void promptNonNegativeIntValidDefaultTest() {
        // Blank input falls back to the supplied default (2); numeric input overrides it.
        final List<String> validInputs = List.of("1", "", "3");
        for (var input : validInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
            assertEquals(
                    input.isBlank() ? 2 : Integer.parseInt(input),
                    schemaGen.promptNonNegativeInt("", 2, 100));
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }
    @Test
    public void promptYesOrNoValidTest() {
        final List<Boolean> defaultBooleanAnswers = Arrays.asList(null, true, false);
        // Any y/yes spelling returns true regardless of the default answer.
        final List<String> validYesStrings = List.of("y", "yes", "Y", "YES");
        for (var input : validYesStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertTrue(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        // Any n/no spelling returns false regardless of the default answer.
        final List<String> validNoStrings = List.of("n", "no", "N", "NO");
        for (var input : validNoStrings) {
            for (var answer : defaultBooleanAnswers) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
                assertFalse(schemaGen.promptYesOrNo("", answer));
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
        // Empty input echoes the default; with no default it complains and returns null.
        for (var answer : defaultBooleanAnswers) {
            createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
            assertEquals(answer, schemaGen.promptYesOrNo("", answer));
            if (answer == null) {
                assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
            } else {
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }
    @Test
    public void promptYesOrNoInvalidTest() {
        // Empty or unrecognized (non-English) answers yield null plus an "expected" complaint.
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("ja", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("nein", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptYesOrNo("", null));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    @Test
    public void promptColumnTypeValidTest() {
        // With null settings or low-assurance mode, all three column types are selectable
        // via single-letter or full-word input in either case.
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        final List<ClientSettings> permissiveSettings = new ArrayList<>();
        permissiveSettings.add(null);
        permissiveSettings.add(ClientSettings.lowAssuranceMode());
        for (var settings : permissiveSettings) {
            for (var input : validCleartextInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.CLEARTEXT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
            for (var input : validFingerprintInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
            final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
            for (var input : validSealedInputs) {
                createInteractiveSchemaGenerator(input, headers, stringColumnTypes, settings);
                assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
                assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
            }
        }
    }
    @Test
    public void promptColumnTypeRestrictiveSettingsTest() {
        // High-assurance mode rejects cleartext selections but still allows the
        // cryptographic column types.
        final List<String> validCleartextInputs = List.of("c", "C", "cleartext", "CLEARTEXT");
        for (var input : validCleartextInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertNull(schemaGen.promptColumnType());
            assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validFingerprintInputs = List.of("f", "F", "fingerprint", "FINGERPRINT");
        for (var input : validFingerprintInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.FINGERPRINT, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
        final List<String> validSealedInputs = List.of("s", "S", "sealed", "SEALED");
        for (var input : validSealedInputs) {
            createInteractiveSchemaGenerator(input, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
            assertEquals(ColumnType.SEALED, schemaGen.promptColumnType());
            assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        }
    }
@Test
public void promptColumnTypeInvalidTest() {
final List<String> validCleartextInputs = List.of("", "a", "unrostricted", "solekt", "joyn");
for (var input : validCleartextInputs) {
createInteractiveSchemaGenerator(input, headers, stringColumnTypes, null);
assertNull(schemaGen.promptColumnType());
assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
}
}
    @Test
    public void promptTargetHeaderSuffixTest() {
        // Cleartext columns never get a suffix; sealed/fingerprint columns get their
        // default suffix on "y" (or blank for fingerprint) and none on "n".
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("y", headers, stringColumnTypes, null);
        assertEquals("_sealed", schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.SEALED));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals("_fingerprint", schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeaderSuffix(ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    @Test
    public void promptTargetHeaderTest() {
        // Blank input keeps the source header; explicit input is normalized (lower-cased).
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("a"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertFalse(consoleOutput.toString().toLowerCase().contains("normalized"));
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
        // "+ 1" concatenates the int, producing a header one character over the limit.
        createInteractiveSchemaGenerator("b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH) + 1, headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    @Test
    public void promptTargetHeaderWithoutSourceHeadersTest() {
        // empty input does _not_ give you a default target header when no source headers exist
        createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        // providing input for a target header when source headers are null remains unchanged
        // NOTE(review): these two cases build the generator with `headers` (not null) and
        // only pass a null default to promptTargetHeader — confirm that is the intent.
        createInteractiveSchemaGenerator("b", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("B", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("b"), schemaGen.promptTargetHeader(null, ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertTrue(consoleOutput.toString().toLowerCase().contains("normalized"));
    }
    @Test
    public void promptTargetHeaderAlreadyUsedHeaderTest() {
        // A target header may be claimed once; the second attempt must be rejected.
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(new ColumnHeader("header"), schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("header"), ColumnType.CLEARTEXT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    @Test
    public void promptTargetHeaderWithSuffixTest() {
        // The fingerprint suffix is appended automatically, and fits exactly when the
        // base name leaves just enough room under the header length limit.
        final String suffix = ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX;
        createInteractiveSchemaGenerator("\n", headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader("a_fingerprint"),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length())
                + "\n", headers, stringColumnTypes, null);
        assertEquals(
                new ColumnHeader(
                        "b".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH - suffix.length())
                                + suffix),
                schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
    }
    @Test
    public void promptTargetHeaderCannotAddSuffixTest() {
        // A max-length base name leaves no room for the fingerprint suffix — must fail.
        createInteractiveSchemaGenerator("a".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH)
                + "\n", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptTargetHeader(new ColumnHeader("a"), ColumnType.FINGERPRINT));
        assertTrue(consoleOutput.toString().toLowerCase().contains("unable to add header suffix"));
    }
    @Test
    public void promptPadTypeTest() {
        // Exercises pad-type selection: empty input needs a default, single letters and
        // full words map to NONE/FIXED/MAX, and anything else is rejected.
        final var header = new ColumnHeader("a");
        final PadType nullDefaultType = null;
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, PadType.MAX));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("none", headers, stringColumnTypes, null);
        assertEquals(PadType.NONE, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("f", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("fixed", headers, stringColumnTypes, null);
        assertEquals(PadType.FIXED, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("m", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("max", headers, stringColumnTypes, null);
        assertEquals(PadType.MAX, schemaGen.promptPadType(header, nullDefaultType));
        assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
        createInteractiveSchemaGenerator("unknown", headers, stringColumnTypes, null);
        assertNull(schemaGen.promptPadType(header, nullDefaultType));
        assertTrue(consoleOutput.toString().toLowerCase().contains("expected"));
    }
@Test
public void promptPadTest() {
final var header = new ColumnHeader("a");
createInteractiveSchemaGenerator("n", headers, stringColumnTypes, null);
assertEquals(
Pad.DEFAULT,
schemaGen.promptPad(header));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
createInteractiveSchemaGenerator("f\n42", headers, stringColumnTypes, null);
assertEquals(
Pad.builder().type(PadType.FIXED).length(42).build(),
schemaGen.promptPad(header));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
createInteractiveSchemaGenerator("m\n42", headers, stringColumnTypes, null);
assertEquals(
Pad.builder().type(PadType.MAX).length(42).build(),
schemaGen.promptPad(header));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
@Test
public void promptColumnInfoWithSourceHeadersTest() {
final String columnType = "sealed";
final String targetName = "target";
final String useSuffix = "no";
final String paddingType = "none";
createInteractiveSchemaGenerator(String.join("\n",
columnType,
targetName,
useSuffix,
paddingType),
headers,
stringColumnTypes, null);
assertEquals(
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("source"))
.targetHeader(new ColumnHeader("target"))
.type(ColumnType.SEALED)
.pad(Pad.DEFAULT)
.build(),
schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
@Test
public void promptColumnInfoWithSourceHeadersAndUnknownTypeTest() {
createInteractiveSchemaGenerator("target", headers, unknownColumnTypes, null);
assertEquals(
ColumnSchema.builder()
.sourceHeader(new ColumnHeader("source"))
.targetHeader(new ColumnHeader("target"))
.type(ColumnType.CLEARTEXT)
.build(),
schemaGen.promptColumnInfo(new ColumnHeader("source"), 1, 2));
assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
@Test
public void promptColumnInfoWithoutSourceHeadersTest() {
createInteractiveSchemaGenerator("", null, stringColumnTypes, null);
final String columnType = "sealed";
final String targetName = "target";
final String useSuffix = "no";
final String paddingType = "none";
createInteractiveSchemaGenerator(String.join("\n",
columnType,
targetName,
useSuffix,
paddingType),
headers,
stringColumnTypes, null);
assertEquals(
ColumnSchema.builder()
.sourceHeader(null)
.targetHeader(new ColumnHeader("target"))
.type(ColumnType.SEALED)
.pad(Pad.builder().type(PadType.NONE).build())
.build(),
schemaGen.promptColumnInfo(null, 1, 2));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
@Test
public void promptColumnInfoWithoutSourceHeadersAndUnknownTypeTest() {
createInteractiveSchemaGenerator("target", null, unknownColumnTypes, null);
assertEquals(
ColumnSchema.builder()
.targetHeader(new ColumnHeader("target"))
.type(ColumnType.CLEARTEXT)
.build(),
schemaGen.promptColumnInfo(null, 1, 2));
assertTrue(consoleOutput.toString().toLowerCase().contains("cryptographic computing is not supported"));
assertFalse(consoleOutput.toString().toLowerCase().contains("expected"));
}
@Test
public void runGenerateNoSchemaTest() {
// 0 target columns to generate for each source column
createInteractiveSchemaGenerator("0\n".repeat(headers.size()), headers, stringColumnTypes, null);
schemaGen.run();
assertTrue(consoleOutput.toString().contains("No target columns were specified."));
assertEquals(0, targetSchema.toFile().length());
}
    /**
     * End-to-end interactive run with source headers and string-typed columns: header1 is skipped,
     * header2 fans out into sealed/fingerprint/cleartext targets, and header3 takes defaults for a
     * max-padded sealed column. The generated file must match the reference mapped schema.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed", // header3, column 1 type
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * End-to-end interactive run where every column's client data type is unknown: the generator
     * only prompts for target names and emits an all-cleartext mapped schema.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, null);
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * End-to-end interactive run with positional (headerless) input: the same prompt script as the
     * mapped-schema test, but target names must be supplied explicitly, producing the reference
     * positional schema.
     */
    @Test
    public void runGenerateSchemaWithoutSourceHeadersTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed", // header3, column 1 type
                        "targetHeader3", // header3, column 1 target header
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, null, stringColumnTypes, null);
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * Positional (headerless) run where every column's client data type is unknown: only target
     * names are prompted for and the result is the all-cleartext positional reference schema.
     */
    @Test
    public void runGenerateSchemaWithoutSourceHeadersUnknownTypesTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "targetHeader3" // header3, column 1 target header
                );
        createInteractiveSchemaGenerator(userInput, null, unknownColumnTypes, null);
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(examplePositionalSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * Same prompt script as {@code runGenerateSchemaWithSourceHeadersTest}, but every answer is
     * first given as an invalid value: the generator must re-prompt, accept the valid retry, and
     * still produce the reference mapped schema.
     */
    @Test
    public void runTestWithBadInputsMixedIn() {
        // Scripted console answers: each bad value is followed by its valid retry.
        final String userInput =
                String.join("\n",
                        // source header1
                        "zero", // bad number of columns for header1
                        "0", // number of columns for header1
                        // source header2
                        "three", // bad number of columns
                        "3", // number of columns
                        // header 2, column 1
                        "special", // bad column type
                        "sealed", // header 2, column 1 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 1 bad target header
                        "targetHeader2", // header 2, column 1 target header
                        "maybe", // header 2, column 1 bad use suffix
                        "yes", // header 2, column 1 use suffix
                        "super", // header 2, column 1 bad padding type
                        "none", // header 2, column 1 padding type
                        // header 2, column 2
                        "goin", // header 2, column 2 bad type
                        "fingerprint", // header 2, column 2 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 2 bad target header
                        "targetHeader2", // header 2, column 2 target header
                        "I can't decide", // header 2, column 2 bad use suffix
                        "yes", // header 2, column 2 use suffix
                        // header 2, column 3
                        "plaintext", // header 2, column 3 bad type
                        "cleartext", // header 2, column 3 type
                        "long_name".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH), // header 2, column 3 bad target header
                        "targetHeader2", // header 2, column 3 target header
                        // source header3
                        "one", // bad number of columns for header3
                        "", // number of columns for header3 (default to 1)
                        "sealed", // header3, column 1 type
                        "", // header3, column 1 target header (default)
                        "what", // bad header3, column 1 use suffix
                        "n", // header3, column 1 use suffix
                        "mux", // bad header3, column 1 padding type
                        "max", // header3, column 1 padding type
                        "zero", // bad header3, column 1 padding length
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, null);
        schemaGen.run();
        // Despite the bad answers, a non-empty, correct schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        final TableSchema expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final TableSchema actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson(actualSchema));
    }
@Test
public void nullValueCsvSchemaGeneratorTest() {
// no headers
assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
.inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
.targetJsonFile(targetSchema.toString())
.overwrite(true).build());
// no target
assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
.inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
.overwrite(true)
.hasHeaders(true).build());
// no input
assertThrows(NullPointerException.class,
() -> CsvSchemaGenerator.builder()
.targetJsonFile(targetSchema.toString())
.overwrite(true)
.hasHeaders(true).build());
// no overwrite
assertThrows(NullPointerException.class, () -> CsvSchemaGenerator.builder()
.inputCsvFile("../samples/csv/data_sample_without_quotes.csv")
.targetJsonFile(targetSchema.toString())
.hasHeaders(true).build());
}
    /**
     * Same scripted run as {@code runGenerateSchemaWithSourceHeadersTest}, but with permissive
     * (low assurance) clean room settings: cleartext columns remain allowed, so the output must
     * still match the reference mapped schema.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersPermissiveSettingsTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type
                        "targetHeader2", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed", // header3, column 1 type
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * With restrictive (high assurance) settings, answering "cleartext" is rejected and the user is
     * re-prompted; the retry uses "fingerprint", so the output must match the no-cleartext
     * reference mapped schema.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersRestrictiveSettingsTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        "sealed", // header2, column 1 type
                        "targetHeader2", // header2, column 1 target header
                        "yes", // header2, column 1 use suffix
                        "none", // header2, column 1 padding type
                        // header2, column 2
                        "fingerprint", // header2, column 2 type
                        "targetHeader2", // header2, column 2 target header
                        "yes", // header2, column 2 use suffix
                        // header2, column 3
                        "cleartext", // header2, column 3 type, NOT ALLOWED
                        "fingerprint", // header2, column 3 type retried after rejection
                        "targetHeader2", // header2, column 3 target header
                        "n", // header2, column 3 use suffix
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        "sealed", // header3, column 1 type
                        "", // header3, column 1 target header (default)
                        "n", // header3, column 1 use suffix
                        "max", // header3, column 1 padding type
                        "" // header3, column 1 padding length (default 0)
                );
        createInteractiveSchemaGenerator(userInput, headers, stringColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaNoCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * Unknown client types with permissive (low assurance) settings: all columns fall back to
     * cleartext, which is allowed, so the all-cleartext mapped reference schema is produced.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesPermissiveSettingsTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.lowAssuranceMode());
        schemaGen.run();
        // A non-empty schema file must have been produced.
        assertNotEquals(0, targetSchema.toFile().length());
        // Compare via round-tripped JSON so formatting differences don't matter.
        final var expectedSchema = GsonUtil.fromJson(exampleMappedSchemaAllCleartextString, TableSchema.class);
        final var actualSchema = GsonUtil.fromJson(FileUtil.readBytes(targetSchema.toString()), TableSchema.class);
        assertEquals(GsonUtil.toJson(expectedSchema), GsonUtil.toJson((actualSchema)));
    }
    /**
     * Unknown client types with restrictive (high assurance) settings: every column would have to
     * be cleartext, which is forbidden, so no column is eligible and no schema is written.
     */
    @Test
    public void runGenerateSchemaWithSourceHeadersUnknownTypesRestrictiveSettingsTest() {
        // Scripted console answers, one per prompt, joined by newlines.
        final String userInput =
                String.join("\n",
                        // source header1
                        "0", // number of columns for header1
                        // source header2
                        "3", // number of columns for header2
                        // header2, column 1
                        // type is cleartext due to unknown client type
                        "targetHeader2_1", // header2, column 1 target header
                        // header2, column 2
                        // type is cleartext due to unknown client type
                        "targetHeader2_2", // header2, column 2 target header
                        // header2, column 3
                        // type is cleartext due to unknown client type
                        "targetHeader2_3", // header2, column 3 target header
                        // source header3
                        "", // number of columns for header3 (default to 1)
                        // type is cleartext due to unknown client type
                        "" // header3, column 1 target header (default)
                );
        createInteractiveSchemaGenerator(userInput, headers, unknownColumnTypes, ClientSettings.highAssuranceMode());
        schemaGen.run();
        // Nothing eligible: the user is told and the target file stays empty.
        assertTrue(consoleOutput.toString().contains("No source columns could be considered for output"));
        assertEquals(0, targetSchema.toFile().length());
    }
}
// ==== File: c3r-cli/src/test/java/com/amazonaws/c3r/io/schema/CsvSchemaGeneratorTest.java ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Path;
import static com.amazonaws.c3r.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class CsvSchemaGeneratorTest {
    /** Sample CSV file (with a header row) shipped with the repository. */
    private static final String SAMPLE_CSV = FileUtil.CURRENT_DIR + "/../samples/csv/data_sample_without_quotes.csv";

    /**
     * Builds a CSV schema generator over {@code file} whose output goes to a scratch schema.json.
     *
     * @param file CSV input path
     * @return configured generator
     * @throws IOException if the scratch output path cannot be created
     */
    private CsvSchemaGenerator getTestSchemaGenerator(final String file) throws IOException {
        final String output = FileTestUtility.resolve("schema.json").toString();
        return CsvSchemaGenerator.builder()
                .inputCsvFile(file)
                .targetJsonFile(output)
                .hasHeaders(true)
                .overwrite(true)
                .build();
    }

    @Test
    public void getSourceHeadersTest() throws IOException {
        final var generator = getTestSchemaGenerator(SAMPLE_CSV);
        assertEquals(DATA_SAMPLE_HEADERS, generator.getSourceHeaders());
    }

    @Test
    public void getSourceColumnCountTest() throws IOException {
        final var generator = getTestSchemaGenerator(SAMPLE_CSV);
        assertEquals(DATA_SAMPLE_HEADERS.size(), generator.getSourceColumnCount());
    }

    @Test
    public void emptyFileTest() throws IOException {
        // A completely empty CSV cannot supply headers, so construction must fail.
        final Path emptyCsvFile = FileTestUtility.createTempFile("empty", ".csv");
        assertThrows(C3rRuntimeException.class, () -> getTestSchemaGenerator(emptyCsvFile.toString()));
    }
}
// ==== File: c3r-cli/src/test/java/com/amazonaws/c3r/io/schema/TemplateSchemaGeneratorTest.java ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.utils.FileTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class TemplateSchemaGeneratorTest {
    /**
     * Expected template for a header-based (mapped) schema when cleartext columns are allowed,
     * i.e. no clean room settings were supplied or the settings are permissive.
     */
    private static final String MAPPED_TEMPLATE_CLEARTEXT_ALLOWED = String.join("\n",
            "{",
            "  \"headerRow\": true,",
            "  \"columns\": [",
            "    {",
            "      \"sourceHeader\": \"header1\",",
            "      \"targetHeader\": \"header1\",",
            "      \"type\": \"[sealed|fingerprint|cleartext]\",",
            "      \"pad\": {",
            "        \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
            "        \"type\": \"[none|fixed|max]\",",
            "        \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
            "      }",
            "    },",
            "    {",
            "      \"sourceHeader\": \"header2\",",
            "      \"targetHeader\": \"header2\",",
            "      \"type\": \"cleartext\"",
            "    }",
            "  ]",
            "}");

    /**
     * Expected template for a positional (headerless) schema when cleartext columns are allowed.
     */
    private static final String POSITIONAL_TEMPLATE_CLEARTEXT_ALLOWED = String.join("\n",
            "{",
            "  \"headerRow\": false,",
            "  \"columns\": [",
            "    [",
            "      {",
            "        \"targetHeader\": \"_c0\",",
            "        \"type\": \"[sealed|fingerprint|cleartext]\",",
            "        \"pad\": {",
            "          \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
            "          \"type\": \"[none|fixed|max]\",",
            "          \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
            "        }",
            "      }",
            "    ],",
            "    [",
            "      {",
            "        \"targetHeader\": \"_c1\",",
            "        \"type\": \"cleartext\"",
            "      }",
            "    ]",
            "  ]",
            "}");

    /**
     * Expected mapped template under restrictive settings: the unknown-typed (cleartext-only)
     * column is dropped entirely.
     */
    private static final String MAPPED_TEMPLATE_NO_CLEARTEXT = String.join("\n",
            "{",
            "  \"headerRow\": true,",
            "  \"columns\": [",
            "    {",
            "      \"sourceHeader\": \"header1\",",
            "      \"targetHeader\": \"header1\",",
            "      \"type\": \"[sealed|fingerprint]\",",
            "      \"pad\": {",
            "        \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
            "        \"type\": \"[none|fixed|max]\",",
            "        \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
            "      }",
            "    }",
            "  ]",
            "}");

    /**
     * Expected positional template under restrictive settings: the unknown-typed column becomes an
     * empty target list.
     */
    private static final String POSITIONAL_TEMPLATE_NO_CLEARTEXT = String.join("\n",
            "{",
            "  \"headerRow\": false,",
            "  \"columns\": [",
            "    [",
            "      {",
            "        \"targetHeader\": \"_c0\",",
            "        \"type\": \"[sealed|fingerprint]\",",
            "        \"pad\": {",
            "          \"COMMENT\": \"omit this pad entry unless column type is sealed\",",
            "          \"type\": \"[none|fixed|max]\",",
            "          \"length\": \"omit length property for type none, otherwise specify value in [0, 10000]\"",
            "        }",
            "      }",
            "    ],",
            "    []",
            "  ]",
            "}");

    /** Source column types shared by all generation tests: one supported, one unknown. */
    private static final List<ClientDataType> SOURCE_TYPES = List.of(ClientDataType.STRING, ClientDataType.UNKNOWN);

    /** Scratch file the generator writes the template schema to. */
    private Path tempSchema;

    @BeforeEach
    public void setup() throws IOException {
        tempSchema = FileTestUtility.resolve("schema.json");
    }

    /** Headers used by the mapped-schema tests, one per entry of {@link #SOURCE_TYPES}. */
    private static List<ColumnHeader> testHeaders() {
        return List.of(
                new ColumnHeader("header1"),
                new ColumnHeader("header2"));
    }

    /**
     * Builds and runs a template generator, then returns the produced file content.
     *
     * @param sourceHeaders headers for a mapped schema, or {@code null} for a positional schema
     * @param settings clean room settings, or {@code null} to omit them entirely
     * @return generated template text (UTF-8)
     * @throws IOException if the output file cannot be read
     */
    private String generate(final List<ColumnHeader> sourceHeaders, final ClientSettings settings) throws IOException {
        final var builder = TemplateSchemaGenerator.builder()
                .sourceHeaders(sourceHeaders)
                .sourceColumnTypes(SOURCE_TYPES)
                .targetJsonFile(tempSchema.toString());
        if (settings != null) {
            builder.clientSettings(settings);
        }
        builder.build().run();
        return Files.readString(tempSchema, StandardCharsets.UTF_8);
    }

    @Test
    public void validateErrorWithMismatchedColumnCounts() {
        // One header but zero column types must be rejected at construction time.
        assertThrows(C3rIllegalArgumentException.class, () ->
                TemplateSchemaGenerator.builder()
                        .sourceHeaders(List.of(new ColumnHeader("_c0")))
                        .sourceColumnTypes(List.of())
                        .targetJsonFile(tempSchema.toString())
                        .build());
    }

    @Test
    public void testTemplateWithSourceHeadersNoSettingsGeneration() throws IOException {
        assertEquals(MAPPED_TEMPLATE_CLEARTEXT_ALLOWED, generate(testHeaders(), null));
    }

    @Test
    public void testTemplateWithoutSourceHeadersNoSettingsGeneration() throws IOException {
        assertEquals(POSITIONAL_TEMPLATE_CLEARTEXT_ALLOWED, generate(null, null));
    }

    @Test
    public void testTemplateWithSourceHeadersPermissiveSettingsGeneration() throws IOException {
        // Permissive settings allow cleartext, so the output matches the no-settings template.
        assertEquals(MAPPED_TEMPLATE_CLEARTEXT_ALLOWED, generate(testHeaders(), ClientSettings.lowAssuranceMode()));
    }

    @Test
    public void testTemplateWithoutSourceHeadersPermissiveSettingsGeneration() throws IOException {
        // Permissive settings allow cleartext, so the output matches the no-settings template.
        assertEquals(POSITIONAL_TEMPLATE_CLEARTEXT_ALLOWED, generate(null, ClientSettings.lowAssuranceMode()));
    }

    @Test
    public void testTemplateWithSourceHeadersRestrictiveSettingsGeneration() throws IOException {
        assertEquals(MAPPED_TEMPLATE_NO_CLEARTEXT, generate(testHeaders(), ClientSettings.highAssuranceMode()));
    }

    @Test
    public void testTemplateWithoutSourceHeadersRestrictiveSettingsGeneration() throws IOException {
        assertEquals(POSITIONAL_TEMPLATE_NO_CLEARTEXT, generate(null, ClientSettings.highAssuranceMode()));
    }
}
// ==== File: c3r-cli/src/test/java/com/amazonaws/c3r/utils/TimingResultTestUtility.java ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.config.ColumnType;
import lombok.Builder;
/**
* Used to store performance testing metrics.
*/
@Builder
public class TimingResultTestUtility {
/**
* Header names for timing results.
*/
public static final String[] HEADERS = {
"Columns",
"Rows",
"Marshal Time (s)",
"Unmarshal Time (s)",
"Input Size (MB)",
"Marshalled Size (MB)",
"Unmarshalled Size (MB)",
"Cleartext Columns",
"Sealed Columns",
"Fingerprint Columns",
"Chars/Entry"
};
/**
* How many column types we are supporting.
*/
private static final int NUM_COL_TYPES = ColumnType.values().length;
/**
* Conversion factor for bytes to megabytes.
*/
private static final double MB = Math.pow(2, 20);
/**
* How many characters per entry in the input file.
*/
private Integer charsPerEntry;
/**
* Number of columns in the files.
*/
private Integer columnCount;
/**
* Number of rows in the files.
*/
private Long rowCount;
/**
* Size of original input file.
*/
private Long inputSizeBytes;
/**
* Time spent marshalling data.
*/
private Long marshalTimeSec;
/**
* Size of marshalled file.
*/
private Long marshalledSizeBytes;
/**
* Time spent unmarshalling data.
*/
private Long unmarshalTimeSec;
/**
* Size of the unmarshalled file.
*/
private Long unmarshalledSizeBytes;
} | 2,665 |
// ==== File: c3r-cli/src/test/java/com/amazonaws/c3r/utils/TableGeneratorTestUtility.java ====
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.config.ColumnType;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import lombok.Builder;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* Used to generate CSV files with random data and an associated schema for testing purposes.
*/
@Builder
public final class TableGeneratorTestUtility {
/**
* Number of column types currently supported.
*/
private static final int COL_TYPES = ColumnType.values().length;
/**
* Hidden utility class constructor.
*/
private TableGeneratorTestUtility() {
}
/**
* Generates unique column header names based on type.
*
* @param columnIndex Which column to create a header for
* @return Column type name followed by column number
*/
private static String headerName(final int columnIndex) {
switch (columnIndex % COL_TYPES) {
case 0:
return "cleartext" + columnIndex;
case 1:
return "sealed" + columnIndex;
default:
return "fingerprint" + columnIndex;
}
}
/**
* Generates the JSON output for a column schema. During data generation the column types are evenly rotated between:
* <ul>
* <li>Cleartext</li>
* <li>Sealed with a Max Pad of Length 0</li>
* <li>Fingerprint</li>
* </ul>
*
* @param columnIndex Which column to generate a schema for (determines types)
* @return JSON object representing the column's schema
*/
private static JsonObject columnSchema(final int columnIndex) {
final JsonObject obj = new JsonObject();
final JsonObject pad = new JsonObject();
obj.addProperty("sourceHeader", headerName(columnIndex));
switch (columnIndex % COL_TYPES) {
case 0:
obj.addProperty("type", "cleartext");
break;
case 1:
obj.addProperty("type", "sealed");
pad.addProperty("type", "max");
pad.addProperty("length", 0);
obj.add("pad", pad);
break;
default:
obj.addProperty("type", "fingerprint");
break;
}
return obj;
}
/**
* Generates a prefix for the CSV and schema files.
*
* @param columnCount Number of columns in generated file
* @param rowCount Number of rows in generated file
* @return String value {@code misc<columnCount>by<rowCount>-} for start of file name
*/
public static String filePrefix(final int columnCount, final long rowCount) {
return "misc" + columnCount + "by" + rowCount + "-";
}
/**
* Generates a schema to match the generated CSV file. Column types rotate as specified in {@link #columnSchema(int)}.
*
* @param columnCount Number of columns in generated file
* @param rowCount Number of rows in generated file (used for naming file only)
* @return Path to schema file
* @throws IOException If there was an error writing the schema to disk
*/
public static Path generateSchema(final int columnCount, final long rowCount) throws IOException {
final JsonArray columns = new JsonArray(columnCount);
for (int i = 0; i < columnCount; i++) {
columns.add(columnSchema(i));
}
final JsonObject content = new JsonObject();
content.add("headerRow", new JsonPrimitive(true));
content.add("columns", columns);
final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".json");
final var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8);
writer.write(content.toString());
writer.close();
return path;
}
/**
 * Generate a random alphanumeric string of the specified size.
 *
 * @param size Number of characters in the string
 * @return Random alphanumeric string
 */
private static String randomString(final int size) {
    // Alphabet of all ASCII letters and digits to draw characters from.
    final String alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
    final Random rng = new Random();
    final char[] result = new char[size];
    for (int i = 0; i < size; i++) {
        result[i] = alphabet.charAt(rng.nextInt(alphabet.length()));
    }
    return new String(result);
}
/**
 * Creates a CSV file of the specified size filled with random alphanumeric strings.
 * Every entry in a given row holds the same random value; a header row is always written.
 *
 * @param entrySize Number of characters in each entry
 * @param columnCount Number of columns in the output file
 * @param rowCount Number of rows in the output file
 * @return Path to the generated file
 * @throws IOException If an error occurred while writing the file
 */
public static Path generateCsv(final int entrySize, final int columnCount, final long rowCount)
        throws IOException {
    final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".csv");
    // try-with-resources closes (and flushes) the writer even if a write fails
    // partway through; the previous manual close() leaked the writer on error.
    try (var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8)) {
        final var headers = IntStream.range(0, columnCount).boxed().map(TableGeneratorTestUtility::headerName)
                .collect(Collectors.joining(","));
        writer.write(headers);
        writer.write(System.lineSeparator());
        // loop counter is long to match rowCount; an int counter would overflow
        // (and spin forever) for rowCount > Integer.MAX_VALUE
        for (long i = 0; i < rowCount; i++) {
            final String entry = randomString(entrySize);
            final var entries = new String[columnCount];
            Arrays.fill(entries, entry);
            writer.write(String.join(",", entries));
            writer.write(System.lineSeparator());
        }
    }
    return path;
}
}
| 2,666 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/utils/GeneralTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.config.ColumnHeader;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Predicate;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Set of Utilities used for Testing. A combination of file settings and helper functions.
 */
public abstract class GeneralTestUtility {
    /**
     * Example salt for testing.
     */
    public static final UUID EXAMPLE_SALT = UUID.fromString("00000000-1111-2222-3333-444444444444");

    /**
     * List of headers from the golden test file (data_sample.csv).
     */
    public static final List<ColumnHeader> DATA_SAMPLE_HEADERS =
            List.of(new ColumnHeader("FirstName"),
                    new ColumnHeader("LastName"),
                    new ColumnHeader("Address"),
                    new ColumnHeader("City"),
                    new ColumnHeader("State"),
                    new ColumnHeader("PhoneNumber"),
                    new ColumnHeader("Title"),
                    new ColumnHeader("Level"),
                    new ColumnHeader("Notes"));

    /**
     * Convenience overload taking the per-column predicates as varargs map entries.
     * Builds the predicate map and delegates to {@link #assertRowEntryPredicates(Map, Map)}.
     *
     * @param content A map of column headers to row content
     * @param predicates A variable length list of arguments that are map entries for testing row data
     * @see #assertRowEntryPredicates(Map, Map)
     */
    @SafeVarargs
    public static void assertRowEntryPredicates(final Map<String, String> content,
                                                final Map.Entry<String, Predicate<String>>... predicates) {
        assertRowEntryPredicates(content, Map.ofEntries(predicates));
    }

    /**
     * Using a mapping of headers to values and headers to test functions, verify each value in a row.
     *
     * @param content Map of column headers to row content
     * @param predicateMap Map of column headers to a predicate function to check the column's value
     * @throws RuntimeException If the headers under test don't match the headers in the row
     */
    public static void assertRowEntryPredicates(final Map<String, String> content, final Map<String, Predicate<String>> predicateMap) {
        // Guard against a malformed test: every row header must have exactly one predicate.
        if (!content.keySet().equals(predicateMap.keySet())) {
            throw new RuntimeException(
                    String.join("\n",
                            "Bad test! Content keys and predicate keys don't match!",
                            " Content headers: " + String.join(",", content.keySet()),
                            "Predicate headers: " + String.join(",", predicateMap.keySet())));
        }
        for (final Map.Entry<String, String> entry : content.entrySet()) {
            final String header = entry.getKey();
            final String value = entry.getValue();
            assertTrue(predicateMap.get(header).test(value),
                    "Row entry predicate failure: `" + header + "` -> `" + value + "`");
        }
    }
}
| 2,667 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/utils/StringTestUtilityTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import org.junit.jupiter.api.Test;
import static com.amazonaws.c3r.utils.StringTestUtility.countMatches;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Unit tests for {@link StringTestUtility#countMatches(String, String)}.
 */
public class StringTestUtilityTest {
    @Test
    public void countMatchesTest() {
        // no occurrences: empty content, or content without the search string
        assertEquals(0, countMatches("a", ""));
        assertEquals(0, countMatches("a", "b"));
        // exactly one occurrence
        assertEquals(1, countMatches("a", "a"));
        assertEquals(1, countMatches("a", "abcd"));
        // repeated occurrences, counted non-overlapping
        assertEquals(3, countMatches("a", "abcdabcdabcd"));
        assertEquals(3, countMatches("aa", "aaabcdaaabcdaaabcd"));
    }
}
| 2,668 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/utils/StringTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import java.util.regex.Pattern;
/**
 * String-inspection helpers shared by tests.
 */
public final class StringTestUtility {
    /** Hidden constructor: static utility class, never instantiated. */
    private StringTestUtility() {
    }

    /**
     * Counts how many times a search string occurs (non-overlapping) in given string content.
     *
     * @param searchString String to search for
     * @param content Content to search in
     * @return The number of occurrences of the search string in the content.
     */
    public static int countMatches(final String searchString, final String content) {
        // Splitting with limit -1 keeps trailing empty segments, so the number of
        // delimiters (matches) is always one less than the number of pieces.
        // Pattern.LITERAL makes the search string match verbatim, not as a regex.
        final String[] pieces = Pattern.compile(searchString, Pattern.LITERAL).split(content, -1);
        return pieces.length - 1;
    }
}
| 2,669 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/utils/FileTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
 * A test utility for creating temporary Path resources for tests that will clean themselves up after execution.
 */
public abstract class FileTestUtility {
    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempDir() throws IOException {
        return markDeleteOnExit(Files.createTempDirectory("temp"));
    }

    /**
     * Creates a temporary file with the prefix "testFile" and suffix ".tmp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the prefix and suffix provided marked with deleteOnExit.
     * The file lives inside a fresh temporary directory that is also cleaned up on exit.
     *
     * @param prefix The prefix of the Path to create
     * @param suffix The suffix of the Path to create
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        return markDeleteOnExit(Files.createTempFile(createTempDir(), prefix, suffix));
    }

    /**
     * Resolves a temporary file with the file name provided marked with deleteOnExit.
     * The returned path is not created on disk; it is only reserved for the caller to write.
     *
     * @param fileName The name of the Path to resolve
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be resolved
     */
    public static Path resolve(final String fileName) throws IOException {
        return resolve(fileName, createTempDir());
    }

    /**
     * Resolves {@code fileName} against {@code tempDir} and marks it for cleanup on exit.
     *
     * @param fileName The name of the Path to resolve
     * @param tempDir The Path to use to resolve the temporary file
     * @return A temporary Path
     */
    private static Path resolve(final String fileName, final Path tempDir) {
        return markDeleteOnExit(tempDir.resolve(fileName));
    }

    /**
     * Registers the path for deletion when the JVM exits and returns it for chaining.
     *
     * @param path The path to register
     * @return The same path
     */
    private static Path markDeleteOnExit(final Path path) {
        path.toFile().deleteOnExit();
        return path;
    }
}
| 2,670 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/MainEnvVarKeyInvalidTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
/*
 * Tests specifically needing an invalid key in the environment
 * variable for the shared secret key.
 */
public class MainEnvVarKeyInvalidTest {
    // Sample cleartext CSV input used for the encrypt run.
    private static final String ENC_INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";

    // Sample schema paired with the encrypt-mode input.
    private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";

    // Sample previously-encrypted CSV input used for the decrypt run.
    private static final String DEC_INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    // Decrypt-mode CLI arguments and command under test.
    private DecryptCliConfigTestUtility decArgs;

    private CommandLine decMain;

    // Encrypt-mode CLI arguments and command under test.
    private EncryptCliConfigTestUtility encArgs;

    private CommandLine encMain;

    /**
     * Executes encrypt mode with the configured CLI arguments.
     *
     * @return Exit code of the run
     */
    public int runEncryptMainWithCliArgs() {
        return encMain.execute(encArgs.toArrayWithoutMode());
    }

    /**
     * Executes decrypt mode with the configured CLI arguments.
     *
     * @return Exit code of the run
     */
    public int runDecryptMainWithCliArgs() {
        return decMain.execute(decArgs.toArrayWithoutMode());
    }

    @BeforeEach
    public void setup() {
        // Dry-run configurations: settings are validated but no data is processed.
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(DEC_INPUT_PATH);
        decMain = DecryptMode.getApp();
        encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(ENC_INPUT_PATH, SCHEMA_PATH);
        encMain = EncryptMode.getApp(null);
    }

    @Test
    public void validateEncryptSecretKeyInvalidTest() {
        // An invalid key in the environment must cause a non-zero exit code.
        assertNotEquals(0, runEncryptMainWithCliArgs());
    }

    @Test
    public void validateDecryptSecretKeyInvalidTest() {
        // An invalid key in the environment must cause a non-zero exit code.
        assertNotEquals(0, runDecryptMainWithCliArgs());
    }
}
| 2,671 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/CliTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import picocli.CommandLine;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Utilities to interface with the CLI interface as if you were calling from the command line.
 */
public final class CliTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CliTestUtility() {
    }

    /**
     * Function to test cli options without loading the entire system. First argument must be one of the sub-modes:
     * encrypt, decrypt, schema. From there, the rest of the parameters should match what is required of that particular sub-mode. If extra
     * parameters are present, an error will be thrown or if required parameters are missing and error will be thrown.
     *
     * @param args Set of strings corresponding to a run of the software
     * @return A data structure that stores all stages of parsing from initial reading of parameters until final matches are made
     */
    public static CommandLine.ParseResult verifyCliOptions(final String[] args) {
        return Main.getApp().parseArgs(args);
    }

    /**
     * Runs the cli with a mock to replace an actual connection to AWS Clean Rooms.
     *
     * @param args Command line parameters for encrypt mode
     * @return {@value Main#SUCCESS} if no errors are encountered or {@value Main#FAILURE}
     */
    public static int runWithoutCleanRooms(final EncryptCliConfigTestUtility args) {
        // Mocked DAO returns the caller-supplied client settings instead of
        // fetching collaboration metadata from the AWS Clean Rooms service.
        final CleanRoomsDao dao = CleanRoomsDaoTestUtility.generateMockDao();
        when(dao.getCollaborationDataEncryptionMetadata(any())).thenReturn(args.getClientSettings());
        return EncryptMode.getApp(dao).execute(args.toArrayWithoutMode());
    }
}
| 2,672 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/MainArgParseTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import picocli.CommandLine;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Class for testing CLI argument parsing from the top-level which intentionally
 * does not execute any C3R business logic. I.e., only testing CLI parsing
 * configurations are correct with respect to which arguments are required,
 * which are exclusive, how certain common behaviors are triggered, etc.
 */
public class MainArgParseTest {
    @Test
    public void noArgsTest() {
        // No arguments: nothing requested, no subcommand selected.
        final CommandLine.ParseResult parsed = Main.getApp().parseArgs();
        assertFalse(parsed.isVersionHelpRequested());
        assertFalse(parsed.isUsageHelpRequested());
        assertEquals(0, parsed.subcommands().size());
    }

    @ParameterizedTest
    @ValueSource(strings = {"-V", "--version"})
    public void mainVersionTest(final String versionFlag) {
        final CommandLine.ParseResult parsed = Main.getApp().parseArgs(versionFlag);
        assertTrue(parsed.isVersionHelpRequested());
        assertFalse(parsed.isUsageHelpRequested());
        assertEquals(0, parsed.subcommands().size());
    }

    @ParameterizedTest
    @ValueSource(strings = {"-h", "--help"})
    public void mainHelpTest(final String helpFlag) {
        final CommandLine.ParseResult parsed = Main.getApp().parseArgs(helpFlag);
        assertFalse(parsed.isVersionHelpRequested());
        assertTrue(parsed.isUsageHelpRequested());
        assertEquals(0, parsed.subcommands().size());
    }

    /**
     * Check help parses as expected for a certain mode.
     *
     * @param mode CLI mode
     * @param help Help flag
     */
    private void checkModeHelpFlag(final String mode, final String help) {
        final CommandLine.ParseResult topLevel = Main.getApp().parseArgs(mode, help);
        assertEquals(1, topLevel.subcommands().size());
        final CommandLine.ParseResult subResult = topLevel.subcommand();
        assertEquals(mode, subResult.commandSpec().name());
        assertEquals(1, subResult.expandedArgs().size());
        assertEquals(help, subResult.expandedArgs().get(0));
        assertFalse(subResult.isVersionHelpRequested());
        assertTrue(subResult.isUsageHelpRequested());
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeHelpFlagTest(final String mode) {
        checkModeHelpFlag(mode, "-h");
        checkModeHelpFlag(mode, "--help");
    }

    /**
     * Check version parses as expected for a certain mode.
     *
     * @param mode CLI mode
     * @param version Version flag
     */
    private void checkModeVersionFlag(final String mode, final String version) {
        final CommandLine.ParseResult topLevel = Main.getApp().parseArgs(mode, version);
        assertEquals(1, topLevel.subcommands().size());
        final CommandLine.ParseResult subResult = topLevel.subcommand();
        assertEquals(mode, subResult.commandSpec().name());
        assertEquals(1, subResult.expandedArgs().size());
        assertEquals(version, subResult.expandedArgs().get(0));
        assertTrue(subResult.isVersionHelpRequested());
        assertFalse(subResult.isUsageHelpRequested());
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeVersionFlagTest(final String mode) {
        checkModeVersionFlag(mode, "-V");
        checkModeVersionFlag(mode, "--version");
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void subcommandsWithNoArgsTest(final String mode) {
        // NOTE: This assumes the above listed modes have _some_ required arguments
        assertThrows(CommandLine.MissingParameterException.class, () -> Main.getApp().parseArgs(mode));
    }

    @Test
    public void invalidSubcommandTest() {
        // An unknown subcommand name must be rejected by the parser.
        assertThrows(CommandLine.UnmatchedArgumentException.class, () -> Main.getApp().parseArgs("not-a-real-mode"));
    }

    /**
     * Asserts that no errors occur when using the given minimal args,
     * and then asserts that removing any of the arguments after the
     * first (i.e., the mode name itself) raises an error and a missing parameter).
     *
     * @param minimalArgs Minimal argument list - first element is mode name, remaining are arguments
     *                    for that mode.
     */
    public void checkMinimalRequiredModeArgs(final String[] minimalArgs) {
        // NOTE: This assumes the above listed modes have _some_ required arguments
        assertDoesNotThrow(() -> Main.getApp().parseArgs(minimalArgs));
        // check that for this mode (element 0), removing any argument causes a CLI parse error
        for (int pos = 1; pos < minimalArgs.length; pos++) {
            final List<String> truncated = new java.util.ArrayList<>(Arrays.asList(minimalArgs));
            truncated.remove(pos);
            assertThrows(CommandLine.MissingParameterException.class, () ->
                    Main.getApp().parseArgs(truncated.toArray(new String[0])));
        }
    }

    @Test
    public void encryptWithRequiredArgs() {
        final String[] parameters = {"encrypt", "input", "--id=00000000-1111-2222-3333-444444444444", "--schema=schema"};
        checkMinimalRequiredModeArgs(parameters);
    }

    @Test
    public void decryptWithRequiredArgs() {
        final String[] parameters = {"decrypt", "input", "--id=00000000-1111-2222-3333-444444444444"};
        checkMinimalRequiredModeArgs(parameters);
    }

    @ParameterizedTest
    @ValueSource(strings = {"-t", "--template", "-i", "--interactive"})
    public void schemaWithRequiredArgs(final String modeFlag) {
        final String[] parameters = {"schema", "input", modeFlag};
        checkMinimalRequiredModeArgs(parameters);
    }

    @Test
    public void schemaGenModesExclusiveArgs() {
        final String[] parameters = {"schema", "input", "-i", "-t"};
        // parsing with both -i and -t errors due to those being mutually exclusive
        assertThrows(CommandLine.MutuallyExclusiveArgsException.class, () -> Main.getApp().parseArgs(parameters));
        // and simply dropping one fixes things
        assertDoesNotThrow(() -> Main.getApp().parseArgs(Arrays.copyOfRange(parameters, 0, parameters.length - 1)));
    }
}
| 2,673 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/EncryptCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for conveniently generating various command line argument
 * combinations for the `encrypt` command.
 */
@Setter
public final class EncryptCliConfigTestUtility {
    /**
     * Schema file location.
     */
    private String schema;

    /**
     * Collaboration ID to use for computing shared secret keys.
     */
    private String collaborationId;

    /**
     * Input file location.
     */
    @Getter
    private String input;

    /**
     * Value used in the input file to represent {@code null} in the CSV data.
     */
    private String csvInputNullValue;

    /**
     * Value to use in the output file to represent {@code null} in the CSV data.
     */
    private String csvOutputNullValue;

    /**
     * Value to use to see if binary parquet values should be treated as strings.
     */
    private Boolean parquetBinaryAsString;

    /**
     * Location to write the output file.
     */
    @Getter
    private String output;

    /**
     * Whether the output file should be overwritten if it already exists.
     */
    private boolean overwrite;

    /**
     * Whether encryption will actually be run or only the configuration will be validated.
     */
    private boolean dryRun;

    /**
     * Whether plaintext values are allowed.
     */
    private boolean allowCleartext;

    /**
     * Whether duplicate values are allowed in fingerprint columns.
     */
    private boolean allowDuplicates;

    /**
     * Whether columns with different names should be allowed in a join statement.
     */
    private boolean allowJoinsOnColumnsWithDifferentNames;

    /**
     * Whether {@code null} values should be preserved during encryption.
     */
    private boolean preserveNulls;

    /**
     * Whether a stacktrace should be printed.
     */
    private boolean enableStackTraces;

    /**
     * Input file data type.
     */
    private FileFormat fileFormat;

    /**
     * AWS CLI profile.
     */
    private String profile;

    /**
     * AWS region.
     */
    private String region;

    /**
     * Hidden default constructor so static instance creators are used.
     */
    private EncryptCliConfigTestUtility() {
    }

    /**
     * Default test values for encryption args to use with tests.
     *
     * @return Default test values
     */
    public static EncryptCliConfigTestUtility defaultTestArgs() {
        final var args = new EncryptCliConfigTestUtility();
        args.schema = "mySchema";
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.input = "mySourceFile";
        args.enableStackTraces = true;
        args.allowCleartext = true;
        args.overwrite = true;
        return args;
    }

    /**
     * Creates a test configuration for a dry run. Skips all data processing and validates settings.
     *
     * @param file Input file to use for the dry run
     * @param schema Schema file to use for the dry run
     * @return Default dry run configuration with specified files
     */
    public static EncryptCliConfigTestUtility defaultDryRunTestArgs(final String file, final String schema) {
        final var args = new EncryptCliConfigTestUtility();
        // fall back to placeholder names when no files are supplied
        args.schema = (schema != null) ? schema : "mySchema";
        args.input = (file != null) ? file : "mySourceFile";
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.overwrite = true;
        args.dryRun = true;
        args.allowCleartext = true;
        args.enableStackTraces = true;
        return args;
    }

    /**
     * Empty CLI configuration.
     *
     * @return Configuration instance with no set values
     */
    public static EncryptCliConfigTestUtility blankTestArgs() {
        return new EncryptCliConfigTestUtility();
    }

    /**
     * Create an instance of {@code ClientSettings} using the specified values.
     *
     * @return {@link ClientSettings} using values stored in this instance
     */
    public ClientSettings getClientSettings() {
        return ClientSettings.builder()
                .allowCleartext(allowCleartext)
                .allowDuplicates(allowDuplicates)
                .allowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames)
                .preserveNulls(preserveNulls).build();
    }

    /**
     * Converts the specified command line parameters to a list.
     *
     * @return List of command line parameters
     * @see EncryptCliConfigTestUtility#getCliArgsWithoutMode
     */
    public List<String> getCliArgs() {
        final List<String> args = new ArrayList<>();
        args.add("encrypt");
        if (input != null) {
            args.add(input);
        }
        addValueFlag(args, "--schema=", schema);
        addValueFlag(args, "--id=", collaborationId);
        addValueFlag(args, "--csvInputNULLValue=", csvInputNullValue);
        addValueFlag(args, "--csvOutputNULLValue=", csvOutputNullValue);
        addValueFlag(args, "--output=", output);
        addBooleanFlag(args, "--overwrite", overwrite);
        addBooleanFlag(args, "--dryRun", dryRun);
        addBooleanFlag(args, "--enableStackTraces", enableStackTraces);
        addValueFlag(args, "--fileFormat=", fileFormat);
        addValueFlag(args, "--profile=", profile);
        addValueFlag(args, "--region=", region);
        return args;
    }

    /**
     * Appends {@code prefix + value} to {@code args} when {@code value} is non-null.
     *
     * @param args Argument list being built
     * @param prefix Option prefix, e.g. {@code --schema=}
     * @param value Option value; skipped entirely when {@code null}
     */
    private static void addValueFlag(final List<String> args, final String prefix, final Object value) {
        if (value != null) {
            args.add(prefix + value);
        }
    }

    /**
     * Appends {@code flag} to {@code args} when {@code enabled} is true.
     *
     * @param args Argument list being built
     * @param flag Boolean option, e.g. {@code --overwrite}
     * @param enabled Whether the flag should be emitted
     */
    private static void addBooleanFlag(final List<String> args, final String flag, final boolean enabled) {
        if (enabled) {
            args.add(flag);
        }
    }

    /**
     * Converts the specified command line parameters to a list without including the CLI mode parameter.
     *
     * @return List of command line parameters.
     * @see EncryptCliConfigTestUtility#getCliArgs
     */
    public List<String> getCliArgsWithoutMode() {
        final List<String> cliArgs = getCliArgs();
        cliArgs.remove(0);
        return cliArgs;
    }

    /**
     * Converts the specified command line parameters to an array without including the CLI mode parameter.
     *
     * @return Array of command line parameters
     */
    public String[] toArrayWithoutMode() {
        return getCliArgsWithoutMode().toArray(new String[0]);
    }
}
| 2,674 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/SchemaModeTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import com.amazonaws.c3r.utils.StringTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class SchemaModeTest {
private static final int SAMPLE_DATA_COLUMN_COUNT = 9;
private static final String ALL_COLUMN_TYPES =
"[" + Arrays.stream(ColumnType.values())
.map(ColumnType::toString)
.collect(Collectors.joining("|")) + "]";
private static final String ALL_COLUMN_TYPES_SANS_CLEARTEXT =
"[" + Arrays.stream(ColumnType.values())
.filter(c -> c != ColumnType.CLEARTEXT)
.map(ColumnType::toString)
.collect(Collectors.joining("|")) + "]";
private Path schemaPath;
@BeforeEach
public void setup() throws IOException {
schemaPath = FileTestUtility.resolve("schema.json");
}
// Generate a template without settings and shallowly check content contains expected entries
private void runTemplateGeneratorNoSettings(final String inputFile,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.build();
assertEquals(0, SchemaMode.getApp(null).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(SAMPLE_DATA_COLUMN_COUNT,
StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
}
// Generate a template with permissive settings and shallowly check content contains expected entries
private void runTemplateGeneratorPermissiveSettings(final String inputFile,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
.build();
final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
assertEquals(0, SchemaMode.getApp(cleanRoomsDao).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(SAMPLE_DATA_COLUMN_COUNT,
StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
}
// Generate a template with restrictive settings and shallowly check content contains expected entries
private void runTemplateGeneratorRestrictiveSettings(final String inputFile,
final int expectedTargetColumnCount,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
.build();
final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
assertEquals(0, SchemaMode.getApp(cleanRoomsDao).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(expectedTargetColumnCount,
StringTestUtility.countMatches("targetHeader", contents));
assertEquals(expectedTargetColumnCount,
StringTestUtility.countMatches(ALL_COLUMN_TYPES_SANS_CLEARTEXT, contents));
}
// Run interactive schema gen without settings and check it returns results
// and shallowly check content contains expected entries
private void runInteractiveGeneratorNoSettings(final String inputFile,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--interactive")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.build();
// number greater than test file column counts (test will fail if too low, so no incorrectness risk in
// picking a number)
final int columnCountUpperBound = 100;
// user input which repeatedly says the source column in question should generate one cleartext column
// with a trivial name
final StringBuilder inputBuilder = new StringBuilder();
for (int i = 0; i < columnCountUpperBound; i++) {
// 1 target column
inputBuilder.append("1\n");
// target column type
inputBuilder.append("cleartext\n");
// target column name
inputBuilder.append("column").append(i).append('\n');
}
final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
System.setIn(new BufferedInputStream(userInput));
assertEquals(0, Main.getApp().execute(args.toArray()));
assertTrue(schemaPath.toFile().exists());
assertTrue(schemaPath.toFile().length() > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(SAMPLE_DATA_COLUMN_COUNT,
StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
}
// Run interactive schema gen with permissive settings and check it returns results
// and shallowly check content contains expected entries
private void runInteractiveGeneratorPermissiveSettings(final String inputFile,
                                                       final boolean hasHeaderRow) throws IOException {
    // Build CLI args for `schema --interactive` against the given input file.
    final var args = SchemaCliConfigTestUtility.builder()
            .input(inputFile)
            .output(schemaPath.toString())
            .subMode("--interactive")
            .noHeaders(!hasHeaderRow)
            .overwrite(true)
            .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
            .build();
    // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
    // picking a number)
    final int columnCountUpperBound = 100;
    // user input which repeatedly says the source column in question should generate one cleartext column
    // with a trivial name
    final StringBuilder inputBuilder = new StringBuilder();
    for (int i = 0; i < columnCountUpperBound; i++) {
        // 1 target column
        inputBuilder.append("1\n");
        // target column type
        inputBuilder.append("cleartext\n");
        // target column name
        inputBuilder.append("column").append(i).append('\n');
    }
    // Feed the scripted answers to the interactive prompts via stdin; must happen
    // before the app is executed.
    final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
    System.setIn(new BufferedInputStream(userInput));
    // Permissive (low assurance) collaboration settings permit cleartext columns.
    final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
    assertEquals(0, SchemaMode.getApp(cleanRoomsDao).execute(args.toArrayWithoutMode()));
    // Shallow content checks: schema file exists, is non-empty, and mentions the
    // expected header flag, source headers, and cleartext column types.
    assertTrue(schemaPath.toFile().exists());
    assertTrue(schemaPath.toFile().length() > 0);
    final String contents = Files.readString(schemaPath);
    assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
    assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
            StringTestUtility.countMatches("sourceHeader", contents));
    assertEquals(SAMPLE_DATA_COLUMN_COUNT,
            StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
}
// Run interactive schema gen with restrictive settings and check it returns results
// and shallowly check content contains expected entries
private void runInteractiveGeneratorRestrictiveSettings(final String inputFile,
                                                        final int expectedTargetColumnCount,
                                                        final boolean hasHeaderRow) throws IOException {
    // Build CLI args for `schema --interactive` against the given input file.
    final var args = SchemaCliConfigTestUtility.builder()
            .input(inputFile)
            .output(schemaPath.toString())
            .subMode("--interactive")
            .noHeaders(!hasHeaderRow)
            .overwrite(true)
            .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
            .build();
    // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
    // picking a number)
    final int columnCountUpperBound = 100;
    // user input which repeatedly says the source column in question should generate one cleartext column
    // with a trivial name
    final StringBuilder inputBuilder = new StringBuilder();
    for (int i = 0; i < columnCountUpperBound; i++) {
        // 1 target column
        inputBuilder.append("1\n");
        // target column type, will fail due to restrictive settings
        inputBuilder.append("cleartext\n");
        // target column type, will succeed
        inputBuilder.append("fingerprint\n");
        // target column name
        inputBuilder.append("column").append(i).append('\n');
        // skip suffix
        inputBuilder.append("\n");
    }
    // Feed the scripted answers to the interactive prompts via stdin; must happen
    // before the app is executed.
    final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
    System.setIn(new BufferedInputStream(userInput));
    // Restrictive (high assurance) collaboration settings reject cleartext columns.
    final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
    assertEquals(0, SchemaMode.getApp(cleanRoomsDao).execute(args.toArrayWithoutMode()));
    // Shallow content checks: no cleartext columns may appear; all target columns
    // should be fingerprint columns.
    assertTrue(schemaPath.toFile().exists());
    assertTrue(schemaPath.toFile().length() > 0);
    final String contents = Files.readString(schemaPath);
    assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
    assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
            StringTestUtility.countMatches("sourceHeader", contents));
    assertEquals(expectedTargetColumnCount,
            StringTestUtility.countMatches("targetHeader", contents));
    assertEquals(0,
            StringTestUtility.countMatches(ColumnType.CLEARTEXT.toString(), contents));
    assertEquals(expectedTargetColumnCount,
            StringTestUtility.countMatches("\"" + ColumnType.FINGERPRINT + "\"", contents));
}
// Template-mode schema generation over CSV and Parquet inputs, with no settings,
// permissive settings, and restrictive settings.
@Test
public void schemaTemplateCsvTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaTemplateCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaTemplateWithPermissiveSettingsCsvTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaTemplateWithPermissiveSettingsCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaTemplateWithRestrictiveSettingsCsvTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv", SAMPLE_DATA_COLUMN_COUNT, false);
}

@Test
public void schemaTemplateParquetTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaTemplateWithPermissiveSettingsParquetTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsParquetTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsParquetMixedDataTest() throws IOException {
    // only 8 columns are supported types, so we only expect 8 target columns
    runTemplateGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 8, true);
}
// Interactive-mode schema generation with no settings and with permissive settings.
@Test
public void schemaInteractiveCsvTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

// Check that interactive schema command returns results and shallowly check content contains expected entries
@Test
public void schemaInteractiveCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaInteractiveParquetTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaInteractivePermissiveSettingsCsvTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaInteractivePermissiveSettingsCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
}
// Interactive schema generation for Parquet input under permissive (low assurance)
// settings. Bug fix: this test previously called runInteractiveGeneratorNoSettings,
// duplicating schemaInteractiveParquetTest and leaving the permissive-settings
// Parquet path untested despite the test's name.
@Test
public void schemaInteractivePermissiveSettingsParquetTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
}
// Interactive-mode schema generation with restrictive settings.
@Test
public void schemaInteractiveRestrictiveSettingsCsvTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaInteractiveRestrictiveSettingsCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv", SAMPLE_DATA_COLUMN_COUNT, false);
}

@Test
public void schemaInteractiveRestrictiveSettingsParquetTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaInteractiveRestrictiveSettingsParquetMixedDataTest() throws IOException {
    // Only 8 columns are supported types, so we expect 8 target columns only
    runInteractiveGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 8, true);
}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.FingerprintTransformer;
import com.amazonaws.c3r.SealedTransformer;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvTestUtility;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Map.entry;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/*
 * A test class with a single row of data containing self-descriptively named column entries
 * across the gamut of possible kinds of data that could appear. Intended to act
 * as easy to audit unit tests for round tripping through the C3R with various settings and CSV input/output.
 */
public class MainCsvSingleRowRoundTripTest {
    // CLI argument holders; rebuilt from blank defaults before each test.
    private EncryptCliConfigTestUtility encArgs;

    private DecryptCliConfigTestUtility decArgs;

    // Optional custom NULL encodings for the encrypt/decrypt passes;
    // null means "use the CLI default".
    private String encCsvInputNull;

    private String encCsvOutputNull;

    private String decCsvInputNull;

    private String decCsvOutputNull;

    // ColumnSchema Name -> ColumnSchema Value mappings used for convenient testing data
    // written out to a CSV file and then parsed in
    private final List<Map.Entry<String, String>> exampleCsvEntries = List.of(
            Map.entry("foo", "foo"),
            Map.entry("quoted-foo", "\"foo\""),
            Map.entry("quoted-foo-newline-bar", "\"foo\nbar\""),
            Map.entry("blank", ""), // `,,`
            Map.entry("1space", " "), // `, ,`
            Map.entry("quoted-blank", "\"\""),
            Map.entry("quoted-1space", "\" \"")
    );

    // Temp CSV input file holding one header row and one value row (written in setup()).
    private Path input;
// Build a ColumnSchema whose source and target headers share the given name.
// A pad is attached only for sealed columns; it is ignored for all other types.
private ColumnSchema createColumn(final String headerName, final ColumnType type, final Pad pad) {
    final var builder = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader(headerName))
            .targetHeader(new ColumnHeader(headerName))
            .type(type);
    return type == ColumnType.SEALED ? builder.pad(pad).build() : builder.build();
}
// Create a schema where all columns have the same type and padding.
private TableSchema createMonoSchema(final ColumnType type, final Pad pad) {
    // Guard against test-author error: padding only applies to sealed columns.
    if (type != ColumnType.SEALED && pad != null) {
        throw new C3rRuntimeException("Bad test! Can't pad non-sealed columns!");
    }
    // One column per example entry, each keeping its descriptive header name.
    return new MappedTableSchema(exampleCsvEntries.stream()
            .map(entry -> createColumn(entry.getKey(), type, pad))
            .collect(Collectors.toList())
    );
}
// Write the single-row sample CSV (header row + one value row) to a fresh temp
// file and reset all CLI args and custom NULL encodings to their defaults.
@BeforeEach
public void setup() throws IOException {
    input = FileTestUtility.createTempFile("csv-values", ".csv");
    final String headerRow = exampleCsvEntries.stream().map(Map.Entry::getKey).collect(Collectors.joining(","));
    final String valueRow = exampleCsvEntries.stream().map(Map.Entry::getValue).collect(Collectors.joining(","));
    Files.writeString(input,
            String.join("\n",
                    headerRow,
                    valueRow));
    encArgs = EncryptCliConfigTestUtility.blankTestArgs();
    decArgs = DecryptCliConfigTestUtility.blankTestArgs();
    // null = use default NULL handling unless a test overrides these.
    encCsvInputNull = null;
    encCsvOutputNull = null;
    decCsvInputNull = null;
    decCsvOutputNull = null;
}
/**
 * Encrypt the sample CSV using a schema in which every column has the given type
 * (and pad, when sealed), asserting the CLI exits successfully.
 *
 * @param type Column type to apply to every column
 * @param pad  Pad for sealed columns; {@code null} for other types
 * @return Path to the encrypted output CSV
 * @throws IOException If temp files or the schema file cannot be written
 */
private String encrypt(final ColumnType type, final Pad pad) throws IOException {
    final String output = FileTestUtility.createTempFile("encrypted", ".csv").toString();
    final Path schemaPath = FileTestUtility.createTempFile("schema", ".json");
    schemaPath.toFile().deleteOnExit();
    // try-with-resources: the original leaked the writer if write() threw before close().
    try (var writer = Files.newBufferedWriter(schemaPath, StandardCharsets.UTF_8)) {
        writer.write(GsonUtil.toJson(createMonoSchema(type, pad)));
    }
    encArgs.setInput(input.toString());
    encArgs.setAllowCleartext(true);
    encArgs.setEnableStackTraces(true);
    encArgs.setSchema(schemaPath.toString());
    encArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
    encArgs.setOutput(output);
    encArgs.setOverwrite(true);
    // Only override NULL encodings when the test has configured them.
    if (encCsvInputNull != null) {
        encArgs.setCsvInputNullValue(encCsvInputNull);
    }
    if (encCsvOutputNull != null) {
        encArgs.setCsvOutputNullValue(encCsvOutputNull);
    }
    // Mock AWS Clean Rooms so the collaboration's settings come from the test args.
    final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
    final int exitCode = EncryptMode.getApp(cleanRoomsDao).execute(encArgs.toArrayWithoutMode());
    assertEquals(0, exitCode);
    return output;
}
// Convenience wrappers: encrypt the sample with every column mapped to one type.
private String encryptAllColumnsCleartext() throws IOException {
    return encrypt(ColumnType.CLEARTEXT, null);
}

private String encryptAllColumnsSealed() throws IOException {
    return encrypt(ColumnType.SEALED, Pad.DEFAULT);
}

private String encryptAllColumnsFingerprint() throws IOException {
    return encrypt(ColumnType.FINGERPRINT, null);
}
/**
 * Decrypt the given encrypted CSV via the CLI, asserting it exits successfully.
 *
 * @param inPath Path to the encrypted CSV
 * @return Path to the decrypted output CSV
 * @throws IOException If the temp output file cannot be created
 */
private String decrypt(final String inPath) throws IOException {
    final String output = FileTestUtility.createTempFile("decrypted", ".csv").toString();
    decArgs.setInput(inPath);
    // Fingerprint columns can't be decrypted; don't treat their presence as an error.
    decArgs.setFailOnFingerprintColumns(false);
    decArgs.setEnableStackTraces(true);
    decArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
    decArgs.setOutput(output);
    decArgs.setOverwrite(true);
    // Only override NULL encodings when the test has configured them.
    if (decCsvInputNull != null) {
        decArgs.setCsvInputNullValue(decCsvInputNull);
    }
    if (decCsvOutputNull != null) {
        decArgs.setCsvOutputNullValue(decCsvOutputNull);
    }
    final int exitCode = Main.getApp().execute(decArgs.toArray());
    assertEquals(0, exitCode);
    return output;
}
// Parse the CSV at `path`, assert it holds exactly one data row, and return that row.
private Map<String, String> readSingleCsvRow(final String path) {
    final var parsedRows = CsvTestUtility.readRows(path);
    assertEquals(1, parsedRows.size());
    return parsedRows.get(0);
}
// Encrypt all columns as cleartext, check the encrypted row against expectedEncRow,
// then decrypt and check the result against expectedDecRow.
public void validateCleartextRoundTripEncDecRowContent(final Map<String, Predicate<String>> expectedEncRow,
                                                       final Map<String, Predicate<String>> expectedDecRow) throws IOException {
    final String encryptedPath = encryptAllColumnsCleartext();
    final var rowPostEncryption = readSingleCsvRow(encryptedPath);
    GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption, expectedEncRow);
    final String decryptedPath = decrypt(encryptedPath);
    final var rowPostDecryption = readSingleCsvRow(decryptedPath);
    GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption, expectedDecRow);
}
// Encrypt all columns as sealed, verify every encrypted value carries the sealed
// descriptor prefix, then decrypt and check the row against expectedDecRow.
public void validateSealedRoundTripDecRowContent(final Map<String, Predicate<String>> expectedDecRow) throws IOException {
    final String encryptedPath = encryptAllColumnsSealed();
    final var rowPostEncryption = readSingleCsvRow(encryptedPath);
    // Replaces an obfuscated map/dropWhile/collect/isEmpty chain with the direct
    // equivalent: every encrypted value must start with the sealed prefix.
    assertTrue(rowPostEncryption.values().stream()
            .allMatch((val) -> val.startsWith(SealedTransformer.DESCRIPTOR_PREFIX_STRING)));
    final String decryptedPath = decrypt(encryptedPath);
    final var rowPostDecryption = readSingleCsvRow(decryptedPath);
    GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption, expectedDecRow);
}
// Default NULL handling, cleartext round trip: encrypted and decrypted rows should
// be identical to each other.
@Test
public void defaultEncNulls_defaultDecNulls_EncDec_CleartextTest() throws IOException {
    // encryption and decryption have same expected rows
    final Map<String, Predicate<String>> expectedRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("foo")),
            entry("quoted-foo", (val) -> val.equals("foo")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateCleartextRoundTripEncDecRowContent(expectedRow, expectedRow);
}
// Custom NULL encodings on the encrypt side only: input "foo" is read as NULL and
// NULL is written out as "bar"; the decrypt pass uses defaults.
@Test
public void customEncNulls_Enc_CleartextTest() throws IOException {
    encCsvInputNull = "foo";
    encCsvOutputNull = "bar";
    final Map<String, Predicate<String>> expectedEncRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("bar")),
            entry("quoted-foo", (val) -> val.equals("bar")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateCleartextRoundTripEncDecRowContent(expectedEncRow, expectedEncRow);
}
// Custom NULL encodings on the decrypt side only: decrypt reads "foo" as NULL and
// writes NULL out as "bar"; the encrypt pass uses defaults.
@Test
public void customNulls_Dec_CleartextTest() throws IOException {
    decCsvInputNull = "foo";
    decCsvOutputNull = "bar";
    final Map<String, Predicate<String>> expectedEncryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("foo")),
            entry("quoted-foo", (val) -> val.equals("foo")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("bar")),
            entry("quoted-foo", (val) -> val.equals("bar")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateCleartextRoundTripEncDecRowContent(expectedEncryptRow, expectedDecryptRow);
}
// Custom NULL encodings on both sides, chained so the encrypt output NULL ("bar")
// becomes the decrypt input NULL, which is then written out as "baz".
@Test
public void customNulls_EncDec_CleartextTest() throws IOException {
    encCsvInputNull = "foo";
    encCsvOutputNull = "bar";
    decCsvInputNull = "bar";
    decCsvOutputNull = "baz";
    final Map<String, Predicate<String>> expectedEncryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("bar")),
            entry("quoted-foo", (val) -> val.equals("bar")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("baz")),
            entry("quoted-foo", (val) -> val.equals("baz")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
            entry("1space", (val) -> val.equals("")),
            // by default, a blank and a quoted blank both are treated as NULL
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateCleartextRoundTripEncDecRowContent(expectedEncryptRow, expectedDecryptRow);
}
// Sealed round trip with all-default NULL handling: values should come back as
// they went in (modulo NULL normalization).
@Test
public void defaultEncNulls_defaultDecNulls_EncDec_SealedTest() throws IOException {
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("foo")),
            entry("quoted-foo", (val) -> val.equals("foo")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            entry("1space", (val) -> val.equals("")),
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space and so it isn't considered NULL
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateSealedRoundTripDecRowContent(expectedDecryptRow);
}

// Sealed round trip with custom NULLs on the encrypt side but default NULL output
// on the decrypt side.
@Test
public void customEncNulls_defaultDecOutNull_EncDec_SealedTest() throws IOException {
    encCsvInputNull = "foo";
    encCsvOutputNull = "bar";
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            // encrypted as NULL
            entry("foo", (val) -> val.equals("")),
            // encrypted as NULL
            entry("quoted-foo", (val) -> val.equals("")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            // written as `,"",` since default NULL encoding `,,` is being used
            entry("blank", (val) -> val.equals("\"\"")),
            // written as `,"",` since default NULL encoding `,,` is being used
            entry("1space", (val) -> val.equals("\"\"")),
            // written as `,"",` since default NULL encoding `,,` is being used
            entry("quoted-blank", (val) -> val.equals("\"\"")),
            // quotes preserve leading/trailing space and so it isn't considered NULL
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateSealedRoundTripDecRowContent(expectedDecryptRow);
}
// Sealed round trip with custom encrypt NULLs and a custom decrypt output NULL.
@Test
public void customEncNulls_customDecOutNull_EncDec_SealedTest() throws IOException {
    encCsvInputNull = "foo";
    encCsvOutputNull = "bar";
    decCsvOutputNull = "baz";
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("baz")),
            entry("quoted-foo", (val) -> val.equals("baz")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            entry("1space", (val) -> val.equals("")),
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space and so it isn't considered NULL
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateSealedRoundTripDecRowContent(expectedDecryptRow);
}

// Sealed round trip with default encrypt NULLs but custom decrypt NULL handling.
@Test
public void defaultEncNulls_customDecNulls_EncDec_SealedTest() throws IOException {
    decCsvInputNull = "";
    decCsvOutputNull = "baz";
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("foo")),
            entry("quoted-foo", (val) -> val.equals("foo")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("baz")),
            entry("1space", (val) -> val.equals("baz")),
            entry("quoted-blank", (val) -> val.equals("baz")),
            // quotes preserve leading/trailing space and so it isn't considered NULL
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateSealedRoundTripDecRowContent(expectedDecryptRow);
}

// Sealed round trip with custom NULL encodings on both sides, including values
// containing commas (which must come back CSV-quoted).
@Test
public void customEncNulls_customDecNulls_EncDec_SealedTest() throws IOException {
    encCsvInputNull = "foo";
    encCsvOutputNull = "Aliens";
    decCsvInputNull = " ";
    decCsvOutputNull = "Zombies, run!";
    final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
            entry("foo", (val) -> val.equals("\"Zombies, run!\"")),
            entry("quoted-foo", (val) -> val.equals("\"Zombies, run!\"")),
            entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
            entry("blank", (val) -> val.equals("")),
            entry("1space", (val) -> val.equals("")),
            entry("quoted-blank", (val) -> val.equals("")),
            // quotes preserve leading/trailing space and so it isn't considered NULL
            entry("quoted-1space", (val) -> val.equals("\" \""))
    );
    validateSealedRoundTripDecRowContent(expectedDecryptRow);
}
// Fingerprint round trip with default NULL handling: all values must carry the
// fingerprint descriptor prefix, equal non-NULL values must match iff
// allowJoinsOnColumnsWithDifferentNames, and NULLs must always be unique.
public void defaultNull_EncDec_Fingerprint(final boolean allowJoinsOnColumnsWithDifferentNames) throws IOException {
    encArgs.setAllowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames);
    final String encryptedPath = encryptAllColumnsFingerprint();
    final var rowPostEncryption = readSingleCsvRow(encryptedPath);
    final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
    GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
            entry("foo", isFingerprintEncrypted),
            entry("quoted-foo", isFingerprintEncrypted),
            entry("quoted-foo-newline-bar", isFingerprintEncrypted),
            entry("blank", isFingerprintEncrypted),
            entry("1space", isFingerprintEncrypted),
            entry("quoted-blank", isFingerprintEncrypted),
            entry("quoted-1space", isFingerprintEncrypted)
    );
    // check non-NULL values (`foo` and `"foo"`) get the same encoding
    // iff allowJoinsOnColumnsWithDifferentNames is true
    if (allowJoinsOnColumnsWithDifferentNames) {
        assertEquals(
                rowPostEncryption.get("foo"),
                rowPostEncryption.get("quoted-foo"));
    } else {
        assertNotEquals(
                rowPostEncryption.get("foo"),
                rowPostEncryption.get("quoted-foo"));
    }
    // ensure we always transform NULL to unique values to preserve
    // the "uniqueness" of NULLs w.r.t. SQL semantics
    assertNotEquals(
            rowPostEncryption.get("blank"),
            rowPostEncryption.get("1space"));
    assertNotEquals(
            rowPostEncryption.get("blank"),
            rowPostEncryption.get("quoted-blank"));
    // fingerprint values don't get decrypted
    final String decryptedPath = decrypt(encryptedPath);
    final var rowPostDecryption = readSingleCsvRow(decryptedPath);
    GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption,
            entry("foo", isFingerprintEncrypted),
            entry("quoted-foo", isFingerprintEncrypted),
            entry("quoted-foo-newline-bar", isFingerprintEncrypted),
            entry("blank", isFingerprintEncrypted),
            entry("1space", isFingerprintEncrypted),
            entry("quoted-blank", isFingerprintEncrypted),
            entry("quoted-1space", isFingerprintEncrypted)
    );
}

@Test
public void defaultNull_EncDec_allowJoinsOnColumnsWithDifferentNamesIsTrue_FingerprintTest() throws IOException {
    defaultNull_EncDec_Fingerprint(true);
}

@Test
public void defaultNull_EncDec_allowJoinsOnColumnsWithDifferentNamesIsFalse_FingerprintTest() throws IOException {
    defaultNull_EncDec_Fingerprint(false);
}
// Fingerprint encryption where a bare blank (``) is the NULL encoding: every NULL
// must still encrypt to a unique value, and `,,` must differ from `,"",`.
public void blankEncNull_Fingerprint(final boolean allowJoinsOnColumnsWithDifferentNames) throws IOException {
    encCsvInputNull = "";
    encArgs.setAllowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames);
    final String encryptedPath = encryptAllColumnsFingerprint();
    final var rowPostEncryption = readSingleCsvRow(encryptedPath);
    final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
    GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
            entry("foo", isFingerprintEncrypted),
            entry("quoted-foo", isFingerprintEncrypted),
            entry("quoted-foo-newline-bar", isFingerprintEncrypted),
            entry("blank", isFingerprintEncrypted),
            entry("1space", isFingerprintEncrypted),
            entry("quoted-blank", isFingerprintEncrypted),
            entry("quoted-1space", isFingerprintEncrypted)
    );
    // check that NULL values never get the same encoding
    // (preserving NULL "uniqueness" for fingerprint columns)
    assertNotEquals(
            rowPostEncryption.get("blank"),
            rowPostEncryption.get("1space"));
    // check that `,,` and `,"",` get different encoding when the user
    // specifies `""` as the input NULL value
    assertNotEquals(
            rowPostEncryption.get("blank"),
            rowPostEncryption.get("quoted-blank"));
}

@Test
public void blankEncNull_Enc_allowJoinsOnColumnsWithDifferentNamesIsTrue_FingerprintTest() throws IOException {
    blankEncNull_Fingerprint(true);
}

@Test
public void blankEncNull_Enc_allowJoinsOnColumnsWithDifferentNamesIsFalse_FingerprintTest() throws IOException {
    blankEncNull_Fingerprint(false);
}
// Fingerprint encryption where a quoted blank (`""`) is the NULL encoding:
// distinct quoted values and `,,` vs `,"",` must still encode differently.
@Test
public void emptyQuotesEncNull_FingerprintTest() throws IOException {
    encCsvInputNull = "\"\"";
    final String encryptedPath = encryptAllColumnsFingerprint();
    final var rowPostEncryption = readSingleCsvRow(encryptedPath);
    final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
    GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
            entry("foo", isFingerprintEncrypted),
            entry("quoted-foo", isFingerprintEncrypted),
            entry("quoted-foo-newline-bar", isFingerprintEncrypted),
            entry("blank", isFingerprintEncrypted),
            entry("1space", isFingerprintEncrypted),
            entry("quoted-blank", isFingerprintEncrypted),
            entry("quoted-1space", isFingerprintEncrypted)
    );
    // check that `,"",` and `," ",` get different encodings
    assertNotEquals(
            rowPostEncryption.get("quoted-blank"),
            rowPostEncryption.get("quoted-1space"));
    // check that `,,` and `,"",` get different encoding when the user
    // specifies `""` as the input NULL value
    assertNotEquals(
            rowPostEncryption.get("blank"),
            rowPostEncryption.get("quoted-blank"));
}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.utils.FileTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Verifies that both encrypt and decrypt CLI dry runs succeed (exit code 0) when a
 * valid secret key is available (per the class name, presumably sourced from the
 * key environment variable — configured outside this file by the test utilities).
 *
 * <p>Fix: the test methods were previously named {@code ...SecretKeyInvalidTest},
 * contradicting both the class name and the assertions (which expect success);
 * they are renamed to {@code ...SecretKeyValidTest}.
 */
public class MainEnvVarKeyValidTest {
    private static final String ENC_INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";

    private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";

    private static final String DEC_INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    private DecryptCliConfigTestUtility decArgs;

    private CommandLine decMain;

    private EncryptCliConfigTestUtility encArgs;

    private CommandLine encMain;

    // Run the encrypt CLI with the configured args and return its exit code.
    public int runEncryptMainWithCliArgs() {
        return encMain.execute(encArgs.toArrayWithoutMode());
    }

    // Run the decrypt CLI with the configured args and return its exit code.
    public int runDecryptMainWithCliArgs() {
        return decMain.execute(decArgs.toArrayWithoutMode());
    }

    @BeforeEach
    public void setup() throws IOException {
        final String output = FileTestUtility.createTempFile().toString();
        encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(ENC_INPUT_PATH, SCHEMA_PATH);
        encArgs.setOutput(output);
        // Mock AWS Clean Rooms so collaboration settings come from the test args.
        final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
        encMain = EncryptMode.getApp(cleanRoomsDao);
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(DEC_INPUT_PATH);
        decArgs.setOutput(output);
        decMain = DecryptMode.getApp();
    }

    @Test
    public void validateEncryptSecretKeyValidTest() {
        assertEquals(0, runEncryptMainWithCliArgs());
    }

    @Test
    public void validateDecryptSecretKeyValidTest() {
        assertEquals(0, runDecryptMainWithCliArgs());
    }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.io.CsvTestUtility;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.TableGeneratorTestUtility;
import com.amazonaws.c3r.utils.TimingResultTestUtility;
import com.univocity.parsers.csv.CsvParser;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class MainPerfTest {
private EncryptCliConfigTestUtility encArgs;
private DecryptCliConfigTestUtility decArgs;
// Reset CLI args to defaults before each perf test; allow duplicates on encrypt
// (generated table data may repeat values — see TableGeneratorTestUtility usage)
// and don't fail decrypt when fingerprint columns are present.
@BeforeEach
public void setup() {
    encArgs = EncryptCliConfigTestUtility.defaultTestArgs();
    encArgs.setAllowDuplicates(true);
    decArgs = DecryptCliConfigTestUtility.defaultTestArgs();
    decArgs.setFailOnFingerprintColumns(false);
}
public TimingResultTestUtility timeCsvRoundTrips(final int repetitions, final int entrySize, final int columnCount, final long rowCount)
throws IOException {
final String schemaPath = TableGeneratorTestUtility.generateSchema(columnCount, rowCount).toString();
final Path dataPath = TableGeneratorTestUtility.generateCsv(entrySize, columnCount, rowCount);
final long inputSizeBytes = dataPath.toFile().length();
final Path marshalledPath = FileTestUtility.createTempFile(
TableGeneratorTestUtility.filePrefix(columnCount, rowCount),
".marshalled.csv");
final Path unmarshalledPath = FileTestUtility.createTempFile(
TableGeneratorTestUtility.filePrefix(columnCount, rowCount),
".unmarshalled.csv");
encArgs.setInput(dataPath.toString());
encArgs.setSchema(schemaPath);
encArgs.setOutput(marshalledPath.toString());
final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
long totalMarshalTimeSec = 0;
for (int i = 0; i < repetitions; i++) {
final long startTimeMs = System.currentTimeMillis();
final int exitCode = EncryptMode.getApp(cleanRoomsDao).execute(encArgs.toArrayWithoutMode());
final long endTimeMs = System.currentTimeMillis();
totalMarshalTimeSec = totalMarshalTimeSec + ((endTimeMs - startTimeMs) / 1000);
assertEquals(0, exitCode);
}
final long marshalledSizeBytes = marshalledPath.toFile().length();
decArgs.setFailOnFingerprintColumns(false);
decArgs.setInput(marshalledPath.toString());
decArgs.setOutput(unmarshalledPath.toString());
// printCliArgs();
long totalUnmarshalTimeSec = 0;
for (int i = 0; i < repetitions; i++) {
final long startTimeMs = System.currentTimeMillis();
final int exitCode = Main.getApp().execute(decArgs.toArray());
final long endTimeMs = System.currentTimeMillis();
totalUnmarshalTimeSec = totalUnmarshalTimeSec + ((endTimeMs - startTimeMs) / 1000);
assertEquals(0, exitCode);
}
final long unmarshalledSizeBytes = unmarshalledPath.toFile().length();
final CsvParser parser = CsvTestUtility.getCsvParser(unmarshalledPath.toString(), columnCount);
parser.parseNext(); // skip the header
long readRows = 0;
String[] row = parser.parseNext();
while (row != null) {
assertEquals(columnCount, row.length);
readRows++;
row = parser.parseNext();
}
assertEquals(rowCount, readRows);
return TimingResultTestUtility.builder()
.charsPerEntry(entrySize)
.columnCount(columnCount)
.rowCount(rowCount)
.inputSizeBytes(inputSizeBytes)
.marshalTimeSec(totalMarshalTimeSec / repetitions)
.marshalledSizeBytes(marshalledSizeBytes)
.unmarshalTimeSec(totalUnmarshalTimeSec / repetitions)
.unmarshalledSizeBytes(unmarshalledSizeBytes)
.build();
}
@Test
public void timeVariousColRowSizes() throws IOException {
final int[] columnCounts = {3, 6};
final long[] rowCounts = {100, 1000};
final int repetitions = 1;
final int entrySize = 20;
for (var nCols : columnCounts) {
for (var nRows : rowCounts) {
timeCsvRoundTrips(repetitions, entrySize, nCols, nRows);
}
}
}
}
| 2,678 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/SchemaCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.io.FileFormat;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
 * Builds argument lists/arrays for exercising the `schema` CLI command in tests.
 */
@Builder
@Setter
public class SchemaCliConfigTestUtility {
    /**
     * Whether template or interactive mode should be used.
     */
    @Builder.Default
    private String subMode = "--template";

    /**
     * Data file for building the schema.
     */
    @Builder.Default
    private String input = "mySourceFile";

    /**
     * Output file location for the schema.
     */
    @Builder.Default
    @Getter
    private String output = null;

    /**
     * Whether the output file should be overwritten if it exists.
     */
    @Builder.Default
    private boolean overwrite = false;

    /**
     * How much detail is printed to the console and log files.
     */
    @Builder.Default
    private String verbosity = null;

    /**
     * Whether a stacktrace should be displayed.
     */
    @Builder.Default
    private boolean enableStackTraces = true;

    /**
     * Data type.
     */
    private FileFormat fileFormat;

    /**
     * If the data file has no headers.
     */
    @Builder.Default
    private boolean noHeaders = false;

    /**
     * Collaboration ID.
     */
    private String collaborationId;

    /**
     * AWS CLI profile.
     */
    private String profile;

    /**
     * AWS region.
     */
    private String region;

    /**
     * Appends {@code option=value} when the value is non-null.
     *
     * @param args   Argument list being built
     * @param option Option name, including leading dashes
     * @param value  Option value; skipped entirely when {@code null}
     */
    private static void addValueIfPresent(final List<String> args, final String option, final Object value) {
        if (value != null) {
            args.add(option + "=" + value);
        }
    }

    /**
     * Appends a bare flag when it is enabled.
     *
     * @param args Argument list being built
     * @param flag Flag name, including leading dashes
     * @param set  Whether the flag should be emitted
     */
    private static void addFlagIfSet(final List<String> args, final String flag, final boolean set) {
        if (set) {
            args.add(flag);
        }
    }

    /**
     * Renders the configured settings as a full command line, mode token first.
     *
     * @return List of command line parameters
     * @see SchemaCliConfigTestUtility#toListWithoutMode
     */
    public List<String> toList() {
        final List<String> args = new ArrayList<>();
        args.add("schema");
        args.add(input);
        args.add(subMode);
        addValueIfPresent(args, "--output", output);
        addFlagIfSet(args, "--overwrite", overwrite);
        addValueIfPresent(args, "--verbosity", verbosity);
        addFlagIfSet(args, "--enableStackTraces", enableStackTraces);
        addValueIfPresent(args, "--fileFormat", fileFormat);
        addFlagIfSet(args, "--noHeaders", noHeaders);
        addValueIfPresent(args, "--id", collaborationId);
        addValueIfPresent(args, "--profile", profile);
        addValueIfPresent(args, "--region", region);
        return args;
    }

    /**
     * Renders the configured settings as a command line with the leading mode token dropped.
     *
     * @return List of command line parameters.
     * @see SchemaCliConfigTestUtility#toList
     */
    public List<String> toListWithoutMode() {
        final List<String> full = toList();
        return new ArrayList<>(full.subList(1, full.size()));
    }

    /**
     * Array form of {@link #toList()}.
     *
     * @return Array of command line parameters
     * @see SchemaCliConfigTestUtility#toArrayWithoutMode
     */
    public String[] toArray() {
        return toList().toArray(new String[0]);
    }

    /**
     * Array form of {@link #toListWithoutMode()}.
     *
     * @return Array of command line parameters
     * @see SchemaCliConfigTestUtility#toArray
     */
    public String[] toArrayWithoutMode() {
        return toListWithoutMode().toArray(new String[0]);
    }
}
| 2,679 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/EncryptModeDryRunTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.CsvTestUtility;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.stubbing.Answer;
import picocli.CommandLine;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class EncryptModeDryRunTest {
private static final String INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";
private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";
private EncryptCliConfigTestUtility encArgs;
private EncryptMode main;
private CleanRoomsDao mockCleanRoomsDao;
@BeforeEach
public void setup() throws IOException {
final String output = FileTestUtility.createTempFile().toString();
encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(INPUT_PATH, SCHEMA_PATH);
encArgs.setOutput(output);
mockCleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(mockCleanRoomsDao.getCollaborationDataEncryptionMetadata(any()))
.thenAnswer((Answer<ClientSettings>) invocation -> encArgs.getClientSettings());
main = new EncryptMode(mockCleanRoomsDao);
}
public int runMainWithCliArgs() {
return new CommandLine(main).execute(encArgs.toArrayWithoutMode());
}
@Test
public void minimumViableArgsTest() {
assertEquals(0, runMainWithCliArgs());
assertEquals(SCHEMA_PATH, main.getRequiredArgs().getSchema());
assertEquals(INPUT_PATH, main.getRequiredArgs().getInput());
assertEquals(GeneralTestUtility.EXAMPLE_SALT, main.getRequiredArgs().getId());
}
@Test
public void validateInputBlankTest() {
encArgs.setInput("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateConfigBlankTest() {
encArgs.setSchema("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateCollaborationIdBlankTest() {
encArgs.setCollaborationId("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateCollaborationIdInvalidUuidTest() {
encArgs.setCollaborationId("123456");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void getTargetFileEmptyTest() {
encArgs.setOutput("");
assertNotEquals(0, runMainWithCliArgs());
}
private void checkBooleans(final Function<Boolean, Boolean> action) {
assertEquals(true, action.apply(true));
assertEquals(false, action.apply(false));
}
@Test
public void allowCleartextFlagTest() {
checkBooleans(b -> {
encArgs.setAllowCleartext(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowCleartext();
});
}
@Test
public void allowDuplicatesFlagTest() {
checkBooleans(b -> {
encArgs.setAllowDuplicates(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowDuplicates();
});
}
@Test
public void allowJoinsOnColumnsWithDifferentNamesFlagTest() {
checkBooleans(b -> {
encArgs.setAllowJoinsOnColumnsWithDifferentNames(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowJoinsOnColumnsWithDifferentNames();
});
}
@Test
public void preserveNullsFlagTest() {
checkBooleans(b -> {
encArgs.setPreserveNulls(b);
runMainWithCliArgs();
return main.getClientSettings().isPreserveNulls();
});
}
@Test
public void inputFileFormatTest() throws IOException {
final String input = FileTestUtility.createTempFile("input", ".unknown").toString();
encArgs.setInput(input);
assertNotEquals(0, runMainWithCliArgs());
encArgs.setFileFormat(FileFormat.CSV);
assertEquals(0, runMainWithCliArgs());
}
@Test
public void noProfileOrRegionFlagsTest() {
main = new EncryptMode(mockCleanRoomsDao);
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertNull(main.getOptionalArgs().getProfile());
assertNull(mockCleanRoomsDao.getRegion());
}
@Test
public void profileFlagTest() throws IOException {
// Ensure that passing a value via the --profile flag is given to the CleanRoomsDao builder's `profile(..)` method.
final String myProfileName = "my-profile-name";
assertNotEquals(myProfileName, mockCleanRoomsDao.toString());
when(mockCleanRoomsDao.withRegion(any())).thenThrow(new RuntimeException("test failure - region should have have been set"));
encArgs.setProfile(myProfileName);
main = new EncryptMode(mockCleanRoomsDao);
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertEquals(myProfileName, main.getOptionalArgs().getProfile());
assertEquals(myProfileName, main.getCleanRoomsDao().getProfile());
}
@Test
public void regionFlagTest() {
final String myRegion = "collywobbles";
encArgs.setRegion(myRegion);
main = new EncryptMode(mockCleanRoomsDao);
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertEquals(myRegion, main.getOptionalArgs().getRegion());
assertEquals(myRegion, main.getCleanRoomsDao().getRegion());
}
/*
* Add an extra column to a known valid schema and make sure it's not accepted because it doesn't have the same number
* of columns as the csv file. Easiest to run through the CLI since we need the CSV parser for verification.
*/
@Test
public void tooManyColumnsPositionalSchemaTest() throws IOException {
final String tempJson = FileUtil.readBytes("../samples/schema/config_sample_no_headers.json");
final int closeOuter = tempJson.lastIndexOf("]");
final String json = tempJson.substring(0, closeOuter - 1) + ", [] ] }";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final EncryptCliConfigTestUtility args =
EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv/data_sample_without_quotes.csv", schema.toString());
args.setDryRun(false);
final var inputArgs = args.toArrayWithoutMode();
Assertions.assertEquals(Main.FAILURE, EncryptMode.getApp(null).execute(inputArgs));
}
/*
* Remove a column to a known valid schema and make sure it's not accepted because it doesn't have the same number
* of columns as the csv file. Easiest to run through the CLI since we need the CSV parser for verification.
*/
@Test
public void tooFewColumnsPositionalSchemaTest() throws IOException {
final String tempJson = FileUtil.readBytes("../samples/schema/config_sample_no_headers.json");
final int lastElementStart = tempJson.lastIndexOf("],");
final String json = tempJson.substring(0, lastElementStart - 1) + "]]}";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final var args = EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv/data_sample_no_headers.csv", schema.toString());
args.setDryRun(false);
final var inputArgs = args.toArrayWithoutMode();
Assertions.assertEquals(Main.FAILURE, EncryptMode.getApp(null).execute(inputArgs));
}
/*
* Make sure only the columns with ColumnSchemas are included in the output. Easiest to run through the CLI since we need
* the CSV parser for verification.
*/
@Test
public void notAllColumnsUsedTest() throws IOException {
final String json = "{ \"headerRow\": false, \"columns\": [" +
"[{\"targetHeader\":\"firstname\", \"type\": \"cleartext\"}]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]" +
"]}";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final EncryptCliConfigTestUtility args =
EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv" + "/data_sample_without_quotes.csv", schema.toString());
final String output = FileTestUtility.createTempFile().toString();
args.setOutput(output);
args.setDryRun(false);
Assertions.assertEquals(Main.SUCCESS, CliTestUtility.runWithoutCleanRooms(args));
final List<Map<String, String>> rows = CsvTestUtility.readRows(args.getOutput());
assertTrue(rows.size() > 0);
for (Map<String, String> row : rows) {
assertEquals(1, row.size());
assertTrue(row.containsKey("firstname"));
}
}
} | 2,680 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/DecryptCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
 * Builds argument lists/arrays for exercising the `decrypt` CLI command in tests.
 */
@Setter
public final class DecryptCliConfigTestUtility {
    /**
     * Collaboration ID to use for computing shared secret keys.
     */
    private String collaborationId;

    /**
     * Input file location.
     */
    @Getter
    private String input;

    /**
     * Value used in the input file to represent {@code null} in the CSV data.
     */
    private String csvInputNullValue;

    /**
     * Value to use in the output file to represent {@code null} in the CSV data.
     */
    private String csvOutputNullValue;

    /**
     * Location to write the output file.
     */
    @Getter
    private String output;

    /**
     * Whether the output file should be overwritten if it already exists.
     */
    private boolean overwrite;

    /**
     * Whether encryption will actually be run or only the configuration will be validated.
     */
    private boolean dryRun;

    /**
     * Whether to fail if a fingerprint column is seen in the data file.
     */
    private boolean failOnFingerprintColumns;

    /**
     * Whether a stacktrace should be printed.
     */
    private boolean enableStackTraces;

    /**
     * Input file data type.
     */
    private FileFormat fileFormat;

    /**
     * Hidden default constructor so static instance creators are used.
     */
    private DecryptCliConfigTestUtility() {
    }

    /**
     * Default test values for decryption args to use with tests.
     *
     * @return Default test values
     */
    public static DecryptCliConfigTestUtility defaultTestArgs() {
        final var args = new DecryptCliConfigTestUtility();
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.input = "mySourceFile";
        args.overwrite = true;
        args.enableStackTraces = true;
        return args;
    }

    /**
     * Creates a test configuration for a dry run. Skips all data processing and validates settings.
     *
     * @param file Input file to use for the dry run; {@code null} selects a placeholder name
     * @return Default dry run configuration
     */
    public static DecryptCliConfigTestUtility defaultDryRunTestArgs(final String file) {
        final var args = defaultTestArgs();
        if (file != null) {
            args.input = file;
        }
        args.dryRun = true;
        return args;
    }

    /**
     * Empty CLI configuration.
     *
     * @return Configuration instance with no set values
     */
    public static DecryptCliConfigTestUtility blankTestArgs() {
        return new DecryptCliConfigTestUtility();
    }

    /**
     * Appends {@code option=value} when the value is non-null.
     *
     * @param args   Argument list being built
     * @param option Option name, including leading dashes
     * @param value  Option value; skipped entirely when {@code null}
     */
    private static void addValueIfPresent(final List<String> args, final String option, final Object value) {
        if (value != null) {
            args.add(option + "=" + value);
        }
    }

    /**
     * Appends a bare flag when it is enabled.
     *
     * @param args Argument list being built
     * @param flag Flag name, including leading dashes
     * @param set  Whether the flag should be emitted
     */
    private static void addFlagIfSet(final List<String> args, final String flag, final boolean set) {
        if (set) {
            args.add(flag);
        }
    }

    /**
     * Renders the configured settings as a full command line, mode token first.
     *
     * @return List of command line parameters
     * @see DecryptCliConfigTestUtility#getCliArgsWithoutMode
     */
    public List<String> getCliArgs() {
        final List<String> args = new ArrayList<>();
        args.add("decrypt");
        if (input != null) {
            args.add(input);
        }
        addValueIfPresent(args, "--id", collaborationId);
        addValueIfPresent(args, "--csvInputNULLValue", csvInputNullValue);
        addValueIfPresent(args, "--csvOutputNULLValue", csvOutputNullValue);
        addValueIfPresent(args, "--output", output);
        addFlagIfSet(args, "--overwrite", overwrite);
        addFlagIfSet(args, "--dryRun", dryRun);
        addFlagIfSet(args, "--failOnFingerprintColumns", failOnFingerprintColumns);
        addFlagIfSet(args, "--enableStackTraces", enableStackTraces);
        addValueIfPresent(args, "--fileFormat", fileFormat);
        return args;
    }

    /**
     * Renders the configured settings as a command line with the leading mode token dropped.
     *
     * @return List of command line parameters.
     * @see DecryptCliConfigTestUtility#getCliArgs
     */
    public List<String> getCliArgsWithoutMode() {
        final List<String> full = getCliArgs();
        return new ArrayList<>(full.subList(1, full.size()));
    }

    /**
     * Array form of {@link #getCliArgs()}.
     *
     * @return Array of command line parameters
     * @see DecryptCliConfigTestUtility#toArrayWithoutMode
     */
    public String[] toArray() {
        return getCliArgs().toArray(new String[0]);
    }

    /**
     * Array form of {@link #getCliArgsWithoutMode()}.
     *
     * @return Array of command line parameters
     * @see DecryptCliConfigTestUtility#toArray
     */
    public String[] toArrayWithoutMode() {
        return getCliArgsWithoutMode().toArray(new String[0]);
    }
}
| 2,681 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/MainErrorMessageTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.FileTestUtility;
import nl.altindag.log.LogCaptor;
import nl.altindag.log.model.LogEvent;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Verifies that CLI failures surface user-readable error messages (and stack traces only when
 * requested) in the captured log output, for encrypt, decrypt, and schema modes.
 */
public class MainErrorMessageTest {
    // Known-good schema matching the sample input below.
    private final String config = "../samples/schema/config_sample.json";

    // Sample cleartext CSV used as input for all three modes.
    private final String input = "../samples/csv/data_sample_without_quotes.csv";

    private EncryptCliConfigTestUtility encArgs;

    private DecryptCliConfigTestUtility decArgs;

    private SchemaCliConfigTestUtility schemaCliTestConfig;

    @BeforeEach
    public void setup() throws IOException {
        // All three modes write to the same pre-created temp file, so overwrite-related tests
        // can rely on the output already existing.
        final String output = FileTestUtility.createTempFile().toString();
        encArgs = EncryptCliConfigTestUtility.defaultTestArgs();
        encArgs.setSchema(config);
        encArgs.setAllowDuplicates(true);
        encArgs.setInput(input);
        encArgs.setOutput(output);
        decArgs = DecryptCliConfigTestUtility.defaultTestArgs();
        decArgs.setFailOnFingerprintColumns(false);
        decArgs.setInput(input);
        decArgs.setOutput(output);
        schemaCliTestConfig = SchemaCliConfigTestUtility.builder().input(input).output(output).build();
    }

    /**
     * Runs encrypt mode (with a mocked Clean Rooms DAO) and asserts the expected error message
     * appears in the logs.
     *
     * @param encArgs           Encrypt CLI arguments to run with
     * @param enableStackTraces Whether a throwable is expected to be attached to the log event
     * @param message           Substring expected in the last ERROR log message
     * @param expectedException Throwable type expected when stack traces are enabled
     */
    private void encryptAndCheckErrorMessagePresent(final EncryptCliConfigTestUtility encArgs, final boolean enableStackTraces,
                                                    final String message, final Class<? extends Throwable> expectedException) {
        final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
        final CommandLine cmd = EncryptMode.getApp(cleanRoomsDao);
        runAndCheckErrorMessagePresent(cmd, encArgs.toArrayWithoutMode(), enableStackTraces, message, expectedException);
    }

    /**
     * Runs decrypt mode and asserts the expected error message appears in the logs.
     *
     * @param decArgs           Decrypt CLI arguments to run with
     * @param enableStackTraces Whether a throwable is expected to be attached to the log event
     * @param message           Substring expected in the last ERROR log message
     * @param expectedException Throwable type expected when stack traces are enabled
     */
    private void decryptAndCheckErrorMessagePresent(final DecryptCliConfigTestUtility decArgs, final boolean enableStackTraces,
                                                    final String message, final Class<? extends Throwable> expectedException) {
        final CommandLine cmd = DecryptMode.getApp();
        runAndCheckErrorMessagePresent(cmd, decArgs.toArrayWithoutMode(), enableStackTraces, message, expectedException);
    }

    /**
     * Runs schema mode and asserts the expected error message appears in the logs.
     * Schema-mode failures here are always {@code C3rIllegalArgumentException}.
     *
     * @param args              Schema CLI arguments to run with
     * @param enableStackTraces Whether a throwable is expected to be attached to the log event
     * @param message           Substring expected in the last ERROR log message
     */
    private void schemaAndCheckErrorMessagePresent(final SchemaCliConfigTestUtility args, final boolean enableStackTraces,
                                                   final String message) {
        final CommandLine cmd = SchemaMode.getApp(null);
        runAndCheckErrorMessagePresent(cmd, args.toArrayWithoutMode(), enableStackTraces, message, C3rIllegalArgumentException.class);
    }

    /**
     * Executes the command while capturing the ROOT logger, then checks:
     * the last ERROR event contains {@code message}, a throwable of type
     * {@code expectedException} is attached iff {@code enableStackTraces}, and the last WARN
     * event is the fixed "files may have been left on disk" notice.
     *
     * @param cmd               picocli command to execute
     * @param args              Arguments to pass to the command
     * @param enableStackTraces Whether a throwable is expected on the error event
     * @param message           Substring expected in the last ERROR log message
     * @param expectedException Throwable type expected when stack traces are enabled
     */
    private void runAndCheckErrorMessagePresent(final CommandLine cmd, final String[] args, final boolean enableStackTraces,
                                                final String message, final Class<? extends Throwable> expectedException) {
        final List<LogEvent> errorLogEvents;
        final List<LogEvent> warnLogEvents;
        try (LogCaptor logCaptor = LogCaptor.forName("ROOT")) {
            cmd.execute(args);
            errorLogEvents = logCaptor.getLogEvents().stream()
                    .filter(logEvent -> "ERROR".equals(logEvent.getLevel()))
                    .collect(Collectors.toList());
            warnLogEvents = logCaptor.getLogEvents().stream()
                    .filter(logEvent -> "WARN".equals(logEvent.getLevel()))
                    .collect(Collectors.toList());
        }
        assertFalse(errorLogEvents.isEmpty());
        assertFalse(warnLogEvents.isEmpty());
        final LogEvent errorEvent = errorLogEvents.get(errorLogEvents.size() - 1); // The last error is what we want
        final LogEvent warnEvent = warnLogEvents.get(warnLogEvents.size() - 1); // The last warning is what we want
        final String errorMessage = errorEvent.getFormattedMessage();
        // Validate presence when stack traces enabled
        if (enableStackTraces) {
            assertTrue(errorEvent.getThrowable().isPresent());
            assertEquals(expectedException, errorEvent.getThrowable().get().getClass());
            assertTrue(errorMessage.contains(message));
        } else {
            // Validate presence when stack traces disabled
            assertFalse(errorEvent.getThrowable().isPresent());
            assertTrue(errorMessage.contains(message));
        }
        final String warnMessage = warnEvent.getFormattedMessage();
        // Static check since it's the final message and not dynamic.
        assertEquals("Output files may have been left on disk.", warnMessage);
    }

    @Test
    public void encryptInputIllegalArgumentExceptionTest() throws IOException {
        final String missingInput = FileTestUtility.resolve("missingEncryptInputIllegalArgument.csv").toString();
        encArgs.setInput(missingInput);
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "File does not exist", C3rIllegalArgumentException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "File does not exist", C3rIllegalArgumentException.class);
    }

    @Test
    public void decryptInputIllegalArgumentExceptionTest() throws IOException {
        final String missingInput = FileTestUtility.resolve("missingDecryptInputIllegalArgument.csv").toString();
        decArgs.setInput(missingInput);
        decArgs.setEnableStackTraces(true);
        decryptAndCheckErrorMessagePresent(decArgs, true, "File does not exist", C3rIllegalArgumentException.class);
        decArgs.setEnableStackTraces(false);
        decryptAndCheckErrorMessagePresent(decArgs, false, "File does not exist", C3rIllegalArgumentException.class);
    }

    @Test
    public void schemaValidateIllegalArgumentExceptionTest() throws IOException {
        final String missingInput = FileTestUtility.resolve("missingSchemaValidateIllegalArgument.csv").toString();
        schemaCliTestConfig.setInput(missingInput);
        schemaCliTestConfig.setSubMode("--template");
        schemaCliTestConfig.setEnableStackTraces(true);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, true, "File does not exist");
        schemaCliTestConfig.setEnableStackTraces(false);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, false, "File does not exist");
    }

    @Test
    public void encryptDuplicatesRuntimeExceptionTest() {
        encArgs.setAllowDuplicates(false);
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "Duplicate entries found", C3rRuntimeException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "Duplicate entries found", C3rRuntimeException.class);
    }

    @Test
    public void encryptPadFailureRuntimeExceptionTest() throws IOException {
        // Append a row with a 150-char value that cannot be padded to the fixed size.
        final Path badSample = FileTestUtility.resolve("bad_data_sample.csv");
        final Path sample = new File(input).toPath();
        Files.copy(sample, badSample, StandardCopyOption.REPLACE_EXISTING);
        final byte[] bits = new byte[150];
        Arrays.fill(bits, (byte) 'a');
        final String badValue = new String(bits, StandardCharsets.UTF_8);
        final String unpaddableRow = "Shana,Hendrix,8 Hollows Rd,Richmond,VA,407-555-4322," + badValue + ",5,Sean's older sister\n";
        Files.write(badSample, unpaddableRow.getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND);
        encArgs.setInput(badSample.toString());
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "No room for padding", C3rRuntimeException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "No room for padding", C3rRuntimeException.class);
    }

    @Test
    public void decryptFailOnFingerprintColumnsUnsupportedOperationExceptionTest() {
        decArgs.setInput("../samples/csv/marshalled_data_sample.csv");
        decArgs.setFailOnFingerprintColumns(true);
        decArgs.setEnableStackTraces(true);
        decryptAndCheckErrorMessagePresent(decArgs, true,
                "Data encrypted for a fingerprint column was found but is forbidden with current settings.", C3rRuntimeException.class);
        decArgs.setEnableStackTraces(false);
        decryptAndCheckErrorMessagePresent(decArgs, false,
                "Data encrypted for a fingerprint column was found but is forbidden with current settings.", C3rRuntimeException.class);
    }

    @Test
    public void encryptOverwriteOutputWhenFileExistsTest() {
        // Output file was created in setup(), so disabling overwrite must fail.
        encArgs.setOverwrite(false);
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "File already exists", C3rIllegalArgumentException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "File already exists", C3rIllegalArgumentException.class);
    }

    @Test
    public void decryptOverwriteOutputWhenFileExistsTest() {
        decArgs.setOverwrite(false);
        decArgs.setEnableStackTraces(true);
        decryptAndCheckErrorMessagePresent(decArgs, true, "File already exists", C3rIllegalArgumentException.class);
        decArgs.setEnableStackTraces(false);
        decryptAndCheckErrorMessagePresent(decArgs, false, "File already exists", C3rIllegalArgumentException.class);
    }

    @Test
    public void schemaOverwriteOutputWhenFileExistsTest() {
        schemaCliTestConfig.setSubMode("--interactive");
        schemaCliTestConfig.setOverwrite(false);
        schemaCliTestConfig.setEnableStackTraces(true);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, true, "File already exists");
        schemaCliTestConfig.setEnableStackTraces(false);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, false, "File already exists");
    }

    @Test
    public void encryptAllowCleartextFalseTest() {
        encArgs.setAllowCleartext(false);
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "Cleartext columns found", C3rIllegalArgumentException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "Cleartext columns found", C3rIllegalArgumentException.class);
    }

    @Test
    public void encryptUnrecognizedFileFormatTest() throws IOException {
        final Path unknownInputFormat = FileTestUtility.createTempFile("unknownInputFormat", ".unknown");
        unknownInputFormat.toFile().deleteOnExit();
        encArgs.setInput(unknownInputFormat.toFile().getAbsolutePath());
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "Unknown file extension", C3rIllegalArgumentException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "Unknown file extension", C3rIllegalArgumentException.class);
    }

    @Test
    public void decryptUnrecognizedFileFormatTest() throws IOException {
        final Path unknownInputFormat = FileTestUtility.createTempFile("unknownInputFormat", ".unknown");
        unknownInputFormat.toFile().deleteOnExit();
        decArgs.setInput(unknownInputFormat.toFile().getAbsolutePath());
        decArgs.setEnableStackTraces(true);
        decryptAndCheckErrorMessagePresent(decArgs, true, "Unknown file extension", C3rIllegalArgumentException.class);
        decArgs.setEnableStackTraces(false);
        decryptAndCheckErrorMessagePresent(decArgs, false, "Unknown file extension", C3rIllegalArgumentException.class);
    }

    @Test
    public void schemaUnrecognizedFileFormatTest() throws IOException {
        schemaCliTestConfig.setSubMode("--interactive");
        final Path unknownInputFormat = FileTestUtility.createTempFile("unknownInputFormat", ".unknown");
        unknownInputFormat.toFile().deleteOnExit();
        schemaCliTestConfig.setInput(unknownInputFormat.toFile().getAbsolutePath());
        schemaCliTestConfig.setEnableStackTraces(true);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, true, "Unknown file format");
        schemaCliTestConfig.setEnableStackTraces(false);
        schemaAndCheckErrorMessagePresent(schemaCliTestConfig, false, "Unknown file format");
    }

    @Test
    public void encryptEmptySchemaTest() throws IOException {
        final Path emptySchema = FileTestUtility.createTempFile("emptySchema", ".json");
        emptySchema.toFile().deleteOnExit();
        encArgs.setSchema(emptySchema.toAbsolutePath().toString());
        encArgs.setEnableStackTraces(true);
        encryptAndCheckErrorMessagePresent(encArgs, true, "The table schema file was empty", C3rIllegalArgumentException.class);
        encArgs.setEnableStackTraces(false);
        encryptAndCheckErrorMessagePresent(encArgs, false, "The table schema file was empty", C3rIllegalArgumentException.class);
    }
}
| 2,682 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/DecryptModeDryRunTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
public class DecryptModeDryRunTest {
    /** Sample marshalled CSV used as the default input for every test. */
    private static final String INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    /** Decrypt CLI argument holder, rebuilt before each test. */
    private DecryptCliConfigTestUtility decArgs;

    /** Command instance under test. */
    private DecryptMode main;

    @BeforeEach
    public void setup() throws IOException {
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(INPUT_PATH);
        decArgs.setOutput(FileTestUtility.createTempFile().toString());
        main = new DecryptMode();
    }

    /**
     * Executes the decrypt command with the current CLI arguments.
     *
     * @return The picocli exit code
     */
    public int runMainWithCliArgs() {
        return new CommandLine(main).execute(decArgs.toArrayWithoutMode());
    }

    // A minimal argument set should parse and populate the required values.
    @Test
    public void minimumViableArgsTest() {
        runMainWithCliArgs();
        assertEquals(INPUT_PATH, main.getRequiredArgs().getInput());
        assertEquals(GeneralTestUtility.EXAMPLE_SALT, main.getRequiredArgs().getId());
    }

    // A blank input path must be rejected.
    @Test
    public void validateInputBlankTest() {
        decArgs.setInput("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    // A blank collaboration identifier must be rejected.
    @Test
    public void validateCollaborationIdBlankTest() {
        decArgs.setCollaborationId("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    // A collaboration identifier that is not a valid UUID must be rejected.
    @Test
    public void validateCollaborationIdInvalidUuidTest() {
        decArgs.setCollaborationId("123456");
        assertNotEquals(0, runMainWithCliArgs());
    }

    // A blank output path must be rejected.
    @Test
    public void getTargetFileEmptyTest() {
        decArgs.setOutput("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    // An unrecognized input extension fails unless an explicit file format flag is supplied.
    @Test
    public void inputFileFormatTest() throws IOException {
        decArgs.setInput(FileTestUtility.createTempFile("input", ".unknown").toString());
        assertNotEquals(0, runMainWithCliArgs());
        decArgs.setFileFormat(FileFormat.CSV);
        assertEquals(0, runMainWithCliArgs());
    }
}
| 2,683 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/MainTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.CsvTestUtility;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.ParquetTestUtility;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.stubbing.Answer;
import picocli.CommandLine;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class MainTest {
    // Schema file used for all encrypt invocations in this class.
    private final String config = "../samples/schema/config_sample.json";
    // Encrypt CLI arguments, rebuilt before each test.
    private EncryptCliConfigTestUtility encArgs;
    // Decrypt CLI arguments, rebuilt before each test.
    private DecryptCliConfigTestUtility decArgs;
    // Mocked AWS Clean Rooms DAO; collaboration settings come from encArgs.
    private CleanRoomsDao cleanRoomsDao;
    // Output file path resolved fresh for the current test.
    private Path output;
    // Reset encryption and decryption command line arguments before each test
    @BeforeEach
    public void setup() throws IOException {
        output = FileTestUtility.resolve("output.csv");
        encArgs = EncryptCliConfigTestUtility.defaultTestArgs();
        encArgs.setSchema(config);
        encArgs.setAllowDuplicates(true);
        decArgs = DecryptCliConfigTestUtility.defaultTestArgs();
        decArgs.setFailOnFingerprintColumns(false);
        cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        // Answer (not thenReturn) so the settings reflect encArgs at call time.
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenAnswer((Answer<ClientSettings>) (invocation) ->
                encArgs.getClientSettings());
    }
    // Verify calling the command with no argument fails
    @Test
    public void noArgsUsageTest() {
        final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
        System.setErr(new PrintStream(consoleOutput));
        final int exitCode = Main.getApp().execute();
        // picocli exit code 2 indicates a usage error.
        assertEquals(2, exitCode);
        assertTrue(consoleOutput.toString().toLowerCase().contains("missing required subcommand"));
    }
    // Make sure help is printed out
    @Test
    public void helpTest() {
        ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
        System.setErr(new PrintStream(consoleOutput));
        // Help text goes to stdout, so capture a fresh stream there.
        consoleOutput = new ByteArrayOutputStream();
        System.setOut(new PrintStream(consoleOutput));
        Main.getApp().execute("--help");
        assertTrue(consoleOutput.toString().toLowerCase().contains("usage"));
    }
    // Make sure a bad subcommand isn't accepted
    @Test
    public void validateCommandBadTest() {
        final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
        System.setErr(new PrintStream(consoleOutput));
        final int exitCode = Main.getApp().execute("fly-to-the-moon");
        assertEquals(2, exitCode);
    }
    // Test to make sure quotes are removed
    @Test
    public void quotesAreRemovedTest() {
        final String[] args = {"encrypt", "C:\\User Name\\Here", "--schema=\"schema\"", "--id=\"" + GeneralTestUtility.EXAMPLE_SALT + "\"",
                "--overwrite=\"true\""};
        final CommandLine.ParseResult pr = CliTestUtility.verifyCliOptions(args);
        // Original args are preserved verbatim; parsed values have quotes stripped.
        final List<String> origArgs = pr.subcommands().get(0).originalArgs();
        assertArrayEquals(args, origArgs.toArray(String[]::new));
        final List<String> parsedArgs = pr.subcommands().get(0).matchedArgs().stream()
                .map(CommandLine.Model.ArgSpec::getValue).map(Object::toString).collect(Collectors.toList());
        assertEquals("encrypt", pr.subcommands().get(0).commandSpec().name());
        assertEquals("C:\\User Name\\Here", parsedArgs.get(0));
        assertEquals("schema", parsedArgs.get(1));
        assertEquals(GeneralTestUtility.EXAMPLE_SALT.toString(), parsedArgs.get(2));
        assertTrue(Boolean.parseBoolean(parsedArgs.get(3)));
    }
    // Check the encrypt command to make sure it works as expected
    @Test
    public void marshalTest() throws IOException {
        encArgs.setInput("../samples/csv/data_sample_without_quotes.csv");
        encArgs.setOutput(output.toString());
        final long sourceLineCount;
        try (Stream<String> source = Files.lines(Paths.get(encArgs.getInput()), StandardCharsets.UTF_8)) {
            sourceLineCount = source.count();
        }
        final File outputFile = new File(encArgs.getOutput());
        outputFile.deleteOnExit();
        final int exitCode = EncryptMode.getApp(cleanRoomsDao).execute(encArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        final String outputData = FileUtil.readBytes(output.toString());
        assertFalse(outputData.isBlank());
        final long targetLineCount;
        try (Stream<String> target = Files.lines(output, StandardCharsets.UTF_8)) {
            targetLineCount = target.count();
        }
        assertEquals(sourceLineCount, targetLineCount);
        // number of rows should be the same, regardless of how many columns we ended up generating
        final List<String[]> rows = CsvTestUtility.readContentAsArrays(encArgs.getOutput(), false);
        assertEquals(sourceLineCount, rows.size());
        // check each row in the result is the same size
        final int columnCount = rows.get(0).length;
        for (var row : rows) {
            assertEquals(row.length, columnCount);
        }
        // check the number of output columns is as expected
        assertEquals(11, columnCount);
    }
    // Check the decrypt command to make sure it works as expected
    @Test
    public void unmarshalTest() {
        decArgs.setInput("../samples/csv/marshalled_data_sample.csv");
        decArgs.setOutput(output.toString());
        final File outputFile = new File(decArgs.getOutput());
        final int exitCode = Main.getApp().execute(decArgs.toArray());
        assertEquals(0, exitCode);
        final String outputData = FileUtil.readBytes(outputFile.getAbsolutePath());
        assertFalse(outputData.isBlank());
        final List<String[]> preRows = CsvTestUtility.readContentAsArrays(decArgs.getInput(), false);
        final List<String[]> postRows = CsvTestUtility.readContentAsArrays(decArgs.getOutput(), false);
        // number of rows should have remained unchanged
        assertEquals(preRows.size(), postRows.size());
        // number of columns should be the same
        final int columnCount = preRows.get(0).length;
        assertEquals(columnCount, postRows.get(0).length);
        // number of columns for each row should match the columnCount
        for (int i = 0; i < preRows.size(); i++) {
            assertEquals(columnCount, preRows.get(i).length);
            assertEquals(columnCount, postRows.get(i).length);
        }
    }
    /*
     * Helper for basic round tripping tests - checks if the string will be interpreted as NULL
     * by the C3R client with default settings, so we can check this _or_ equality for correctness
     * depending on the input value.
     */
    private boolean defaultNullString(final String string) {
        return string.isBlank()
                || string.equals("\"\"");
    }
    /*
     * A "round trip" test that encrypts and then decrypts data, checking the values match or are still HMACed.
     *
     * The test takes an original input file and "explodes" it out, generating
     * 3 output columns for each input column such that each input column gets a
     * corresponding `cleartext` column, a `sealed` column, and a `fingerprint` column
     * as follows:
     * - Columns `[ColumnA, ...]` are transformed into
     * - columns `[ColumnA_cleartext, ColumnA_sealed, fingerprint, ...]` in the output.
     */
    public void clientRoundTripTest(final FileFormat fileFormat) throws IOException {
        // NOTE: We use a version of the sample data with enough quotes to make round trip
        // equalities work more simply
        final String originalPath;
        if (fileFormat == FileFormat.CSV) {
            originalPath = "../samples/csv/data_sample_with_quotes.csv";
        } else {
            originalPath = "../samples/parquet/data_sample.parquet";
        }
        final Path marshalledPath = FileTestUtility.resolve("clientRoundTripTest.marshalled." + fileFormat);
        final Path unmarshalledPath = FileTestUtility.resolve("clientRoundTripTest.unmarshalled." + fileFormat);
        final String marshalledStr = marshalledPath.toString();
        final String unmarshalledStr = unmarshalledPath.toString();
        encArgs.setInput(originalPath);
        encArgs.setOutput(marshalledStr);
        // x3 schema generates cleartext, sealed, and fingerprint columns for each source column.
        encArgs.setSchema("../samples/schema/config_sample_x3.json");
        encArgs.setPreserveNulls(false);
        final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
        int exitCode = EncryptMode.getApp(cleanRoomsDao).execute(encArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        decArgs.setInput(marshalledStr);
        decArgs.setOutput(unmarshalledStr);
        decArgs.setFailOnFingerprintColumns(false);
        exitCode = Main.getApp().execute(decArgs.toArray());
        assertEquals(0, exitCode);
        final String outputData = FileUtil.readBytes(unmarshalledStr);
        assertFalse(outputData.isBlank());
        final List<String[]> preRows;
        final List<String[]> postRows;
        if (fileFormat == FileFormat.CSV) {
            preRows = CsvTestUtility.readContentAsArrays(originalPath, false);
            postRows = CsvTestUtility.readContentAsArrays(unmarshalledStr, false);
        } else {
            preRows = ParquetTestUtility.readContentAsStringArrays(originalPath);
            postRows = ParquetTestUtility.readContentAsStringArrays(unmarshalledStr);
        }
        // number of rows should be the same
        assertEquals(preRows.size(), postRows.size());
        // drop header row if source is a CSV file
        if (fileFormat == FileFormat.CSV) {
            preRows.remove(0);
            postRows.remove(0);
        }
        // IMPORTANT! The original data should have no duplicates in the first row,
        // so we can sort the data to easily compare it.
        preRows.sort(Comparator.comparing(row -> row[0]));
        postRows.sort(Comparator.comparing(row -> row[0]));
        // check that the cleartext and sealed columns returned
        // the same results back but the fingerprint column is still HMACed
        for (int i = 0; i < preRows.size(); i++) {
            final var preRow = preRows.get(i);
            final var postRow = postRows.get(i);
            assertEquals(preRow.length * 3, postRow.length);
            for (int j = 0; j < preRow.length; j++) {
                if (defaultNullString(preRow[j])) {
                    assertTrue(defaultNullString(postRow[j * 3]));
                    assertTrue(defaultNullString(postRow[j * 3 + 1]));
                } else {
                    assertEquals(preRow[j], postRow[j * 3]);
                    assertEquals(preRow[j], postRow[j * 3 + 1]);
                }
                assertNotEquals(preRow[j], postRow[j * 3 + 2]);
            }
        }
    }
    // Round trip CSV data through encrypt then decrypt
    @Test
    public void csvRoundTripTest() throws IOException {
        clientRoundTripTest(FileFormat.CSV);
    }
    // Round trip Parquet data through encrypt then decrypt
    @Test
    public void parquetRoundTripTest() throws IOException {
        clientRoundTripTest(FileFormat.PARQUET);
    }
}
| 2,684 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cli/SchemaModeDryRunTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class SchemaModeDryRunTest {
    // Sample CSV input used by most tests.
    private static final String INPUT_CSV_PATH = "../samples/csv/data_sample_without_quotes.csv";
    // Sample Parquet input used by Parquet-specific tests.
    private static final String INPUT_PARQUET_PATH = "../samples/parquet/data_sample.parquet";
    // Schema CLI arguments, rebuilt before each test.
    private SchemaCliConfigTestUtility schemaArgs;
    // Command instance under test; assigned inside runMainWithCliArgs.
    private SchemaMode main;
    // Mocked Clean Rooms DAO returning low-assurance collaboration settings.
    private CleanRoomsDao mockCleanRoomsDao;
    @BeforeEach
    public void setup() throws IOException {
        final String output = FileTestUtility.createTempFile("schema", ".json").toString();
        schemaArgs = SchemaCliConfigTestUtility.builder().overwrite(true).input(INPUT_CSV_PATH)
                .output(output).build();
        mockCleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(mockCleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
    }
    // Runs the schema command with the current args and asserts pass/fail via exit code.
    public void runMainWithCliArgs(final boolean passes) {
        main = new SchemaMode(mockCleanRoomsDao);
        final int exitCode = new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        if (passes) {
            assertEquals(0, exitCode);
        } else {
            assertNotEquals(0, exitCode);
        }
    }
    // Minimal arguments should succeed and populate the required input path.
    @Test
    public void minimumViableArgsTest() {
        runMainWithCliArgs(true);
        assertEquals(INPUT_CSV_PATH, main.getRequiredArgs().getInput());
    }
    // With no --output, the schema is written next to the working dir as <input>.json.
    @Test
    public void defaultOutputFileTest() {
        final File sourceFile = new File(INPUT_CSV_PATH);
        final File targetFile = new File(sourceFile.getName() + ".json");
        targetFile.deleteOnExit();
        schemaArgs.setOutput(null);
        runMainWithCliArgs(true);
        assertNull(main.getOptionalArgs().getOutput());
        assertTrue(targetFile.exists());
        assertTrue(targetFile.length() > 0);
        // assert sourceFile directory is stripped and targetFile is associated with the working directory.
        assertNotNull(sourceFile.getParentFile());
        assertNull(targetFile.getParentFile());
        assertTrue(targetFile.getAbsolutePath().contains(FileUtil.CURRENT_DIR));
    }
    // An explicit --output path is honored.
    @Test
    public void specifiedOutputFileTest() {
        final File schemaOutput = new File("output.json");
        schemaOutput.deleteOnExit();
        schemaArgs.setOutput("output.json");
        runMainWithCliArgs(true);
        assertEquals("output.json", main.getOptionalArgs().getOutput());
    }
    // An input that looks like a flag must be rejected.
    @Test
    public void validateInputBlankTest() {
        schemaArgs.setInput("--invalid");
        runMainWithCliArgs(false);
    }
    // A blank output path must be rejected.
    @Test
    public void getTargetFileEmptyTest() {
        schemaArgs.setOutput("");
        runMainWithCliArgs(false);
    }
    // An unrecognized verbosity level must be rejected.
    @Test
    public void validateBadLogLevelErrorTest() {
        schemaArgs.setVerbosity("SUPER-LOUD-PLEASE");
        runMainWithCliArgs(false);
    }
    // Interactive mode must fail cleanly (empty output file) when stdin ends early.
    @Test
    public void schemaInteractiveTerminatedInputTest() throws IOException {
        final Path schemaPath = Files.createTempFile("schema", ".json");
        schemaPath.toFile().deleteOnExit();
        schemaArgs.setOutput(schemaPath.toAbsolutePath().toString());
        schemaArgs.setSubMode("--interactive");
        final var args = schemaArgs.toList();
        args.remove(0);
        // user input which ends unexpectedly during interactive CLI session
        final var userInput = new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8));
        System.setIn(new BufferedInputStream(userInput));
        final int exitCode = new CommandLine(new SchemaMode(mockCleanRoomsDao)).execute(args.toArray(new String[0]));
        assertNotEquals(0, exitCode);
        assertTrue(schemaPath.toFile().exists());
        assertEquals(0, schemaPath.toFile().length());
    }
    // An unknown sub-mode flag must be reported on stderr.
    @Test
    public void testInvalidModeSetting() {
        final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
        final PrintStream pErr = new PrintStream(consoleOutput);
        System.setErr(pErr);
        schemaArgs.setSubMode("--invalidMode");
        runMainWithCliArgs(false);
        final String expected = "Unknown option: '--invalidMode'";
        assertTrue(consoleOutput.toString(StandardCharsets.UTF_8).contains(expected));
    }
    // Omitting both -t and -i must produce the "missing required argument" error.
    @Test
    public void testMissingModeSettings() {
        final ByteArrayOutputStream nullConsoleOutput = new ByteArrayOutputStream();
        final PrintStream pNullErr = new PrintStream(nullConsoleOutput);
        System.setErr(pNullErr);
        assertDoesNotThrow(() -> new CommandLine(new SchemaMode(mockCleanRoomsDao))
                .execute("--output=" + schemaArgs.getOutput(), INPUT_CSV_PATH));
        assertTrue(nullConsoleOutput.toString(StandardCharsets.UTF_8)
                .startsWith("Error: Missing required argument (specify one of these):"
                        + " (-t | -i)"));
    }
    // An unrecognized extension with no explicit format must fail.
    @Test
    public void unknownFileFormatTest() throws IOException {
        final String schemaUnknownExtensionPath = FileTestUtility.createTempFile("schema", ".unknown").toString();
        schemaArgs.setInput(schemaUnknownExtensionPath);
        schemaArgs.setFileFormat(null);
        runMainWithCliArgs(false);
    }
    // Explicit CSV format on a CSV input succeeds.
    @Test
    public void supportedFileFormatFlagCsvTest() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setFileFormat(FileFormat.CSV);
        runMainWithCliArgs(true);
    }
    // A format flag that contradicts the file contents must fail.
    @Test
    public void unsupportedFileFormatFlagTest() throws IOException {
        final String schemaUnsupportedExtensionPath = FileTestUtility.createTempFile("schema", ".unsupported").toString();
        schemaArgs.setInput(schemaUnsupportedExtensionPath);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        runMainWithCliArgs(false);
    }
    // Explicit Parquet format on a Parquet input succeeds.
    @Test
    public void supportedFileFormatFlagParquetTest() {
        schemaArgs.setInput(INPUT_PARQUET_PATH);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        runMainWithCliArgs(true);
    }
    // --noHeaders is valid for CSV input.
    @Test
    public void noHeadersCsvTest() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setFileFormat(FileFormat.CSV);
        schemaArgs.setNoHeaders(true);
        runMainWithCliArgs(true);
    }
    // --noHeaders is invalid for Parquet input (Parquet always carries a schema).
    @Test
    public void noHeadersParquetTest() {
        schemaArgs.setInput(INPUT_PARQUET_PATH);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        schemaArgs.setNoHeaders(true);
        runMainWithCliArgs(false);
    }
    // A collaboration ID that is not a UUID must be rejected.
    @Test
    public void testInvalidIdFormat() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setCollaborationId("invalidCollaborationId");
        runMainWithCliArgs(false);
    }
    // A valid UUID collaboration ID is accepted.
    @Test
    public void testValidId() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
        runMainWithCliArgs(true);
    }
    @Test
    public void noProfileOrRegionFlagsTest() {
        // Ensure that if no profile or region flag are passed, then the CleanRoomsDao are not constructed
        // with any explicit values for them (i.e., ensuring the defaults are used)
        main = new SchemaMode(mockCleanRoomsDao);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertNull(main.getOptionalArgs().getProfile());
        assertNull(main.getCleanRoomsDao().getRegion());
    }
    @Test
    public void profileFlagTest() throws IOException {
        // Ensure that passing a value via the --profile flag is given to the CleanRoomsDao builder's `profile(..)` method.
        final String myProfileName = "my-profile-name";
        assertNotEquals(myProfileName, mockCleanRoomsDao.toString());
        schemaArgs.setProfile(myProfileName);
        schemaArgs.setCollaborationId(UUID.randomUUID().toString());
        main = new SchemaMode(mockCleanRoomsDao);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertEquals(myProfileName, main.getOptionalArgs().getProfile());
        assertEquals(myProfileName, main.getCleanRoomsDao().getProfile());
    }
    // The --region flag must be propagated to the CleanRoomsDao.
    @Test
    public void regionFlagTest() {
        final String myRegion = "collywobbles";
        schemaArgs.setRegion(myRegion);
        schemaArgs.setCollaborationId(UUID.randomUUID().toString());
        main = new SchemaMode(mockCleanRoomsDao);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertEquals(myRegion, main.getOptionalArgs().getRegion());
        assertEquals(myRegion, main.getCleanRoomsDao().getRegion());
    }
}
| 2,685 |
0 | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/test/java/com/amazonaws/c3r/cleanrooms/CleanRoomsDaoTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cleanrooms;
import org.mockito.stubbing.Answer;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public final class CleanRoomsDaoTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CleanRoomsDaoTestUtility() {
    }

    /**
     * Builds a Mockito mock of {@link CleanRoomsDao} whose fluent {@code withProfile} and
     * {@code withRegion} setters record their argument so the matching getter returns it.
     *
     * @return A mock DAO suitable for CLI tests
     */
    public static CleanRoomsDao generateMockDao() {
        final CleanRoomsDao dao = org.mockito.Mockito.mock(CleanRoomsDao.class);
        // withProfile(..) re-stubs getProfile() to echo the value it was given.
        when(dao.withProfile(any())).thenAnswer((Answer<CleanRoomsDao>) invocation -> {
            when(dao.getProfile()).thenReturn(invocation.getArgument(0));
            return dao;
        });
        // withRegion(..) re-stubs getRegion() to echo the value it was given.
        when(dao.withRegion(any())).thenAnswer((Answer<CleanRoomsDao>) invocation -> {
            when(dao.getRegion()).thenReturn(invocation.getArgument(0));
            return dao;
        });
        // Until withRegion(..) is called, getRegion() falls through to the real implementation.
        when(dao.getRegion()).thenCallRealMethod();
        return dao;
    }
}
| 2,686 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/SchemaGeneratorUtils.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ColumnHeader;
/**
* Common utility functions used by schema generators.
*/
public final class SchemaGeneratorUtils {
    /** Hidden utility constructor. */
    private SchemaGeneratorUtils() {
    }
    /**
     * Returns a string for user-facing messages which references the specified column.
     * I.e., either {@code "column `COLUMN_NAME`"} or {@code "column COLUMN_1BASED_INDEX"}
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A reference string for user facing I/O
     */
    public static String columnReference(final ColumnHeader columnHeader, final int columnIndex) {
        if (columnHeader != null) {
            return "column `" + columnHeader + "`";
        } else {
            // Headerless file: fall back to a positional name derived from the index.
            return ColumnHeader.of(columnIndex).toString();
        }
    }
    /**
     * Returns a user-facing warning message stating the specified column cannot be encrypted in any way.
     *
     * <p>
     * The continuation lines are indented inside the string literal so the message
     * aligns under "WARNING: " on the console.
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A warning string user facing I/O
     */
    public static String unsupportedTypeWarning(final ColumnHeader columnHeader, final int columnIndex) {
        final String columnName = columnReference(columnHeader, columnIndex);
        return "WARNING: " + columnName + " contains non-string data and cannot be\n" +
                "         used for cryptographic computing. Any target column(s) generated\n" +
                "         from this column will be cleartext.";
    }
    /**
     * Returns a user-facing message stating the specified column cannot be encrypted in any way AND is being skipped.
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A warning string user facing I/O
     */
    public static String unsupportedTypeSkippingColumnWarning(final ColumnHeader columnHeader, final int columnIndex) {
        final String columnName = columnReference(columnHeader, columnIndex);
        final var sb = new StringBuilder();
        sb.append("WARNING: " + columnName + " contains non-string data and cannot be\n");
        sb.append("         used for cryptographic computing. This column is being skipped\n");
        sb.append("         because the collaboration does not permit cleartext columns.");
        return sb.toString();
    }
}
| 2,687 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/ParquetSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.ParquetRowReader;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.NonNull;
/**
* Used to generate a schema file for a specific Parquet file. User can ask for either a simple, autogenerated schema or be walked through
* the entire schema creation process.
*/
public final class ParquetSchemaGenerator extends SchemaGenerator {
    /**
     * Set up for schema generation and validate settings.
     *
     * @param inputParquetFile Parquet file to read header information from
     * @param targetJsonFile Where to save the schema
     * @param overwrite Whether the {@code targetJsonFile} should be overwritten (if it exists)
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     * @param binaryAsString If {@code true}, treat unannounced binary values as strings
     */
    @Builder
    private ParquetSchemaGenerator(@NonNull final String inputParquetFile,
                                   @NonNull final String targetJsonFile,
                                   @NonNull final Boolean overwrite,
                                   final ClientSettings clientSettings,
                                   final Boolean binaryAsString) {
        super(inputParquetFile, targetJsonFile, overwrite, clientSettings);
        // Fail fast if the source file is missing or unreadable.
        FileUtil.verifyReadableFile(inputParquetFile);
        // Open the Parquet file once to capture its column names and client data types
        // for the inherited sourceHeaders/sourceColumnTypes fields.
        final var reader = ParquetRowReader.builder().sourceName(inputParquetFile).binaryAsString(binaryAsString).build();
        sourceHeaders = reader.getHeaders();
        sourceColumnTypes = reader.getParquetSchema().getColumnClientDataTypes();
    }
}
| 2,688 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/TemplateSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.json.GsonUtil;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import lombok.Builder;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
/**
* Used to create a simple schema without user input. Creates a one-to-one mapping in the output JSON file which the user can then edit to
* select the transform and padding types they would like.
*/
@Slf4j
public final class TemplateSchemaGenerator {
/**
* String for user-facing messaging showing column type options.
*/
private static final String ALL_COLUMN_TYPES = "[" +
Arrays.stream(ColumnType.values())
.map(ColumnType::toString)
.collect(Collectors.joining("|")) +
"]";
/**
* String for user-facing messaging showing column type options.
*/
private static final String ALL_COLUMN_TYPES_SANS_CLEARTEXT = "[" +
Arrays.stream(ColumnType.values())
.filter(c -> c != ColumnType.CLEARTEXT)
.map(ColumnType::toString)
.collect(Collectors.joining("|")) +
"]";
/**
* The contents to be printed for each pad in the output, along with instructions on how to use it.
*/
private static final JsonObject EXAMPLE_PAD;
static {
EXAMPLE_PAD = new JsonObject();
EXAMPLE_PAD.addProperty("COMMENT", "omit this pad entry unless column type is sealed");
EXAMPLE_PAD.addProperty("type", "[none|fixed|max]");
EXAMPLE_PAD.addProperty("length", "omit length property for type none, otherwise specify value in [0, 10000]");
}
/**
* Console output stream.
*/
private final PrintStream consoleOutput;
/**
* Names of the columns in the input data.
*/
private final List<ColumnHeader> headers;
/**
* Number of source columns.
*/
private final int sourceColumnCount;
/**
* Source column types (in the order they appear in the input file).
*/
private final List<ClientDataType> sourceColumnTypes;
/**
* Where to write the schema file.
*/
private final String targetJsonFile;
/**
* Options for column types based on ClientSettings (if provided).
*/
private final String columnTypeOptions;
/**
* Whether this schema can have cleartext columns.
*/
private final boolean allowCleartextColumns;
/**
* Initializes the automated schema generator.
*
* @param sourceHeaders List of column names in the input file
* @param sourceColumnTypes Source column types (in the order they appear in the input file)
* @param targetJsonFile Where to write the schema
* @param consoleOutput Connection to output stream (i.e., output for user)
* @param clientSettings Collaboration's client settings if provided, else {@code null}
* @throws C3rIllegalArgumentException If input sizes are inconsistent
*/
@Builder
private TemplateSchemaGenerator(final List<ColumnHeader> sourceHeaders,
@NonNull final List<ClientDataType> sourceColumnTypes,
@NonNull final String targetJsonFile,
final PrintStream consoleOutput,
final ClientSettings clientSettings) {
if (sourceHeaders != null && sourceHeaders.size() != sourceColumnTypes.size()) {
throw new C3rIllegalArgumentException("Template schema generator given "
+ sourceHeaders.size() + " headers and " + sourceColumnTypes.size() + " column data types.");
}
this.headers = sourceHeaders == null ? null : List.copyOf(sourceHeaders);
this.sourceColumnTypes = sourceColumnTypes;
this.sourceColumnCount = sourceColumnTypes.size();
this.targetJsonFile = targetJsonFile;
this.consoleOutput = (consoleOutput == null) ? new PrintStream(System.out, true, StandardCharsets.UTF_8)
: consoleOutput;
allowCleartextColumns = clientSettings == null || clientSettings.isAllowCleartext();
if (allowCleartextColumns) {
columnTypeOptions = ALL_COLUMN_TYPES;
} else {
columnTypeOptions = ALL_COLUMN_TYPES_SANS_CLEARTEXT;
}
}
/**
 * Builds one template column schema per named source column, using the (non-{@code null})
 * source {@code headers}.
 *
 * @return The generated template column schemas
 */
private JsonArray generateTemplateColumnSchemasFromSourceHeaders() {
    final JsonArray columnSchemaArray = new JsonArray(headers.size());
    for (int i = 0; i < sourceColumnCount; i++) {
        final ColumnHeader header = headers.get(i);
        final ClientDataType dataType = sourceColumnTypes.get(i);
        if (dataType == ClientDataType.UNKNOWN && !allowCleartextColumns) {
            // Unsupported type and cleartext disallowed: drop the column entirely and warn.
            consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(header, i));
            continue;
        }
        final JsonObject entry = new JsonObject();
        entry.addProperty("sourceHeader", header.toString());
        entry.addProperty("targetHeader", header.toString());
        if (dataType != ClientDataType.UNKNOWN) {
            entry.addProperty("type", columnTypeOptions);
            entry.add("pad", EXAMPLE_PAD);
        } else {
            // Cleartext is the only viable option for an unsupported type.
            consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeWarning(header, i));
            entry.addProperty("type", ColumnType.CLEARTEXT.toString());
        }
        columnSchemaArray.add(entry);
    }
    return columnSchemaArray;
}
/**
 * Builds template column schemas for a headerless source. Each source column maps to a
 * (possibly empty) array holding its single template entry, positional-schema style.
 *
 * @return The generated template column schemas
 */
private JsonArray generateTemplateColumnSchemasFromColumnCount() {
    final JsonArray columnSchemaArray = new JsonArray(sourceColumnCount);
    for (int i = 0; i < sourceColumnCount; i++) {
        // Positional schemas nest each column's template entries inside their own array.
        final JsonArray entryArray = new JsonArray(1);
        final ClientDataType dataType = sourceColumnTypes.get(i);
        if (dataType != ClientDataType.UNKNOWN) {
            final JsonObject templateEntry = new JsonObject();
            templateEntry.addProperty("targetHeader", ColumnHeader.of(i).toString());
            templateEntry.addProperty("type", columnTypeOptions);
            templateEntry.add("pad", EXAMPLE_PAD);
            entryArray.add(templateEntry);
        } else if (allowCleartextColumns) {
            // Unsupported type but cleartext allowed: emit a cleartext-only entry (no pad).
            final JsonObject templateEntry = new JsonObject();
            templateEntry.addProperty("targetHeader", ColumnHeader.of(i).toString());
            templateEntry.addProperty("type", ColumnType.CLEARTEXT.toString());
            entryArray.add(templateEntry);
        } else {
            // If the column type does not support cryptographic computing and cleartext columns are not allowed,
            // then we do not add a template entry to the array, and we warn the user this column has been skipped.
            consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(null, i));
        }
        columnSchemaArray.add(entryArray);
    }
    return columnSchemaArray;
}
/**
 * Generate a template schema. I.e., the type (see {@link com.amazonaws.c3r.config.ColumnType}) and padding
 * (see {@link com.amazonaws.c3r.config.PadType}) are left with all possible options and must be manually edited.
 *
 * @throws C3rRuntimeException If unable to write to the target file
 */
public void run() {
    // Column generation strategy depends on whether the source file has a header row.
    final boolean hasHeaderRow = headers != null;
    final JsonArray columns = hasHeaderRow
            ? generateTemplateColumnSchemasFromSourceHeaders()
            : generateTemplateColumnSchemasFromColumnCount();
    final JsonObject schemaContent = new JsonObject();
    schemaContent.addProperty("headerRow", hasHeaderRow);
    schemaContent.add("columns", columns);
    try (BufferedWriter writer = Files.newBufferedWriter(Path.of(targetJsonFile), StandardCharsets.UTF_8)) {
        writer.write(GsonUtil.toJson(schemaContent));
    } catch (IOException e) {
        throw new C3rRuntimeException("Could not write to target schema file.", e);
    }
    log.info("Template schema written to {}.", targetJsonFile);
    log.info("Schema requires manual modification before use:");
    log.info("  * Types for each column must be selected.");
    log.info("  * Pad entry must be modified for each sealed column and removed for other column types.");
    log.info("Resulting schema must be valid JSON (e.g., final entries in objects have no trailing comma, etc).");
}
}
| 2,689 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/CsvSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
/**
* Used to generate a schema file for a specific CSV file. User can ask for either a simple, autogenerated schema or be walked through
* the entire schema creation process.
*/
@Slf4j
public final class CsvSchemaGenerator extends SchemaGenerator {
    /**
     * How many columns are in the source file.
     */
    @Getter
    private final int sourceColumnCount;

    /**
     * CSV file to generate a schema for.
     */
    private final String inputCsvFile;

    /**
     * Schema file location.
     */
    private final String targetJsonFile;

    /**
     * Set up for schema generation and validate settings.
     *
     * @param inputCsvFile CSV file to read header information from
     * @param targetJsonFile Where to save the schema
     * @param overwrite If the {@code targetJsonFile} should be overwritten if it exists
     * @param hasHeaders Does the first source row contain column headers?
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     */
    @Builder
    private CsvSchemaGenerator(@NonNull final String inputCsvFile,
                               @NonNull final String targetJsonFile,
                               @NonNull final Boolean overwrite,
                               @NonNull final Boolean hasHeaders,
                               final ClientSettings clientSettings) {
        super(inputCsvFile, targetJsonFile, overwrite, clientSettings);
        this.inputCsvFile = inputCsvFile;
        this.targetJsonFile = targetJsonFile;
        // NOTE: the superclass constructor already calls FileUtil.initFileIfNotExists(targetJsonFile),
        // so the previously duplicated call here has been removed.
        if (hasHeaders) {
            // Read only the header row to learn the column names (and thus the column count).
            final CsvRowReader reader = CsvRowReader.builder()
                    .sourceName(inputCsvFile)
                    .build();
            try {
                sourceHeaders = reader.getHeaders();
            } finally {
                // FIX: close in a finally block so the reader is released even if header parsing throws.
                reader.close();
            }
            sourceColumnCount = sourceHeaders.size();
        } else {
            sourceColumnCount = CsvRowReader.getCsvColumnCount(inputCsvFile, null);
            sourceHeaders = null;
        }
        // Every CSV value shares a single client data type.
        this.sourceColumnTypes = Collections.nCopies(sourceColumnCount, CsvValue.CLIENT_DATA_TYPE);
    }
}
| 2,690 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/SchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.cli.SchemaMode;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Getter;
import lombok.NonNull;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.List;
/**
* Helps generate a schema for a file with a supported data format.
*/
public abstract class SchemaGenerator {
/**
* The headers for the source file, or {@code null} if the file has none.
*/
@Getter
protected List<ColumnHeader> sourceHeaders;
/**
* Column types for the source file.
*/
@Getter
protected List<ClientDataType> sourceColumnTypes;
/**
* The file a schema will be generated for.
*/
private final String inputFile;
/**
* The location the generated schema will be stored.
*/
private final String targetJsonFile;
/**
* Clean room cryptographic settings.
*/
private final ClientSettings clientSettings;
/**
* Setup common schema generator component.
*
* @param inputFile Input data file for processing
* @param targetJsonFile Schema file mapping input to output file data
* @param overwrite Whether to overwrite the output file if it already exists
* @param clientSettings Collaboration settings if available, else {@code null}
*/
protected SchemaGenerator(@NonNull final String inputFile,
@NonNull final String targetJsonFile,
@NonNull final Boolean overwrite,
final ClientSettings clientSettings) {
this.inputFile = inputFile;
this.targetJsonFile = targetJsonFile;
validate(overwrite);
FileUtil.initFileIfNotExists(targetJsonFile);
this.clientSettings = clientSettings;
}
/**
* Verifies that input and target files have appropriate permissions.
*
* @param overwrite If the target JSON file can overwrite an existing file
*/
private void validate(final boolean overwrite) {
FileUtil.verifyReadableFile(inputFile);
FileUtil.verifyWritableFile(targetJsonFile, overwrite);
}
/**
* Generate a schema file.
*
* @param subMode How the schema file should be generated
* @throws C3rIllegalArgumentException If the schema generation mode is invalid
*/
public void generateSchema(final SchemaMode.SubMode subMode) {
// CHECKSTYLE:OFF
System.out.println();
System.out.println("A schema file will be generated for file " + inputFile + ".");
// CHECKSTYLE:ON
if (subMode.isInteractiveMode()) {
InteractiveSchemaGenerator.builder()
.sourceHeaders(getSourceHeaders())
.sourceColumnTypes(getSourceColumnTypes())
.targetJsonFile(targetJsonFile)
.consoleInput(new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8)))
.consoleOutput(System.out)
.clientSettings(clientSettings)
.build()
.run();
} else if (subMode.isTemplateMode()) {
TemplateSchemaGenerator.builder()
.sourceHeaders(getSourceHeaders())
.sourceColumnTypes(getSourceColumnTypes())
.targetJsonFile(targetJsonFile)
.clientSettings(clientSettings)
.build()
.run();
} else {
throw new C3rIllegalArgumentException("Schema generation mode must be interactive or template.");
}
}
} | 2,691 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/InteractiveSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.internal.PadUtil;
import com.amazonaws.c3r.json.GsonUtil;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Builder;
import lombok.NonNull;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Walks the user through creating a customized schema for their data.
*/
public final class InteractiveSchemaGenerator {
/**
* Column schemas generated by user input are stored here in a list of lists
* like a {@link PositionalTableSchema} during processing. When the user is done providing input,
* this list of lists is either used to create a {@link PositionalTableSchema} or is flattened
* and used to create a {@link MappedTableSchema}, depending on the type of schema being generated.
*
* <p>
* NOTE: The length of this list during user-interaction directly corresponds to the 0-based
* index of the source column schemas being generated at that moment.
*/
private final List<List<ColumnSchema>> generatedColumnSchemas;
/**
* Column header names already used for target column headers to prevent duplicates.
*/
private final Set<ColumnHeader> usedColumnHeaders;
/**
* Source headers from the input file or {@code null} if the file has no headers.
*/
private final List<ColumnHeader> headers;
/**
* Number of source columns.
*/
private final int sourceColumnCount;
/**
* Number of columns that cannot be supported (e.g., if the column cannot be
* encrypted and cleartext columns are disallowed).
*/
private int unsupportedTypeColumnCount;
/**
* Source column types (in the order they appear in the input file).
*/
private final List<ClientDataType> sourceColumnTypes;
/**
* JSON file schema will be written to.
*/
private final String targetJsonFile;
/**
* Console input from user.
*/
private final BufferedReader consoleInput;
/**
* Console output stream.
*/
private final PrintStream consoleOutput;
/**
* Whether cleartext columns possible for this schema.
*/
private final boolean allowCleartextColumns;
/**
* Sets up the schema generator to run in interactive mode. Makes I/O connections to console, processes header information and
* initializes preprocessing state.
*
* @param sourceHeaders Column names in data file if they exist, otherwise {@code null}
* @param sourceColumnTypes The column types in the file in the order they appear
* @param targetJsonFile Where schema should be written
* @param consoleInput Connection to input stream (i.e., input from user)
* @param consoleOutput Connection to output stream (i.e., output for user)
* @param clientSettings Collaboration's client settings if provided, else {@code null}
* @throws C3rIllegalArgumentException If input sizes are inconsistent
*/
@Builder
@SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
private InteractiveSchemaGenerator(final List<ColumnHeader> sourceHeaders,
@NonNull final List<ClientDataType> sourceColumnTypes,
@NonNull final String targetJsonFile,
final BufferedReader consoleInput,
final PrintStream consoleOutput,
final ClientSettings clientSettings) {
if (sourceHeaders != null && sourceHeaders.size() != sourceColumnTypes.size()) {
throw new C3rIllegalArgumentException("Interactive schema generator given " + sourceHeaders.size() + " headers and " +
sourceColumnTypes.size() + " column data types.");
}
this.headers = sourceHeaders == null ? null : List.copyOf(sourceHeaders);
this.sourceColumnTypes = sourceColumnTypes;
this.sourceColumnCount = sourceColumnTypes.size();
this.unsupportedTypeColumnCount = 0;
this.targetJsonFile = targetJsonFile;
this.consoleInput = (consoleInput == null)
? new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8))
: consoleInput;
this.consoleOutput = (consoleOutput == null) ? new PrintStream(System.out, true, StandardCharsets.UTF_8)
: consoleOutput;
this.allowCleartextColumns = clientSettings == null || clientSettings.isAllowCleartext();
generatedColumnSchemas = new ArrayList<>();
usedColumnHeaders = new HashSet<>();
}
/**
 * Whether the source file has headers.
 *
 * @return {@code true} if the source file has headers, else {@code false}.
 */
private boolean hasHeaders() {
    // Headers are stored as null (not as an empty list) when the source is headerless.
    return headers != null;
}
/**
 * Has the user create the schema and writes it to a file. Also does some validation on the created schema such as at least one output
 * column was specified.
 *
 * @throws C3rRuntimeException If an I/O error occurs opening or creating the file
 */
public void run() {
    if (!allowCleartextColumns) {
        consoleOutput.println();
        consoleOutput.println("NOTE: Cleartext columns are not permitted for this collaboration");
        consoleOutput.println(" and will not be provided as an option in prompts.");
    }
    generateColumns();
    // Flatten the per-source-column lists so we can tell whether any target columns exist at all.
    final List<ColumnSchema> flattenedColumnSchemas = new ArrayList<>();
    for (final List<ColumnSchema> columnSchemas : generatedColumnSchemas) {
        flattenedColumnSchemas.addAll(columnSchemas);
    }
    if (flattenedColumnSchemas.isEmpty()) {
        if (unsupportedTypeColumnCount >= sourceColumnCount) {
            consoleOutput.println("No source columns could be considered for output:");
            consoleOutput.println(" all columns were of an unsupported type and the");
            consoleOutput.println(" specified collaboration does not allow cleartext.");
        } else {
            consoleOutput.println("No target columns were specified.");
        }
        return;
    }
    // Named-column sources produce a mapped schema; headerless sources a positional one.
    final TableSchema schema = hasHeaders()
            ? new MappedTableSchema(flattenedColumnSchemas)
            : new PositionalTableSchema(generatedColumnSchemas);
    try (BufferedWriter writer = Files.newBufferedWriter(Path.of(targetJsonFile), StandardCharsets.UTF_8)) {
        writer.write(GsonUtil.toJson(schema));
    } catch (IOException e) {
        throw new C3rRuntimeException("Could not write to target schema file.", e);
    }
    consoleOutput.println("Schema written to " + targetJsonFile + ".");
}
/**
 * The current source column index target columns are being generated from.
 *
 * <p>
 * One list of generated schemas is appended per completed source column, so the list's
 * current size equals the zero-based index of the column being processed right now.
 *
 * @return The current positional zero-based source index.
 */
private int getCurrentSourceColumnPosition() {
    return generatedColumnSchemas.size();
}
/**
 * The current source column's client data type (how the data is represented).
 *
 * @return The client data type for the current source column.
 */
private ClientDataType getCurrentSourceColumnDataType() {
    // Indexed by the in-progress column position tracked via generatedColumnSchemas' size.
    return sourceColumnTypes.get(getCurrentSourceColumnPosition());
}
/**
 * Gets the next line of text from the user and converts it to lowercase.
 *
 * @return Normalized user input
 * @throws C3rRuntimeException If there's an unexpected end of user input
 */
private String readNextLineLowercase() {
    final String nextLine;
    try {
        nextLine = consoleInput.readLine();
    } catch (IOException e) {
        throw new C3rRuntimeException("Unexpected end of user input.", e);
    }
    // A null line means the stream is exhausted (e.g., EOF on stdin).
    if (nextLine == null) {
        throw new C3rRuntimeException("Unexpected end of user input.");
    }
    return nextLine.toLowerCase();
}
/**
 * Prompt the user for a non-negative integer value.
 *
 * @param baseUserPrompt User prompt, sans any default value or ending question mark
 * @param defaultValue What is the default user response they can leverage by simply
 *                     pressing `return` with no entered text. {@code defaultValue == null}
 *                     implies there is no default value
 * @param maxValue The maximum allowed value (inclusive)
 * @return The user chosen value via the interaction, or {@code null} if no acceptable user input was found
 */
Integer promptNonNegativeInt(final String baseUserPrompt,
                             final Integer defaultValue,
                             final int maxValue) {
    final var promptSB = new StringBuilder(baseUserPrompt);
    if (defaultValue != null) {
        promptSB.append(" (default `").append(defaultValue).append("`)");
    }
    promptSB.append("? ");
    consoleOutput.print(promptSB);
    final int num;
    final String userInput = readNextLineLowercase();
    try {
        num = Integer.parseInt(userInput);
    } catch (NumberFormatException e) {
        if (userInput.isBlank()) {
            // Blank input selects the default when there is one; otherwise it's an error.
            if (defaultValue == null) {
                consoleOutput.println("Expected an integer >= 0, but found no input.");
            }
            return defaultValue;
        } else {
            consoleOutput.println("Expected an integer >= 0, but found `" + userInput + "`.");
            return null;
        }
    }
    if (num < 0) {
        consoleOutput.println("Expected an integer >= 0, but found " + num + ".");
        return null;
    } else if (num > maxValue) {
        // FIX: the bound check accepts num == maxValue, so the message must say "<=" (inclusive),
        // not "<" as it previously (incorrectly) claimed.
        consoleOutput.println("Expected an integer >= 0 and <= " + maxValue + ".");
        return null;
    }
    return num;
}
/**
 * Ask a user the {@code questionPrompt}, followed by a comma and [y]es or [n]o, and parse their response.
 *
 * @param questionPrompt What to print before `, [y]es or [n]o?`
 * @param defaultAnswer A default answer for this prompt, or {@code null} if there is none.
 * @return {@code true} if `yes`, {@code false} if `no`, {@code null} otherwise.
 */
Boolean promptYesOrNo(final String questionPrompt, final Boolean defaultAnswer) {
    final StringBuilder prompt = new StringBuilder(questionPrompt).append(", [y]es or [n]o");
    if (defaultAnswer != null) {
        prompt.append(defaultAnswer ? " (default `yes`)" : " (default `no`)");
    }
    prompt.append("? ");
    consoleOutput.print(prompt);
    final String userInput = readNextLineLowercase();
    // Any prefix of "yes" or "no" is accepted (e.g. `y`, `ye`, `n`).
    if (!userInput.isBlank()) {
        if ("yes".startsWith(userInput)) {
            return true;
        }
        if ("no".startsWith(userInput)) {
            return false;
        }
        consoleOutput.println("Expected [y]es or [n]o, but got `" + userInput + "`.");
        return null;
    }
    // Blank input selects the default when one exists.
    if (defaultAnswer != null) {
        return defaultAnswer;
    }
    consoleOutput.println("Expected [y]es or [n]o, but found no input.");
    return null;
}
/**
 * Attempt to read a ColumnType.
 *
 * @return The ColumnType if successful, or {@code null} if the input was invalid
 */
ColumnType promptColumnType() {
    // Cleartext is only offered when the collaboration settings allow it.
    final String options = allowCleartextColumns
            ? "Target column type: [c]leartext, [f]ingerprint, or [s]ealed? "
            : "Target column type: [f]ingerprint, or [s]ealed? ";
    consoleOutput.print(options);
    final String userInput = readNextLineLowercase();
    if (userInput.isBlank()) {
        consoleOutput.println("Expected a column type, but found no input.");
        return null;
    }
    // Any prefix of a valid type name selects that type.
    if (allowCleartextColumns && "cleartext".startsWith(userInput)) {
        return ColumnType.CLEARTEXT;
    }
    if ("fingerprint".startsWith(userInput)) {
        return ColumnType.FINGERPRINT;
    }
    if ("sealed".startsWith(userInput)) {
        return ColumnType.SEALED;
    }
    consoleOutput.println("Expected a valid column type, but got `" + userInput + "`.");
    return null;
}
/**
 * Repeat an action until it is non-{@code null}, e.g. for repeating requests for valid input.
 *
 * @param supplier Function that supplies the (eventually) non-null value.
 * @param <T> The type of value to be returned by the supplier.
 * @return The non-{@code null} value eventually returned by the supplier.
 */
static <T> T repeatUntilNotNull(final Supplier<T> supplier) {
    while (true) {
        final T result = supplier.get();
        if (result != null) {
            return result;
        }
    }
}
/**
 * Suggest a suffix for the output column name based on the transform between input and output data selected.
 *
 * @param columnType The data transform type that will be used (see {@link ColumnType})
 * @return The selected suffix for the column name, or {@code null} if none applies or the user declined
 */
String promptTargetHeaderSuffix(@NonNull final ColumnType columnType) {
    final String suggestedSuffix;
    if (columnType == ColumnType.SEALED) {
        suggestedSuffix = ColumnHeader.DEFAULT_SEALED_SUFFIX;
    } else if (columnType == ColumnType.FINGERPRINT) {
        suggestedSuffix = ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX;
    } else {
        // Cleartext columns carry no transform, so no suffix is suggested.
        suggestedSuffix = null;
    }
    if (suggestedSuffix == null) {
        return null;
    }
    final String prompt = "Add suffix `"
            + suggestedSuffix + "` to header to indicate how it was encrypted";
    final boolean addSuffix = repeatUntilNotNull(() -> promptYesOrNo(prompt, true));
    return addSuffix ? suggestedSuffix : null;
}
/**
 * Ask the user what they would like the column name in the output file to be. The default is the same as the input name. This is not
 * yet suggesting a suffix be added based off of encryption type.
 *
 * @param sourceHeader Input column name
 * @return Output column name, or {@code null} if the entered name was invalid
 * @throws C3rRuntimeException If there's an unexpected end of user input
 */
private ColumnHeader promptTargetHeaderPreSuffix(final ColumnHeader sourceHeader) {
    final String input;
    final ColumnHeader targetHeader;
    if (sourceHeader != null) {
        consoleOutput.print("Target column header name (default `" + sourceHeader + "`)? ");
    } else {
        consoleOutput.print("Target column header name? ");
    }
    try {
        // We intentionally do not use readNextLineLowercase() here so that we can check if the
        // string was normalized and report it to the user for their awareness (see below).
        input = consoleInput.readLine();
        // FIX: a null line means the input stream is exhausted. Previously this fell through to
        // `new ColumnHeader(null)`, whose failure was caught below and retried forever by
        // repeatUntilNotNull(). Treat it as an unexpected end of input, matching readNextLineLowercase().
        if (input == null) {
            throw new C3rRuntimeException("Unexpected end of user input.");
        }
        if (input.isBlank() && sourceHeader != null) {
            consoleOutput.println("Using default name `" + sourceHeader + "`.");
            targetHeader = sourceHeader;
        } else {
            targetHeader = new ColumnHeader(input);
        }
    } catch (C3rIllegalArgumentException e) {
        consoleOutput.println("Expected a valid header name, but found a problem: " + e.getMessage());
        return null;
    } catch (IOException e) {
        throw new C3rRuntimeException("Unexpected end of user input.", e);
    }
    if (!targetHeader.toString().equals(input) && targetHeader != sourceHeader) {
        consoleOutput.println("Target header was normalized to `" + targetHeader + "`.");
    }
    return targetHeader;
}
/**
 * Walks the user through the entire process of choosing an output column name, from the base name in
 * {@link #promptTargetHeaderPreSuffix} to the suffix in {@link #promptTargetHeaderSuffix}.
 *
 * @param sourceHeader Name of the input column
 * @param type Type of cryptographic transform being done
 * @return Complete name for target column, or {@code null} if a valid name could not be formed
 */
private ColumnHeader promptTargetHeaderAndSuffix(
        final ColumnHeader sourceHeader,
        @NonNull final ColumnType type) {
    // Ask the user for a base header name first.
    final ColumnHeader baseHeader = promptTargetHeaderPreSuffix(sourceHeader);
    if (baseHeader == null) {
        return null;
    }
    // Then check if the user wants a type-based suffix, if applicable.
    final String suffix = promptTargetHeaderSuffix(type);
    if (suffix == null) {
        return baseHeader;
    }
    try {
        return new ColumnHeader(baseHeader + suffix);
    } catch (C3rIllegalArgumentException e) {
        // The combined name may exceed length or character constraints.
        consoleOutput.println("Unable to add header suffix: " + e.getMessage());
        return null;
    }
}
/**
 * Gets the desired output header and verifies it does not match a name already specified.
 *
 * @param sourceHeader Name of input column
 * @param type Encryption transform selected
 * @return Name of the output column, or {@code null} if the prompt failed or the name was a duplicate
 */
ColumnHeader promptTargetHeader(final ColumnHeader sourceHeader,
                                @NonNull final ColumnType type) {
    final ColumnHeader targetHeader = promptTargetHeaderAndSuffix(sourceHeader, type);
    // FIX: a failed prompt returns null; previously null fell through into the dedupe bookkeeping
    // and was added to usedColumnHeaders, causing a misleading "`null` has already been used"
    // message on subsequent failures.
    if (targetHeader == null) {
        return null;
    }
    if (usedColumnHeaders.contains(targetHeader)) {
        consoleOutput.println("Expected a unique target header, but `" + targetHeader + "` has already been used in this schema.");
        return null;
    }
    usedColumnHeaders.add(targetHeader);
    return targetHeader;
}
/**
 * If the user chose {@link ColumnType#SEALED} as the transform type, ask what kind of data padding should be used, if any.
 *
 * @param targetHeader Output column name
 * @param defaultType Default type of padding to use if the user doesn't specify an option
 * @return Type of padding to use for output column, or {@code null} if the input was invalid
 */
PadType promptPadType(@NonNull final ColumnHeader targetHeader, final PadType defaultType) {
    final StringBuilder prompt = new StringBuilder("`").append(targetHeader).append("` padding type: [n]one, [f]ixed, or [m]ax");
    if (defaultType != null) {
        prompt.append(" (default `").append(defaultType.toString().toLowerCase()).append("`)");
    }
    prompt.append("? ");
    consoleOutput.print(prompt);
    final String userInput = readNextLineLowercase();
    if (userInput.isBlank()) {
        // Blank input takes the default when there is one; otherwise it's an error (still returns null).
        if (defaultType == null) {
            consoleOutput.println("Expected a padding type, but found no input.");
        }
        return defaultType;
    }
    // Any prefix of a valid pad type name selects that type.
    if ("none".startsWith(userInput)) {
        return PadType.NONE;
    }
    if ("fixed".startsWith(userInput)) {
        return PadType.FIXED;
    }
    if ("max".startsWith(userInput)) {
        return PadType.MAX;
    }
    consoleOutput.println("Expected a valid padding type, but got `" + userInput + "`.");
    return null;
}
/**
 * Get the type of padding to be used (see {@link PadType}) and length if the user chose {@link ColumnType#SEALED}.
 *
 * @param targetHeader Name of the output column
 * @return Pad type and length
 * @see PadType
 * @see Pad
 */
Pad promptPad(@NonNull final ColumnHeader targetHeader) {
    final PadType padType = repeatUntilNotNull(() -> promptPadType(targetHeader, PadType.MAX));
    if (padType == PadType.NONE) {
        // No padding requested: no length needs to be gathered.
        return Pad.DEFAULT;
    }
    final String basePrompt;
    final Integer defaultLength;
    if (padType == PadType.MAX) {
        consoleOutput.println("All values in `" + targetHeader + "` will be padded to the byte-length of the");
        consoleOutput.println("longest value plus a specified number of additional padding bytes.");
        basePrompt = "How many additional padding bytes should be used";
        defaultLength = 0;
    } else {
        // padType == PadType.FIXED: a target byte-length is mandatory, so no default.
        basePrompt = "Byte-length to pad cleartext to in `" + targetHeader + "`";
        defaultLength = null;
    }
    final int length = repeatUntilNotNull(() -> promptNonNegativeInt(basePrompt, defaultLength, PadUtil.MAX_PAD_BYTES));
    return Pad.builder().type(padType).length(length).build();
}
/**
 * Prompt for all column info to generate a target column.
 *
 * @param sourceHeader Source column target is derived from
 * @param currentTargetColumnCount This is column `N` of {@code totalTargetColumnCount}
 *                                 being generated from {@code sourceHeader}
 * @param totalTargetColumnCount Total number of columns being generated from {@code sourceHeader}.
 * @return The user-provided column specification.
 */
ColumnSchema promptColumnInfo(final ColumnHeader sourceHeader,
                              final int currentTargetColumnCount,
                              final int totalTargetColumnCount) {
    consoleOutput.println();
    consoleOutput.print("Gathering information for target column ");
    if (totalTargetColumnCount > 1) {
        // Only show "i of N" progress when more than one target column is being made.
        consoleOutput.print(currentTargetColumnCount + " of " + totalTargetColumnCount + " ");
    }
    final String columnRef = SchemaGeneratorUtils.columnReference(sourceHeader, getCurrentSourceColumnPosition());
    consoleOutput.println("from source " + columnRef + ".");
    final ColumnType columnType;
    if (getCurrentSourceColumnDataType() == ClientDataType.UNKNOWN) {
        // Unsupported data types cannot be encrypted, so cleartext is forced without prompting.
        consoleOutput.println("Cryptographic computing is not supported for this column's data type.");
        consoleOutput.println("This column's data will be cleartext.");
        columnType = ColumnType.CLEARTEXT;
    } else {
        columnType = repeatUntilNotNull(this::promptColumnType);
    }
    final ColumnHeader targetHeader = repeatUntilNotNull(() -> promptTargetHeader(sourceHeader, columnType));
    ColumnSchema.ColumnSchemaBuilder columnBuilder = ColumnSchema.builder()
            .sourceHeader(sourceHeader)
            .targetHeader(targetHeader)
            .type(columnType);
    if (columnType == ColumnType.SEALED) {
        // Only sealed columns take a pad specification.
        final Pad pad = repeatUntilNotNull(() -> promptPad(targetHeader));
        columnBuilder = columnBuilder.pad(pad);
    }
    return columnBuilder.build();
}
/**
 * Asks how many times this column will be mapped to output data. A one-to-one mapping is not assumed because multiple transform types
 * may be used.
 *
 * @param sourceHeader Name of the input column, or {@code null} for headerless sources
 */
void generateTargetColumns(final ColumnHeader sourceHeader) {
    final String columnReference = SchemaGeneratorUtils.columnReference(sourceHeader, getCurrentSourceColumnPosition());
    final int defaultTargetColumnCount = 1;
    consoleOutput.println("\nExamining source " + columnReference + ".");
    final boolean isSupportedType = getCurrentSourceColumnDataType() != ClientDataType.UNKNOWN;
    final int targetColumnCount;
    if (isSupportedType || allowCleartextColumns) {
        if (!isSupportedType) {
            // Warn that this column can only appear as cleartext
            consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeWarning(sourceHeader, getCurrentSourceColumnPosition()));
        }
        targetColumnCount = repeatUntilNotNull(() ->
                promptNonNegativeInt(
                        "Number of target columns from source " + columnReference,
                        defaultTargetColumnCount,
                        Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX));
    } else {
        // This column cannot even appear as cleartext because of collaboration settings,
        // so warn that it will be skipped
        consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(
                sourceHeader,
                getCurrentSourceColumnPosition()));
        // Tracked so run() can distinguish "user chose no columns" from "no columns were possible".
        unsupportedTypeColumnCount++;
        targetColumnCount = 0;
    }
    // schemas derived from the current source column are stored in this array
    final var targetSchemasFromSourceColumn = new ArrayList<ColumnSchema>(targetColumnCount);
    // 1-based indices since `i` is only used really to count and print user messages if `targetColumnCount > 1`
    // and `1 of N` looks better than `0 of N-1` in printed messages.
    for (int i = 1; i <= targetColumnCount; i++) {
        targetSchemasFromSourceColumn.add(promptColumnInfo(sourceHeader, i, targetColumnCount));
    }
    // Appending this list also advances getCurrentSourceColumnPosition() to the next column.
    generatedColumnSchemas.add(targetSchemasFromSourceColumn);
}
/**
 * Ask the user how to map each input column to output data until all columns have been processed.
 */
private void generateColumns() {
    // The constructor guarantees headers (when present) and sourceColumnTypes have equal sizes,
    // so a single positional loop covers both the named and headerless cases.
    for (int i = 0; i < sourceColumnCount; i++) {
        generateTargetColumns(headers == null ? null : headers.get(i));
    }
}
}
| 2,692 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/io/schema/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Utilities to create either a simple schema for how input data will be mapped to output data and a helper program for users
* who want to be walked through creating a complete schema for their data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.io.schema; | 2,693 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/utils/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Utility classes that contain commonly used functionality across components.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.utils; | 2,694 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/utils/C3rCliProperties.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.cli.CliDescriptions;
import software.amazon.awssdk.core.ApiName;
/**
 * C3R CLI properties.
 */
public final class C3rCliProperties {

    /**
     * User agent for the C3R CLI.
     *
     * <p>
     * Combines the application name with the SDK version so that AWS API calls made by the CLI
     * can be attributed to this tool.
     */
    public static final ApiName API_NAME =
            ApiName.builder().name(CliDescriptions.APP_NAME).version(C3rSdkProperties.VERSION).build();

    /**
     * Hidden utility class constructor.
     */
    private C3rCliProperties() {
    }
}
| 2,695 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/cli/CliDescriptions.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
/**
 * CliDescriptions contains the help mode description for all CLI parameters, so they are consistently described across classes.
 */
public final class CliDescriptions {
    /**
     * Name of the application.
     */
    public static final String APP_NAME = "c3r-cli";

    /**
     * Description of AWS profile.
     */
    // Fixed: the two concatenated literals previously produced a double space ("AWS  SDK") in the help text.
    public static final String AWS_PROFILE_DESCRIPTION = "AWS CLI profile for credentials and config (uses AWS " +
            "SDK default if omitted)";

    /**
     * Description of AWS region.
     */
    public static final String AWS_REGION_DESCRIPTION = "AWS region for API requests (uses AWS SDK default if omitted)";

    /**
     * Description of how to allow for custom CSV values in the input file.
     */
    public static final String ENCRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION = "Value representing how NULL is encoded in the input CSV data " +
            "(unquoted blank values and empty quotes are all interpreted as NULL (e.g., `,,`, `, ,` and `,\"\",`) by default)";

    /**
     * Description of how to allow for custom CSV NULL values in the encrypted output file.
     */
    public static final String ENCRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION = "The encoding of cleartext NULL values in the output file " +
            "(encrypted NULLs are encoded unambiguously, cleartext values default to the empty value `,,`)";

    /**
     * Description of how to allow for custom CSV NULL value interpretation in the encrypted input file.
     */
    public static final String DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION = "Value representing how the cleartext NULL value is encoded in" +
            " the input CSV data (defaults to `,,` for cleartext fields is interpreted as NULL as encrypted NULLs are encoded " +
            "unambiguously)";

    /**
     * Description of how to allow for custom CSV Null values in the decrypted output file.
     */
    public static final String DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION = "How a cleartext NULL value is encoded in the output file " +
            "(defaults to the empty value `,,`)";

    /**
     * Description of how to process primitive Parquet Binary values as strings if they have no logical annotations.
     */
    public static final String PARQUET_BINARY_AS_STRING = "Treat primitive Parquet Binary types without logical annotations " +
            "as if they had the string annotation.";

    /**
     * Explanation of dry run mode.
     */
    public static final String DRY_RUN_DESCRIPTION = "Check settings and files to verify configuration is valid but skip processing " +
            "the input file";

    /**
     * Explanation and warnings about enabling stack traces.
     */
    public static final String ENABLE_STACKTRACE_DESCRIPTION = "Enable stack traces (WARNING: stack traces may contain sensitive info)";

    /**
     * List of acceptable file formats that can be specified.
     * ${COMPLETION-CANDIDATES} is expanded by picocli to the enum's values.
     */
    public static final String FILE_FORMAT_DESCRIPTION = "File format of <input>: ${COMPLETION-CANDIDATES}";

    /**
     * Explanation of allowing Fingerprint columns to pass through for debugging.
     */
    public static final String FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION = "Fail when encountering a fingerprint column during decryption " +
            "(disabled by default)";

    /**
     * Setting that allows for overwriting a file.
     */
    public static final String OVERWRITE_DESCRIPTION = "If output file exists, overwrite the file";

    /**
     * Description of how to set where temporary files are stored.
     */
    public static final String TEMP_DIR_DESCRIPTION = "Location where temp files may be created while processing the input " +
            "(defaults to the system temp directory)";

    /**
     * Setting for collaboration ID.
     */
    public static final String ID_DESCRIPTION = "Unique identifier for the collaboration. " +
            "Follows the pattern [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}";

    /**
     * Description of data input source setting (note: CSV not specified in anticipation of future formats).
     */
    public static final String INPUT_DESCRIPTION_CRYPTO = "Data to be processed";

    /**
     * Description of schema data source.
     */
    public static final String INPUT_DESCRIPTION_SCHEMA = "Tabular file used for schema generation";

    /**
     * Description of output file naming when using CSV files.
     */
    public static final String OUTPUT_DESCRIPTION_CRYPTO = "Output file name (defaults to `<input>.out`)";

    /**
     * Description of output file naming for schema creation.
     */
    public static final String OUTPUT_DESCRIPTION_SCHEMA = "Output file name (defaults to `<input>`.json)";

    /**
     * Schema file location.
     */
    public static final String SCHEMA_DESCRIPTION = "JSON file specifying table transformations";

    /**
     * Hidden constructor since this is a utility class.
     */
    private CliDescriptions() {
    }
}
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/cli/DecryptMode.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.action.CsvRowUnmarshaller;
import com.amazonaws.c3r.action.ParquetRowUnmarshaller;
import com.amazonaws.c3r.action.RowUnmarshaller;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
import javax.crypto.SecretKey;
import java.util.UUID;
import java.util.concurrent.Callable;
import static com.amazonaws.c3r.cli.Main.generateCommandLine;
import static com.amazonaws.c3r.encryption.keys.KeyUtil.KEY_ENV_VAR;
/**
 * Supports decrypting query results from an AWS Clean Rooms collaboration for analysis.
 *
 * <p>
 * The shared secret key is read from the environment variable named by {@link KeyUtil#KEY_ENV_VAR},
 * and the collaboration identifier supplied via {@code --id} is used as the salt for key derivation.
 */
@Slf4j
@Getter
@CommandLine.Command(name = "decrypt",
        mixinStandardHelpOptions = true,
        version = C3rSdkProperties.VERSION,
        descriptionHeading = "%nDescription:%n",
        description = "Decrypt data content derived from an AWS Clean Rooms collaboration.")
public class DecryptMode implements Callable<Integer> {

    /**
     * Required command line arguments.
     */
    @Getter
    static class RequiredArgs {

        /**
         * {@value CliDescriptions#INPUT_DESCRIPTION_CRYPTO}.
         */
        @picocli.CommandLine.Parameters(description = CliDescriptions.INPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<input>")
        private String input = null;

        /**
         * {@value CliDescriptions#ID_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--id"},
                description = CliDescriptions.ID_DESCRIPTION,
                paramLabel = "<value>",
                required = true)
        private UUID id = null;
    }

    /**
     * Required values as specified by the user.
     */
    @CommandLine.ArgGroup(multiplicity = "1", exclusive = false, heading = "%nRequired parameters:%n")
    private RequiredArgs requiredArgs = new RequiredArgs();

    /**
     * Optional command line arguments.
     */
    @Getter
    private static class OptionalArgs {

        /**
         * {@value CliDescriptions#FILE_FORMAT_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--fileFormat", "-e"},
                description = CliDescriptions.FILE_FORMAT_DESCRIPTION,
                paramLabel = "<format>")
        private FileFormat fileFormat = null;

        /**
         * {@value CliDescriptions#OUTPUT_DESCRIPTION_CRYPTO}.
         */
        @CommandLine.Option(names = {"--output", "-o"},
                description = CliDescriptions.OUTPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<dir>")
        private String output = null;

        /**
         * {@value CliDescriptions#OVERWRITE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--overwrite", "-f"},
                description = CliDescriptions.OVERWRITE_DESCRIPTION)
        private boolean overwrite = false;

        /**
         * {@value CliDescriptions#DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvInputNULLValue", "-r"},
                description = CliDescriptions.DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvInputNullValue = null;

        /**
         * {@value CliDescriptions#DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvOutputNULLValue", "-w"},
                description = CliDescriptions.DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvOutputNullValue = null;

        /**
         * {@value CliDescriptions#FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--failOnFingerprintColumns", "--fof"},
                description = CliDescriptions.FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION)
        private boolean failOnFingerprintColumns = false;

        /**
         * {@value CliDescriptions#DRY_RUN_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--dryRun", "-n"},
                description = CliDescriptions.DRY_RUN_DESCRIPTION)
        private boolean dryRun = false;

        /**
         * {@value CliDescriptions#ENABLE_STACKTRACE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--enableStackTraces", "-v"},
                description = CliDescriptions.ENABLE_STACKTRACE_DESCRIPTION)
        private boolean enableStackTraces = false;
    }

    /**
     * Optional values as specified by the user.
     */
    @CommandLine.ArgGroup(exclusive = false, heading = "%nOptional parameters:%n")
    private OptionalArgs optionalArgs = new OptionalArgs();

    /**
     * Return a CLI instance for decryption.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     */
    DecryptMode() {
    }

    /**
     * Get the decryption mode command line application with standard CLI settings.
     *
     * @return CommandLine interface for `decrypt` mode
     */
    public static CommandLine getApp() {
        return generateCommandLine(new DecryptMode());
    }

    /**
     * Get all configuration settings for the current dataset.
     *
     * <p>
     * The shared secret key is sourced from the environment variable named by
     * {@link KeyUtil#KEY_ENV_VAR}; the collaboration ID serves as the salt.
     *
     * @return Information needed to decrypt dataset
     */
    public DecryptConfig getConfig() {
        final SecretKey keyMaterial = KeyUtil.sharedSecretKeyFromString(System.getenv(KEY_ENV_VAR));
        return DecryptConfig.builder()
                .sourceFile(requiredArgs.getInput())
                .fileFormat(optionalArgs.fileFormat)
                .targetFile(optionalArgs.output)
                .overwrite(optionalArgs.overwrite)
                .csvInputNullValue(optionalArgs.csvInputNullValue)
                .csvOutputNullValue(optionalArgs.csvOutputNullValue)
                .secretKey(keyMaterial)
                .salt(requiredArgs.getId().toString())
                .failOnFingerprintColumns(optionalArgs.failOnFingerprintColumns)
                .build();
    }

    /**
     * Ensure requirements are met to run.
     *
     * <p>
     * Note: picocli marks {@code --id} as required and parses it as a {@link UUID}, so this
     * check is defensive for programmatic (non-CLI) construction of this class.
     *
     * @throws C3rIllegalArgumentException If collaboration identifier is missing
     */
    private void validate() {
        if (requiredArgs.getId() == null || requiredArgs.getId().toString().isBlank()) {
            throw new C3rIllegalArgumentException("Specified collaboration identifier is blank.");
        }
    }

    /**
     * Execute the decryption as specified on the command line.
     *
     * <p>
     * In dry-run mode the configuration is validated and built but no data is processed.
     *
     * @return {@value Main#SUCCESS} if no errors encountered else {@value Main#FAILURE}
     */
    @Override
    public Integer call() {
        try {
            validate();
            // Building the config also resolves the key, salt, and file settings; this is the
            // extent of the work performed during a dry run.
            final DecryptConfig cfg = getConfig();
            if (!optionalArgs.dryRun) {
                log.info("Decrypting data from {}.", cfg.getSourceFile());
                // Dispatch to the unmarshaller matching the (possibly inferred) input format.
                switch (cfg.getFileFormat()) {
                    case CSV:
                        final RowUnmarshaller<CsvValue> csvValueRowUnmarshaller = CsvRowUnmarshaller.newInstance(cfg);
                        csvValueRowUnmarshaller.unmarshal();
                        csvValueRowUnmarshaller.close();
                        break;
                    case PARQUET:
                        final RowUnmarshaller<ParquetValue> parquetRowUnmarshaller = ParquetRowUnmarshaller.newInstance(cfg);
                        parquetRowUnmarshaller.unmarshal();
                        parquetRowUnmarshaller.close();
                        break;
                    default:
                        throw new C3rIllegalArgumentException("Unrecognized file format: " + cfg.getFileFormat());
                }
                log.info("Decrypted data saved in {}.", cfg.getTargetFile());
            } else {
                log.info("Dry run: No data will be decrypted from {}.", cfg.getSourceFile());
            }
        } catch (Exception e) {
            Main.handleException(e, optionalArgs.enableStackTraces);
            return Main.FAILURE;
        }
        return Main.SUCCESS;
    }
}
| 2,697 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/cli/SchemaMode.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.schema.CsvSchemaGenerator;
import com.amazonaws.c3r.io.schema.ParquetSchemaGenerator;
import com.amazonaws.c3r.utils.C3rCliProperties;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
import java.io.File;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.Callable;
import static com.amazonaws.c3r.cli.Main.generateCommandLine;
/**
 * Command line utility to help create a schema for a data file. Walks the user through each column in the input to see if/how it should be
 * transformed during encryption for upload to a collaboration.
 *
 * <p>
 * Supports two generation modes: a non-interactive template (`--template`) the user edits afterwards,
 * and an interactive walkthrough (`--interactive`). When a collaboration ID is supplied, the
 * collaboration's client settings are fetched from AWS Clean Rooms to constrain schema choices.
 */
@Slf4j
@Getter
@CommandLine.Command(name = "schema",
        mixinStandardHelpOptions = true,
        version = C3rSdkProperties.VERSION,
        descriptionHeading = "%nDescription:%n",
        description = "Generate an encryption schema for a tabular file.")
public class SchemaMode implements Callable<Integer> {

    /**
     * Required command line arguments.
     */
    @Getter
    static class RequiredArgs {

        /**
         * {@value CliDescriptions#INPUT_DESCRIPTION_SCHEMA}.
         */
        @CommandLine.Parameters(
                description = CliDescriptions.INPUT_DESCRIPTION_SCHEMA,
                paramLabel = "<input>")
        private String input = null;
    }

    /**
     * Required values as specified by the user.
     */
    @CommandLine.ArgGroup(multiplicity = "1", heading = "%nRequired parameters:%n")
    private RequiredArgs requiredArgs = new RequiredArgs();

    /**
     * Class for the different modes of scheme generation.
     */
    @Getter
    public static class SubMode {

        /**
         * Create a simple schema automatically.
         */
        @CommandLine.Option(
                names = {"--template", "-t"},
                required = true,
                description = {"Create template schema file for <input>.",
                        "NOTE: user needs to edit schema file before use."})
        private boolean templateMode = false;

        /**
         * Walk user through entire schema creation process.
         */
        @CommandLine.Option(
                names = {"--interactive", "-i"},
                required = true,
                description = "Create a schema file interactively for <input>.")
        private boolean interactiveMode = false;
    }

    /**
     * Which generation mode to use for execution.
     */
    @CommandLine.ArgGroup(multiplicity = "1", heading = "%nGeneration mode (specify one of these):%n")
    private SubMode subMode = new SubMode();

    /**
     * Optional command line arguments.
     */
    @Getter
    static class OptionalArgs {

        /**
         * {@value CliDescriptions#AWS_PROFILE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--profile", "-l"},
                description = CliDescriptions.AWS_PROFILE_DESCRIPTION)
        private String profile = null;

        /**
         * {@value CliDescriptions#AWS_REGION_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--region", "-g"},
                description = CliDescriptions.AWS_REGION_DESCRIPTION)
        private String region = null;

        /**
         * For description see {@link CliDescriptions#ID_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--id"},
                description = CliDescriptions.ID_DESCRIPTION,
                paramLabel = "<value>")
        private UUID id = null;

        /**
         * If this input file has headers.
         *
         * <p>
         * Note: Using a default value of {@code true} means when the flag {@code --noHeaders}
         * is passed, @{code hasHeaders} is set to {@code false}.
         */
        @CommandLine.Option(names = {"--noHeaders", "-p"},
                description = "Indicates <input> has no column headers (CSV only).")
        private boolean hasHeaders = true;

        /**
         * {@value CliDescriptions#FILE_FORMAT_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--fileFormat", "-e"},
                description = CliDescriptions.FILE_FORMAT_DESCRIPTION,
                paramLabel = "<format>")
        private FileFormat fileFormat = null;

        /**
         * {@value CliDescriptions#OUTPUT_DESCRIPTION_SCHEMA}.
         */
        @CommandLine.Option(names = {"--output", "-o"},
                description = CliDescriptions.OUTPUT_DESCRIPTION_SCHEMA,
                paramLabel = "<file>")
        private String output = null;

        /**
         * {@value CliDescriptions#OVERWRITE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--overwrite", "-f"},
                description = CliDescriptions.OVERWRITE_DESCRIPTION)
        private boolean overwrite = false;

        /**
         * {@value CliDescriptions#ENABLE_STACKTRACE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--enableStackTraces", "-v"},
                description = CliDescriptions.ENABLE_STACKTRACE_DESCRIPTION)
        private boolean enableStackTraces = false;
    }

    /**
     * Optional values as specified by the user.
     */
    @CommandLine.ArgGroup(exclusive = false, heading = "%nOptional parameters:%n")
    private OptionalArgs optionalArgs = new OptionalArgs();

    /** DAO for interacting with AWS Clean Rooms. */
    private final CleanRoomsDao cleanRoomsDao;

    /**
     * Return a default CLI instance for schema generation.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     */
    SchemaMode() {
        this.cleanRoomsDao = CleanRoomsDao.builder().build();
    }

    /**
     * Return a CLI instance for schema generation with a custom {@link CleanRoomsDao}.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     */
    SchemaMode(final CleanRoomsDao cleanRoomsDao) {
        this.cleanRoomsDao = cleanRoomsDao;
    }

    /**
     * Get the schema mode command line application with a custom {@link CleanRoomsDao}.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     * @return CommandLine interface for `schema` with customized AWS Clean Rooms access
     */
    static CommandLine getApp(final CleanRoomsDao cleanRoomsDao) {
        return generateCommandLine(new SchemaMode(cleanRoomsDao));
    }

    /**
     * Get the settings from AWS Clean Rooms for this collaboration.
     *
     * @return Cryptographic computing rules for collaboration, or {@code null} if not applicable.
     */
    public ClientSettings getClientSettings() {
        // No collaboration ID means no collaboration-specific settings apply.
        if (optionalArgs.id == null) {
            return null;
        }
        // cleanRoomsDao is assigned by both constructors; the null check is defensive.
        final var dao = cleanRoomsDao != null
                ? cleanRoomsDao
                : CleanRoomsDao.builder().apiName(C3rCliProperties.API_NAME).build();
        return dao.withProfile(optionalArgs.profile).withRegion(optionalArgs.region)
                .getCollaborationDataEncryptionMetadata(optionalArgs.id.toString());
    }

    /**
     * Validates that required information is specified.
     *
     * @throws C3rIllegalArgumentException If user input is invalid
     */
    private void validate() {
        if (requiredArgs.getInput().isBlank()) {
            throw new C3rIllegalArgumentException("Specified input file name is blank.");
        }
        if (optionalArgs.output != null && optionalArgs.output.isBlank()) {
            throw new C3rIllegalArgumentException("Specified output file name is blank.");
        }
    }

    /**
     * Execute schema generation help utility.
     *
     * <p>
     * The file format is taken from {@code --fileFormat} when given, otherwise inferred from the
     * input file's extension. The output defaults to the input's base name plus {@code .json}.
     *
     * @return {@value Main#SUCCESS} if no errors encountered else {@value Main#FAILURE}
     */
    @Override
    public Integer call() {
        try {
            validate();
            final File file = new File(requiredArgs.getInput());
            final String fileNameNoPath = file.getName();
            final String outFile = Objects.requireNonNullElse(optionalArgs.output, fileNameNoPath + ".json");
            final FileFormat fileFormat = Optional.ofNullable(optionalArgs.fileFormat).orElseGet(() ->
                    FileFormat.fromFileName(requiredArgs.getInput()));
            if (fileFormat == null) {
                // Fixed: the message previously referenced a nonexistent `--format` flag;
                // the actual option is `--fileFormat` (alias `-e`).
                throw new C3rIllegalArgumentException(
                        "Unknown file format (consider using the --fileFormat flag): " + requiredArgs.getInput());
            }
            switch (fileFormat) {
                case CSV:
                    final var csvSchemaGenerator = CsvSchemaGenerator.builder()
                            .inputCsvFile(requiredArgs.getInput())
                            .hasHeaders(optionalArgs.hasHeaders)
                            .targetJsonFile(outFile)
                            .overwrite(optionalArgs.overwrite)
                            .clientSettings(getClientSettings())
                            .build();
                    csvSchemaGenerator.generateSchema(subMode);
                    break;
                case PARQUET:
                    // Parquet files carry their own schema, so header-related flags do not apply.
                    if (!optionalArgs.hasHeaders) {
                        throw new C3rIllegalArgumentException("--noHeaders is not applicable for Parquet files.");
                    }
                    final var parquetSchemaGenerator = ParquetSchemaGenerator.builder()
                            .inputParquetFile(requiredArgs.getInput())
                            .targetJsonFile(outFile)
                            .overwrite(optionalArgs.overwrite)
                            .clientSettings(getClientSettings())
                            .build();
                    parquetSchemaGenerator.generateSchema(subMode);
                    break;
                default:
                    throw new C3rIllegalArgumentException("Unsupported file format for schema generation: " + fileFormat);
            }
        } catch (Exception e) {
            Main.handleException(e, optionalArgs.enableStackTraces);
            return Main.FAILURE;
        }
        return Main.SUCCESS;
    }
}
| 2,698 |
0 | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-cli/src/main/java/com/amazonaws/c3r/cli/Main.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.cli;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
/**
 * Top level class for the CLI. Global options such as how to handle command line parsing are configured here and then
 * subcommand specific options are configured in each subcommand.
 */
@Slf4j
@CommandLine.Command(
        name = CliDescriptions.APP_NAME,
        mixinStandardHelpOptions = true,
        version = C3rSdkProperties.VERSION,
        description = "Cryptographic computing tool for use with AWS Clean Rooms.",
        subcommands = {SchemaMode.class, EncryptMode.class, DecryptMode.class})
public final class Main {

    /**
     * Return value to indicate a child subcommand ran successfully.
     */
    public static final int SUCCESS = 0;

    /**
     * Return value to indicate a child subcommand did not finish successfully.
     * Further information about the failure will be in the logs/CLI.
     */
    public static final int FAILURE = 1;

    /**
     * Create instance of the command line interface for all child subcommands.
     * Private: instances are only created internally via {@link #getApp()}.
     */
    private Main() {
    }

    /**
     * Get a copy of the application without passing in arguments yet.
     *
     * <p>
     * NOTE: The object keeps state between calls so if you include a boolean flag on one run and not on the next,
     * the flag will still evaluate to true.
     *
     * @return CommandLine interface to utility that you can use to add additional logging or information to
     */
    static CommandLine getApp() {
        return generateCommandLine(new Main());
    }

    /**
     * Constructs a new CommandLine interpreter with the specified object with picocli annotations.
     *
     * <p>
     * Shared by all subcommands so that quote trimming and case-insensitive enum parsing
     * behave consistently across the CLI.
     *
     * @param command The object with appropriate picocli annotations.
     * @return The constructed command line interpreter.
     */
    static CommandLine generateCommandLine(final Object command) {
        return new CommandLine(command).setTrimQuotes(true).setCaseInsensitiveEnumValuesAllowed(true);
    }

    /**
     * Handle top level logging of errors during execution.
     *
     * <p>
     * Stack traces are only printed when explicitly enabled since they may contain sensitive
     * information. Known (C3R) errors log their message; unexpected errors log only the class name.
     *
     * @param e                 Error encountered
     * @param enableStackTraces Whether the full stacktrace should be printed
     */
    static void handleException(final Exception e, final boolean enableStackTraces) {
        if (enableStackTraces) {
            log.error("An error occurred: {}", e.getMessage(), e);
        } else if (e instanceof C3rRuntimeException) {
            log.error("An error occurred: {}", e.getMessage());
        } else {
            log.error("An unexpected error occurred: {}", e.getClass());
            log.error("Note: the --enableStackTraces flag can provide additional context for errors.");
        }
        log.warn("Output files may have been left on disk.");
    }

    /**
     * Execute the application with a particular set of arguments.
     *
     * <p>
     * Exits the JVM with the subcommand's status code ({@value SUCCESS} or {@value FAILURE}).
     *
     * @param args Set of strings containing the options to use on this execution pass
     */
    public static void main(final String[] args) {
        final int exitCode = getApp().execute(args);
        System.exit(exitCode);
    }
}
| 2,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.