index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/csv/SparkCsvReaderTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.spark.config.SparkConfig;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS_NO_NORMALIZATION;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SparkCsvReaderTest {
private final SparkSession session = SparkSessionTestUtility.initSparkSession();
private Path tempFile;
@BeforeEach
public void setup() throws IOException {
tempFile = FileTestUtility.createTempFile();
}
@Test
public void initReaderHeadersTest() {
final Map<String, String> properties = new HashMap<>();
properties.put("path", "../samples/csv/data_sample_with_quotes.csv");
final CsvRowReader reader = SparkCsvReader.initReader(properties);
assertEquals(
DATA_SAMPLE_HEADERS.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
reader.getHeaders().stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()));
}
@Test
public void initReaderHeadersNoNormalizationTest() {
final Map<String, String> properties = new HashMap<>();
properties.put("path", "../samples/csv/data_sample_with_quotes.csv");
properties.put(SparkConfig.PROPERTY_KEY_SKIP_HEADER_NORMALIZATION, "true");
final CsvRowReader reader = SparkCsvReader.initReader(properties);
assertEquals(
DATA_SAMPLE_HEADERS_NO_NORMALIZATION.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
reader.getHeaders().stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()));
}
@Test
public void initReaderNoHeadersTest() {
final List<ColumnHeader> customDataSampleHeaders =
Stream.of("FirstNameCustom",
"LastNameCustom",
"AddressCustom",
"CityCustom",
"StateCustom",
"PhoneNumberCustom",
"TitleCustom",
"LevelCustom",
"NotesCustom"
)
.map(ColumnHeader::new)
.collect(Collectors.toList());
final Map<String, String> properties = new HashMap<>();
properties.put("path", "../samples/csv/data_sample_no_headers.csv");
final String customHeader = customDataSampleHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(","));
properties.put("headers", customHeader);
final CsvRowReader reader = SparkCsvReader.initReader(properties);
assertEquals(customDataSampleHeaders.size(), reader.getHeaders().size());
assertTrue(customDataSampleHeaders.containsAll(reader.getHeaders()));
}
@Test
public void initReaderNoPathTest() {
final Map<String, String> properties = new HashMap<>();
assertThrows(C3rRuntimeException.class, () -> SparkCsvReader.initReader(properties));
}
@Test
public void inputDirectoryTest() throws IOException {
final Path tempDir = FileTestUtility.createTempDir();
final Path file1 = tempDir.resolve("file1.csv");
Files.writeString(file1, "column,column2\nfoo,bar");
final Path file2 = tempDir.resolve("file2.csv");
Files.writeString(file2, "column,column2\nbaz,buzz");
final List<Row> fullDataset = SparkCsvReader.readInput(session,
tempDir.toString(),
null,
null)
.collectAsList();
final List<Row> dataset1 = SparkCsvReader.readInput(session,
file1.toString(),
null,
null)
.collectAsList();
final List<Row> dataset2 = SparkCsvReader.readInput(session,
file2.toString(),
null,
null)
.collectAsList();
assertTrue(fullDataset.containsAll(dataset1));
assertTrue(fullDataset.containsAll(dataset2));
}
@Test
public void inputNestedDirectoryTest() throws IOException {
final Path tempDir = FileTestUtility.createTempDir();
final Path file1 = tempDir.resolve("file1.csv");
Files.writeString(file1, "column,column2\nfoo,bar");
final Path nestedTempDir = tempDir.resolve("nested");
Files.createDirectory(nestedTempDir);
final Path file2 = nestedTempDir.resolve("file2.csv");
Files.writeString(file2, "column,column2\nbaz,buzz");
final List<Row> fullDataset = SparkCsvReader.readInput(session,
tempDir.toString(),
null,
null)
.collectAsList();
final List<Row> dataset1 = SparkCsvReader.readInput(session,
file1.toString(),
null,
null)
.collectAsList();
final List<Row> dataset2 = SparkCsvReader.readInput(session,
file2.toString(),
null,
null)
.collectAsList();
assertTrue(fullDataset.containsAll(dataset1));
// recursion currently not supported
assertFalse(fullDataset.containsAll(dataset2));
}
@Test
public void inputDirectoryDuplicatesTest() throws IOException {
final Path tempDir = FileTestUtility.createTempDir();
final Path file1 = tempDir.resolve("file1.csv");
final String duplicateFileContents = "column,column2\nfoo,bar";
Files.writeString(file1, duplicateFileContents);
final Path file2 = tempDir.resolve("file2.csv");
Files.writeString(file2, duplicateFileContents);
final List<Row> fullDataset = SparkCsvReader.readInput(session,
tempDir.toString(),
null,
null)
.collectAsList();
final List<Row> dataset1 = SparkCsvReader.readInput(session,
file1.toString(),
null,
null)
.collectAsList();
final List<Row> dataset2 = SparkCsvReader.readInput(session,
file2.toString(),
null,
null)
.collectAsList();
assertTrue(fullDataset.containsAll(dataset1));
assertTrue(fullDataset.containsAll(dataset2));
assertEquals(2, fullDataset.size());
}
@Test
public void inputDirectoryUnrelatedDatasetsTest() throws IOException {
final Path tempDir = FileTestUtility.createTempDir();
final Path file1 = tempDir.resolve("file1.csv");
Files.writeString(file1, "columnFoo,columnBar\nfoo,bar");
final Path file2 = tempDir.resolve("file2.csv");
Files.writeString(file2, "columnBaz,columnBuzz\nbaz,buzz");
assertThrows(C3rRuntimeException.class, () -> SparkCsvReader.readInput(session,
tempDir.toString(),
null,
null)
.collectAsList());
}
@Test
public void quotedSpaceTest() throws IOException {
final String singleRowQuotedSpace = "column\n\" \"";
Files.writeString(tempFile, singleRowQuotedSpace);
final List<Row> dataset = SparkCsvReader.readInput(session,
tempFile.toString(),
null,
null)
.collectAsList();
assertEquals(" ", dataset.get(0).getString(0));
}
@Test
public void unquotedBlankTest() throws IOException {
final String singleRowQuotedSpace = "column, column2\n ,";
Files.writeString(tempFile, singleRowQuotedSpace);
final List<Row> dataset = SparkCsvReader.readInput(session,
tempFile.toString(),
null,
null)
.collectAsList();
assertNull(dataset.get(0).get(0));
assertNull(dataset.get(0).get(1));
}
@Test
public void customNullTest() throws IOException {
final String singleRowQuotedSpace = "column, column2\ncolumn,";
Files.writeString(tempFile, singleRowQuotedSpace);
final Dataset<Row> dataset = SparkCsvReader.readInput(session,
tempFile.toString(),
"column",
null);
// ensure a column with a header that equals the custom null value is not dropped.
assertEquals("column", dataset.columns()[0]);
final List<Row> data = dataset.collectAsList();
// ensure custom null respected
assertNull(data.get(0).get(0));
// ensure empty value respected
assertNotNull(data.get(0).get(1));
assertEquals("", data.get(0).getString(1));
}
@Test
public void maliciousColumnHeaderTest() throws IOException {
final String singleRowQuotedSpace = "'; DROP ALL TABLES;";
Files.writeString(tempFile, singleRowQuotedSpace);
// Assert a malicious column header can't be read
assertThrows(C3rIllegalArgumentException.class, () -> SparkCsvReader.readInput(session,
tempFile.toString(),
null,
null));
}
}
| 2,400 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetWriterTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.parquet;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SparkParquetWriterTest {
private static SparkSession session;
private static SparkEncryptConfig config;
/**
* Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive.
*
* @throws IOException if Schema can't be read.
*/
@BeforeAll
public static void setup() throws IOException {
final TableSchema schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class);
session = SparkSessionTestUtility.initSparkSession();
config = SparkEncryptConfig.builder()
.source("../samples/parquet/data_sample.parquet")
.targetDir(FileTestUtility.createTempDir().resolve("output").toString())
.overwrite(true)
.secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
.salt(EXAMPLE_SALT.toString())
.tableSchema(schema)
.settings(ClientSettings.lowAssuranceMode())
.build();
}
@Test
public void writeOutputTest() {
final Dataset<Row> originalDataset = SparkParquetReader.readInput(
session,
config.getSourceFile());
final List<String> originalColumns = Arrays.stream(originalDataset.columns())
.map(String::toLowerCase)
.sorted()
.collect(Collectors.toList());
SparkParquetWriter.writeOutput(originalDataset, config.getTargetFile());
final Dataset<Row> newDataset = SparkParquetReader.readInput(
session,
config.getTargetFile());
final List<String> newColumns = Arrays.stream(originalDataset.columns())
.map(String::toLowerCase)
.sorted()
.collect(Collectors.toList());
assertEquals(originalColumns.size(), newColumns.size());
assertTrue(originalColumns.containsAll(newColumns));
// Confirm after writing and reading back that no data was lost
final List<Row> originalDatasetRows = originalDataset.collectAsList();
final List<Row> newDatasetRows = newDataset.collectAsList();
assertEquals(originalDatasetRows.size(), newDatasetRows.size());
assertTrue(originalDatasetRows.containsAll(newDatasetRows));
}
}
| 2,401 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetReaderTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.parquet;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import scala.collection.Iterable;
import scala.collection.immutable.Seq;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.DATA_SAMPLE_HEADERS_NO_NORMALIZATION;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class SparkParquetReaderTest {
private static SparkSession session;
private static TableSchema schema;
private static SparkEncryptConfig config;
/**
* Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive.
*
* @throws IOException if Schema can't be read.
*/
@BeforeAll
public static void setup() throws IOException {
schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class);
session = SparkSessionTestUtility.initSparkSession();
config = SparkEncryptConfig.builder()
.source("../samples/parquet/data_sample.parquet")
.targetDir(FileTestUtility.createTempDir().resolve("output").toString())
.overwrite(true)
.secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
.salt(EXAMPLE_SALT.toString())
.tableSchema(schema)
.settings(ClientSettings.lowAssuranceMode())
.build();
}
@Test
public void readInputColumnsTest() {
final Dataset<Row> dataset = SparkParquetReader.readInput(session, config.getSourceFile());
final List<String> columns = Arrays.stream(dataset.columns())
.sorted()
.collect(Collectors.toList());
assertEquals(
DATA_SAMPLE_HEADERS.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
columns);
}
@Test
public void readInputColumnsNoNormalizationTest() {
final Dataset<Row> dataset = SparkParquetReader.readInput(session, config.getSourceFile(), /* skipHeaderNormalization */ true,
ParquetConfig.DEFAULT);
final List<String> columns = Arrays.stream(dataset.columns())
.sorted()
.collect(Collectors.toList());
assertEquals(
DATA_SAMPLE_HEADERS_NO_NORMALIZATION.stream().map(ColumnHeader::toString).sorted().collect(Collectors.toList()),
columns);
}
@Test
public void readInputDirectoryTest() throws IOException {
final Path tempDir = FileTestUtility.createTempDir();
final Path copiedFile = tempDir.resolve("copied.parquet");
Files.copy(Path.of("../samples/parquet/data_sample.parquet"), copiedFile);
final Dataset<Row> dataset = SparkParquetReader.readInput(session, tempDir.toString());
final List<String> columns = Arrays.stream(dataset.columns())
.map(String::toLowerCase)
.sorted()
.collect(Collectors.toList());
final List<String> expectedColumns = schema.getColumns().stream()
.map(columnSchema -> columnSchema.getSourceHeader().toString())
.distinct()
.sorted()
.collect(Collectors.toList());
assertEquals(expectedColumns.size(), columns.size());
assertTrue(expectedColumns.containsAll(columns));
}
@Test
public void maxColumnCountTest() {
final Dataset<Row> dataset = mock(Dataset.class);
when(dataset.columns()).thenReturn(new String[SparkParquetReader.MAX_COLUMN_COUNT + 1]);
when(dataset.count()).thenReturn(0L); // in range row size
assertThrows(C3rRuntimeException.class, () -> SparkParquetReader.validate(dataset));
}
@Test
public void maxRowCountTest() {
final Dataset<Row> dataset = mock(Dataset.class);
when(dataset.columns()).thenReturn(new String[0]); // in range column size
when(dataset.count()).thenReturn(Limits.ROW_COUNT_MAX + 1L);
assertThrows(C3rRuntimeException.class, () -> SparkParquetReader.validate(dataset));
}
@Test
public void maliciousColumnHeaderWithoutNormalizationTest() throws IOException {
final StructField maliciousColumn = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
final StructType maliciousSchema = DataTypes.createStructType(new StructField[]{maliciousColumn});
final ArrayList<Row> data = new ArrayList<>();
data.add(Row.fromSeq(Seq.from(Iterable.single("value"))));
final Dataset<Row> maliciousDataset = session.createDataFrame(data, maliciousSchema);
final Path tempDir = FileTestUtility.createTempDir();
SparkParquetWriter.writeOutput(maliciousDataset, tempDir.toString());
final Dataset<Row> dataset = SparkParquetReader.readInput(session, tempDir.toString(), true, ParquetConfig.DEFAULT);
/*
Assert the malicious header is like any other.
While the standard Spark Parquet reader will allow special chars, since a ColumnHeader will not, we can assume
any fields like this will be dropped later before any further parsing.
*/
assertEquals(maliciousColumn.name(), dataset.columns()[0]);
// Assert values still exist
assertFalse(dataset.isEmpty());
}
@Test
public void maliciousColumnHeaderWithNormalizationTest() throws IOException {
final StructField maliciousColumn = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
final StructType maliciousSchema = DataTypes.createStructType(new StructField[]{maliciousColumn});
final ArrayList<Row> data = new ArrayList<>();
data.add(Row.fromSeq(Seq.from(Iterable.single("value"))));
final Dataset<Row> maliciousDataset = session.createDataFrame(data, maliciousSchema);
final Path tempDir = FileTestUtility.createTempDir();
SparkParquetWriter.writeOutput(maliciousDataset, tempDir.toString());
// Assert a malicious column header can't be read
assertThrows(C3rIllegalArgumentException.class, () -> SparkParquetReader.readInput(session, tempDir.toString(),
false, ParquetConfig.DEFAULT));
}
}
| 2,402 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/DecryptSdkConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import lombok.Builder;
import lombok.Getter;
import javax.crypto.spec.SecretKeySpec;
/**
* Basic Decryption settings.
*/
@Builder
@Getter
public class DecryptSdkConfigTestUtility {
/**
* Key to use for decryption.
*/
@Builder.Default
private SecretKeySpec key = null;
/**
* Salt for key generation.
*/
@Builder.Default
private String salt = null;
/**
* Input file.
*/
@Builder.Default
private String input = null;
/**
* Column header names.
*/
@Builder.Default
private String[] columnHeaders = null;
}
| 2,403 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/SparkSessionTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;
public abstract class SparkSessionTestUtility {
/**
* Initializes a SparkSession object with the passed Spark Drive URL.
*
* @return A SparkSession connected to the Spark Driver
*/
public static SparkSession initSparkSession() {
// CHECKSTYLE:OFF
final SparkConf conf = new SparkConf()
.setAppName("C3R")
.setMaster("local[*]");
// CHECKSTYLE:ON
return SparkSession
.builder()
.config(conf)
.getOrCreate();
}
/**
* Shut down the Spark session.
*
* @param spark the SparkSession to close
*/
public static void closeSparkSession(final SparkSession spark) {
spark.stop();
}
} | 2,404 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/TimingResultTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.config.ColumnType;
import lombok.Builder;
/**
* Used to store performance testing metrics.
*/
@Builder
public class TimingResultTestUtility {
/**
* Header names for timing results.
*/
public static final String[] HEADERS = {
"Columns",
"Rows",
"Marshal Time (s)",
"Unmarshal Time (s)",
"Input Size (MB)",
"Marshalled Size (MB)",
"Unmarshalled Size (MB)",
"Cleartext Columns",
"Sealed Columns",
"Fingerprint Columns",
"Chars/Entry"
};
/**
* How many column types we are supporting.
*/
private static final int NUM_COL_TYPES = ColumnType.values().length;
/**
* Conversion factor for bytes to megabytes.
*/
private static final double MB = Math.pow(2, 20);
/**
* How many characters per entry in the input file.
*/
private Integer charsPerEntry;
/**
* Number of columns in the files.
*/
private Integer columnCount;
/**
* Number of rows in the files.
*/
private Long rowCount;
/**
* Size of original input file.
*/
private Long inputSizeBytes;
/**
* Time spent marshalling data.
*/
private Long marshalTimeSec;
/**
* Size of marshalled file.
*/
private Long marshalledSizeBytes;
/**
* Time spent unmarshalling data.
*/
private Long unmarshalTimeSec;
/**
* Size of the unmarshalled file.
*/
private Long unmarshalledSizeBytes;
} | 2,405 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/TableGeneratorTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.config.ColumnType;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import com.google.gson.JsonPrimitive;
import lombok.Builder;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
/**
* Used to generate CSV files with random data and an associated schema for testing purposes.
*/
@Builder
public final class TableGeneratorTestUtility {
/**
* Number of column types currently supported.
*/
private static final int COL_TYPES = ColumnType.values().length;
/**
* Hidden utility class constructor.
*/
private TableGeneratorTestUtility() {
}
/**
* Generates unique column header names based on type.
*
* @param columnIndex Which column to create a header for
* @return Column type name followed by column number
*/
private static String headerName(final int columnIndex) {
switch (columnIndex % COL_TYPES) {
case 0:
return "cleartext" + columnIndex;
case 1:
return "sealed" + columnIndex;
default:
return "fingerprint" + columnIndex;
}
}
/**
* Generates the JSON output for a column schema. During data generation the column types are evenly rotated between:
* <ul>
* <li>Cleartext</li>
* <li>Sealed with a Max Pad of Length 0</li>
* <li>Fingerprint</li>
* </ul>
*
* @param columnIndex Which column to generate a schema for (determines types)
* @return JSON object representing the column's schema
*/
private static JsonObject columnSchema(final int columnIndex) {
final JsonObject obj = new JsonObject();
final JsonObject pad = new JsonObject();
obj.addProperty("sourceHeader", headerName(columnIndex));
switch (columnIndex % COL_TYPES) {
case 0:
obj.addProperty("type", "cleartext");
break;
case 1:
obj.addProperty("type", "sealed");
pad.addProperty("type", "max");
pad.addProperty("length", 0);
obj.add("pad", pad);
break;
default:
obj.addProperty("type", "fingerprint");
break;
}
return obj;
}
/**
* Generates a prefix for the CSV and schema files.
*
* @param columnCount Number of columns in generated file
* @param rowCount Number of rows in generated file
* @return String value {@code misc<columnCount>by<rowCount>-} for start of file name
*/
public static String filePrefix(final int columnCount, final long rowCount) {
return "misc" + columnCount + "by" + rowCount + "-";
}
/**
* Generates a schema to match the generated CSV file. Column types rotate as specified in {@link #columnSchema(int)}.
*
* @param columnCount Number of columns in generated file
* @param rowCount Number of rows in generated file (used for naming file only)
* @return Path to schema file
* @throws IOException If there was an error writing the schema to disk
*/
public static Path generateSchema(final int columnCount, final long rowCount) throws IOException {
final JsonArray columns = new JsonArray(columnCount);
for (int i = 0; i < columnCount; i++) {
columns.add(columnSchema(i));
}
final JsonObject content = new JsonObject();
content.add("headerRow", new JsonPrimitive(true));
content.add("columns", columns);
final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".json");
final var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8);
writer.write(content.toString());
writer.close();
return path;
}
/**
* Generate a random alphanumeric string of the specified size.
*
* @param size Number of characters in the string
* @return Random alphanumeric string
*/
private static String randomString(final int size) {
final String chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
final Random random = new Random();
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < size; i++) {
sb.append(chars.charAt(random.nextInt(chars.length())));
}
return sb.toString();
}
/**
* Creates a CSV file of the specified size filled with random alphanumeric strings.
*
* @param entrySize Number of characters in each entry
* @param columnCount Number of columns in the output file
* @param rowCount Number of rows in te output file
* @return Path to the generated file
* @throws IOException If an error occurred while writing the file
*/
public static Path generateCsv(final int entrySize, final int columnCount, final long rowCount)
throws IOException {
final Path path = FileTestUtility.resolve(filePrefix(columnCount, rowCount) + ".csv");
final var writer = Files.newBufferedWriter(path, StandardCharsets.UTF_8);
final var headers = IntStream.range(0, columnCount).boxed().map(TableGeneratorTestUtility::headerName)
.collect(Collectors.joining(","));
writer.write(headers);
writer.write(System.lineSeparator());
for (int i = 0; i < rowCount; i++) {
final String entry = randomString(entrySize);
final var entries = new String[columnCount];
Arrays.fill(entries, entry);
writer.write(String.join(",", entries));
writer.write(System.lineSeparator());
}
writer.close();
return path;
}
}
| 2,406 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/GeneralTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import javax.crypto.spec.SecretKeySpec;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Set of Utilities used for Testing. A combination of file settings and helper functions.
*/
public abstract class GeneralTestUtility {
/**
* A 32-byte key used for testing.
*/
public static final byte[] EXAMPLE_KEY_BYTES =
new byte[]{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31
};
/**
* Example salt for testing.
*/
public static final UUID EXAMPLE_SALT = UUID.fromString("00000000-1111-2222-3333-444444444444");
/**
* List of headers from the golden test file (data_sample.csv).
*/
public static final List<String> DATA_SAMPLE_HEADER_STRINGS =
List.of("FirstName",
"LastName",
"Address",
"City",
"State",
"PhoneNumber",
"Title",
"Level",
"Notes"
);
public static final List<ColumnHeader> DATA_SAMPLE_HEADERS_NO_NORMALIZATION =
DATA_SAMPLE_HEADER_STRINGS.stream()
.map(ColumnHeader::ofRaw)
.collect(Collectors.toList());
public static final List<ColumnHeader> DATA_SAMPLE_HEADERS =
DATA_SAMPLE_HEADER_STRINGS.stream()
.map(ColumnHeader::new)
.collect(Collectors.toList());
/**
* Schema for data_sample.csv.
*/
public static final TableSchema CONFIG_SAMPLE = new MappedTableSchema(List.of(
cleartextColumn("firstname"),
cleartextColumn("lastname"),
sealedColumn("address", PadType.MAX, 32),
sealedColumn("city", PadType.MAX, 16),
fingerprintColumn("state"),
cleartextColumn("phonenumber", "phonenumber_cleartext"),
sealedColumn("phonenumber", "phonenumber_sealed"),
fingerprintColumn("phonenumber", "phonenumber_fingerprint"),
sealedColumn("title", PadType.FIXED, 128),
cleartextColumn("level"),
sealedColumn("notes", PadType.MAX, 100)
));
/**
* Encryption configuration used for data_sample.csv (matches decryption configuration for marshalled_data_sample.csv).
*/
public static final EncryptSdkConfigTestUtility TEST_CONFIG_DATA_SAMPLE = EncryptSdkConfigTestUtility.builder()
.input("../samples/csv/data_sample_with_quotes.csv")
.inputColumnHeaders(CONFIG_SAMPLE.getColumns().stream().map(ColumnSchema::getSourceHeader).map(ColumnHeader::toString)
.collect(Collectors.toList()))
.outputColumnHeaders(CONFIG_SAMPLE.getColumns().stream().map(ColumnSchema::getTargetHeader).map(ColumnHeader::toString)
.collect(Collectors.toList()))
.salt("saltybytes")
.key(new SecretKeySpec(EXAMPLE_KEY_BYTES, KeyUtil.KEY_ALG))
.schema(CONFIG_SAMPLE)
.build();
/**
* Encryption configuration used for one_row_null_sample.csv with only cleartext columns.
*/
public static final EncryptSdkConfigTestUtility TEST_CONFIG_ONE_ROW_NULL_SAMPLE_CLEARTEXT = EncryptSdkConfigTestUtility.builder()
.input("../samples/csv/one_row_null_sample.csv")
.inputColumnHeaders(List.of("firstname", "lastname", "address", "city"))
.outputColumnHeaders(List.of("firstname", "lastname", "address", "city"))
.salt("saltybytes")
.key(new SecretKeySpec(EXAMPLE_KEY_BYTES, KeyUtil.KEY_ALG))
.schema(new MappedTableSchema(Stream.of("firstname", "lastname", "address", "city").map(GeneralTestUtility::cleartextColumn)
.collect(Collectors.toList())))
.build();
/**
* Create a ColumnHeader if name isn't null.
*
* <p>
* This helper function is to support testing positional schemas. Those schemas need to have {@code null} as the value
* for the sourceHeader. However, {@code new ColumnHeader(null)} fails validation. Instead of using the ternary operator
* everywhere we assign the source value, we can call this function instead which is a bit cleaner. By having this helper,
* we don't need to make another full set of helper functions for schema creation, we can just pass {@code null} in to the
* existing helpers. {@link com.amazonaws.c3r.config.PositionalTableSchema} uses this functionality in the creation of all it's
* test variables at the top of the file if you want to see an example usage of why we need to pass null through.
*
* @param name Name of the column or {@code null} if there isn't one
* @return Input string transformed into {@link ColumnHeader} or {@code null} if {@code name} was {@code null}
*/
private static ColumnHeader nameHelper(final String name) {
if (name == null) {
return null;
}
return new ColumnHeader(name);
}
/**
* Helper function that handles cleartext column boilerplate.
*
* @param name Name to be used for input and output row
* @return An cleartext column schema
*/
public static ColumnSchema cleartextColumn(final String name) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(name))
.targetHeader(nameHelper(name))
.pad(null)
.type(ColumnType.CLEARTEXT)
.build();
}
/**
* Helper function that handles cleartext column boilerplate.
*
* @param nameIn Source column header name
* @param nameOut Target column header name
* @return An cleartext column schema
*/
public static ColumnSchema cleartextColumn(final String nameIn, final String nameOut) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(nameIn))
.targetHeader(nameHelper(nameOut))
.pad(null)
.type(ColumnType.CLEARTEXT)
.build();
}
/**
* Helper function for a sealed column with no pad.
*
* @param nameIn Source header name
* @param nameOut Target header name
* @return A sealed column schema
*/
public static ColumnSchema sealedColumn(final String nameIn, final String nameOut) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(nameIn))
.targetHeader(nameHelper(nameOut))
.pad(Pad.DEFAULT)
.type(ColumnType.SEALED)
.build();
}
/**
* Helper function for a sealed column with specified padding.
*
* @param name Name for source and target column headers
* @param type What pad type to use
* @param length How long the pad should be
* @return A sealed column schema
*/
public static ColumnSchema sealedColumn(final String name, final PadType type, final Integer length) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(name))
.targetHeader(nameHelper(name))
.pad(Pad.builder().type(type).length(length).build())
.type(ColumnType.SEALED)
.build();
}
/**
* Helper function for creating a fingerprint column.
*
* @param name The name to use for both the source and target header
* @return A fingerprint column schema
*/
public static ColumnSchema fingerprintColumn(final String name) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(name))
.targetHeader(nameHelper(name))
.type(ColumnType.FINGERPRINT)
.build();
}
/**
* Helper function for creating a fingerprint column.
*
* @param nameIn The name to use for the source header
* @param nameOut The name to use for the target header
* @return A fingerprint column schema
*/
public static ColumnSchema fingerprintColumn(final String nameIn, final String nameOut) {
return ColumnSchema.builder()
.sourceHeader(nameHelper(nameIn))
.targetHeader(nameHelper(nameOut))
.type(ColumnType.FINGERPRINT)
.build();
}
/**
* Build a simple Row from strings for testing; string values are used verbatim.
*
* @param rowEntries CSV row entries given in key, value, key, value, etc... order a la `Map.of(..)`
* @return A row with the given key/value pairs
*/
public static Map<String, String> row(final String... rowEntries) {
final var row = new HashMap<String, String>();
for (int i = 0; i < rowEntries.length; i += 2) {
row.put(rowEntries[i], rowEntries[i + 1]);
}
return row;
}
/**
* Takes a mapping of column headers to values along with a set of map entries for a column header to a test function.
* This class creates the map of predicate functions by column header and calls {@link #assertRowEntryPredicates(Map, Map)}.
*
* @param content A map of column headers to row content
* @param predicates A variable length list of arguments that are map entries for testing row data
* @see #assertRowEntryPredicates(Map, Map)
*/
@SafeVarargs
public static void assertRowEntryPredicates(final Map<String, String> content,
final Map.Entry<String, Predicate<String>>... predicates) {
assertRowEntryPredicates(content, Map.ofEntries(predicates));
}
/**
* Using a mapping of headers to values and headers to test functions, verify each value in a row.
*
* @param content Map of column headers to row content
* @param predicateMap Map of column headers to a predicate function to check the column's value
* @throws RuntimeException If the number of tests don't match the number of entries in the row
*/
public static void assertRowEntryPredicates(final Map<String, String> content, final Map<String, Predicate<String>> predicateMap) {
if (!content.keySet().equals(predicateMap.keySet())) {
throw new RuntimeException(
String.join("\n",
"Bad test! Content keys and predicate keys don't match!",
" Content headers: " + String.join(",", content.keySet()),
"Predicate headers: " + String.join(",", predicateMap.keySet())));
}
content.forEach((header, value) ->
assertTrue(predicateMap.get(header).test(value),
"Row entry predicate failure: `" + header + "` -> `" + value + "`"));
}
}
| 2,407 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/StringTestUtilityTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Unit tests for {@link StringTestUtility}.
 */
public class StringTestUtilityTest {
    /**
     * Verifies {@link StringTestUtility#countMatches(String, String)} across empty content,
     * no-match, single-match, and repeated (including multi-character) occurrences.
     */
    @Test
    public void countMatchesTest() {
        // No match: empty content, or content that never contains the search string.
        assertEquals(0, StringTestUtility.countMatches("a", ""));
        assertEquals(0, StringTestUtility.countMatches("a", "b"));
        // Single occurrence: exact content, or embedded in longer content.
        assertEquals(1, StringTestUtility.countMatches("a", "a"));
        assertEquals(1, StringTestUtility.countMatches("a", "abcd"));
        // Repeated occurrences, including a multi-character search string.
        assertEquals(3, StringTestUtility.countMatches("a", "abcdabcdabcd"));
        assertEquals(3, StringTestUtility.countMatches("aa", "aaabcdaaabcdaaabcd"));
    }
}
| 2,408 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/EncryptSdkConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.TableSchema;
import lombok.Builder;
import lombok.Getter;
import javax.crypto.spec.SecretKeySpec;
import java.util.List;
/**
 * Basic configuration settings for encryption.
 *
 * <p>
 * All values are optional. Unset builder values default to {@code null}, except {@link #settings},
 * which defaults to {@link ClientSettings#lowAssuranceMode()}. The previous
 * {@code @Builder.Default ... = null} pairs were redundant: {@code null} is already the implicit
 * builder default, so the annotation and initializer added nothing.
 */
@Builder
@Getter
public class EncryptSdkConfigTestUtility {
    /**
     * Schema specification.
     */
    private TableSchema schema;

    /**
     * Key to use for encryption.
     */
    private SecretKeySpec key;

    /**
     * Salt to use for key generation.
     */
    private String salt;

    /**
     * Security related parameters. Defaults to permissive settings so individual tests
     * only need to override what they exercise.
     */
    @Builder.Default
    private ClientSettings settings = ClientSettings.lowAssuranceMode();

    /**
     * Input file.
     */
    private String input;

    /**
     * Column headers in the input file.
     */
    private List<String> inputColumnHeaders;

    /**
     * Column headers to use in the output file.
     */
    private List<String> outputColumnHeaders;
}
| 2,409 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/StringTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import java.util.regex.Pattern;
public final class StringTestUtility {
    /** Hidden utility class constructor. */
    private StringTestUtility() {
    }

    /**
     * Counts how many times a search string occurs (non-overlapping) in given string content.
     *
     * <p>
     * Uses a plain {@code indexOf} scan rather than the previous {@code split(Pattern.quote(...), -1)}
     * approach: no regex machinery is needed for a literal substring count, and the ill-defined
     * behavior of splitting on an empty pattern is avoided by returning 0 for an empty search string.
     *
     * @param searchString String to search for
     * @param content Content to search in
     * @return The number of non-overlapping occurrences of the search string in the content;
     *     0 if {@code searchString} is empty
     */
    public static int countMatches(final String searchString, final String content) {
        if (searchString.isEmpty()) {
            return 0;
        }
        int count = 0;
        int index = content.indexOf(searchString);
        while (index >= 0) {
            count++;
            // Advance past the current match so occurrences never overlap.
            index = content.indexOf(searchString, index + searchString.length());
        }
        return count;
    }
}
| 2,410 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/utils/FileTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
 * A test utility for creating temporary Path resources for tests that will clean themselves up after execution.
 */
public abstract class FileTestUtility {
    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempDir() throws IOException {
        final Path dir = Files.createTempDirectory("temp");
        dir.toFile().deleteOnExit();
        return dir;
    }

    /**
     * Creates a temporary file named "testFile*.tmp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the given prefix and suffix, marked with deleteOnExit.
     * The file lives inside a fresh temporary directory that is also removed on JVM exit.
     *
     * @param prefix The prefix of the Path to create
     * @param suffix The suffix of the Path to create
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        final Path file = Files.createTempFile(createTempDir(), prefix, suffix);
        file.toFile().deleteOnExit();
        return file;
    }

    /**
     * Resolves (without creating) a path with the given file name inside a fresh temporary
     * directory; the resolved path is marked with deleteOnExit.
     *
     * @param fileName The name of the Path to resolve
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be resolved
     */
    public static Path resolve(final String fileName) throws IOException {
        return resolve(fileName, createTempDir());
    }

    /**
     * Resolves {@code fileName} against {@code tempDir} and marks the result with deleteOnExit.
     *
     * @param fileName The name of the Path to resolve
     * @param tempDir The Path to use to resolve the temporary file
     * @return A temporary Path
     */
    private static Path resolve(final String fileName, final Path tempDir) {
        final Path resolved = tempDir.resolve(fileName);
        resolved.toFile().deleteOnExit();
        return resolved;
    }
}
| 2,411 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainEnvVarKeyInvalidTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
/*
 * Tests specifically needing an invalid key in the environment
 * variable for the shared secret key.
 */
public class MainEnvVarKeyInvalidTest {
    private static final String ENC_INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";
    private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";
    private static final String DEC_INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    /** CLI argument set for decrypt mode. */
    private DecryptCliConfigTestUtility decArgs;

    /** Decrypt command line under test. */
    private CommandLine decMain;

    /** CLI argument set for encrypt mode. */
    private EncryptCliConfigTestUtility encArgs;

    /** Encrypt command line under test. */
    private CommandLine encMain;

    /**
     * Executes encrypt mode with the configured CLI arguments.
     *
     * @return The command's exit code
     */
    public int runEncryptMainWithCliArgs() {
        final String[] cliArgs = encArgs.toArrayWithoutMode();
        return encMain.execute(cliArgs);
    }

    /**
     * Executes decrypt mode with the configured CLI arguments.
     *
     * @return The command's exit code
     */
    public int runDecryptMainWithCliArgs() {
        final String[] cliArgs = decArgs.toArrayWithoutMode();
        return decMain.execute(cliArgs);
    }

    @BeforeEach
    public void setup() {
        final SparkSession session = SparkSessionTestUtility.initSparkSession();
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(DEC_INPUT_PATH);
        decMain = DecryptMode.getApp(session);
        encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(ENC_INPUT_PATH, SCHEMA_PATH);
        encMain = EncryptMode.getApp(null, session);
    }

    @Test
    public void validateEncryptSecretKeyInvalidTest() {
        // The invalid env-var key must cause a non-zero exit code.
        assertNotEquals(0, runEncryptMainWithCliArgs());
    }

    @Test
    public void validateDecryptSecretKeyInvalidTest() {
        // The invalid env-var key must cause a non-zero exit code.
        assertNotEquals(0, runDecryptMainWithCliArgs());
    }
}
| 2,412 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/CliTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Utilities to interface with the CLI interface as if you were calling from the command line.
 */
public final class CliTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CliTestUtility() {
    }

    /**
     * Runs the cli with a mock to replace an actual connection to AWS Clean Rooms.
     *
     * @param args Command line parameters for encrypt mode
     * @return {@value Main#SUCCESS} if no errors are encountered or {@value Main#FAILURE}
     */
    public static int runWithoutCleanRooms(final EncryptCliConfigTestUtility args) {
        // The mocked DAO hands back the caller-provided settings instead of contacting AWS Clean Rooms.
        final CleanRoomsDao mockedDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(mockedDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(args.getClientSettings());
        final var app = EncryptMode.getApp(mockedDao, SparkSessionTestUtility.initSparkSession());
        return app.execute(args.toArrayWithoutMode());
    }
}
| 2,413 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainArgParseTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
import picocli.CommandLine;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Class for testing CLI argument parsing from the top-level which intentionally
 * does not execute any C3R business logic. I.e., only testing CLI parsing
 * configurations are correct with respect to which arguments are required,
 * which are exclusive, how certain common behaviors are triggered, etc.
 */
public class MainArgParseTest {
    /** No arguments: neither help nor version is requested and no subcommand is selected. */
    @Test
    public void noArgsTest() {
        final CommandLine.ParseResult result = Main.getApp().parseArgs();
        assertFalse(result.isVersionHelpRequested());
        assertFalse(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    /** Top-level version flags request version help only. */
    @ParameterizedTest
    @ValueSource(strings = {"-V", "--version"})
    public void mainVersionTest(final String versionFlag) {
        final CommandLine.ParseResult result = Main.getApp().parseArgs(versionFlag);
        assertTrue(result.isVersionHelpRequested());
        assertFalse(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    /** Top-level help flags request usage help only. */
    @ParameterizedTest
    @ValueSource(strings = {"-h", "--help"})
    public void mainHelpTest(final String helpFlag) {
        final CommandLine.ParseResult result = Main.getApp().parseArgs(helpFlag);
        assertFalse(result.isVersionHelpRequested());
        assertTrue(result.isUsageHelpRequested());
        assertEquals(0, result.subcommands().size());
    }

    /**
     * Check help parses as expected for a certain mode.
     *
     * @param mode CLI mode
     * @param help Help flag
     */
    private void checkModeHelpFlag(final String mode, final String help) {
        final CommandLine.ParseResult mainResult = Main.getApp().parseArgs(mode, help);
        // Exactly one subcommand should have been selected, matching the requested mode.
        assertEquals(1, mainResult.subcommands().size());
        final CommandLine.ParseResult modeResult = mainResult.subcommand();
        assertEquals(mode, modeResult.commandSpec().name());
        assertEquals(1, modeResult.expandedArgs().size());
        assertEquals(help, modeResult.expandedArgs().get(0));
        assertFalse(modeResult.isVersionHelpRequested());
        assertTrue(modeResult.isUsageHelpRequested());
    }

    /** Each mode accepts both the short and long help flag. */
    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeHelpFlagTest(final String mode) {
        checkModeHelpFlag(mode, "-h");
        checkModeHelpFlag(mode, "--help");
    }

    /**
     * Check version parses as expected for a certain mode.
     *
     * @param mode CLI mode
     * @param version Version flag
     */
    private void checkModeVersionFlag(final String mode, final String version) {
        final CommandLine.ParseResult mainResult = Main.getApp().parseArgs(mode, version);
        // Exactly one subcommand should have been selected, matching the requested mode.
        assertEquals(1, mainResult.subcommands().size());
        final CommandLine.ParseResult modeResult = mainResult.subcommand();
        assertEquals(mode, modeResult.commandSpec().name());
        assertEquals(1, modeResult.expandedArgs().size());
        assertEquals(version, modeResult.expandedArgs().get(0));
        assertTrue(modeResult.isVersionHelpRequested());
        assertFalse(modeResult.isUsageHelpRequested());
    }

    /** Each mode accepts both the short and long version flag. */
    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void modeVersionFlagTest(final String mode) {
        checkModeVersionFlag(mode, "-V");
        checkModeVersionFlag(mode, "--version");
    }

    @ParameterizedTest
    @ValueSource(strings = {"encrypt", "decrypt", "schema"})
    public void subcommandsWithNoArgsTest(final String mode) {
        // NOTE: This assumes the above listed modes have _some_ required arguments
        assertThrows(CommandLine.MissingParameterException.class, () -> Main.getApp().parseArgs(mode));
    }

    @Test
    public void invalidSubcommandTest() {
        // An unknown subcommand name must be rejected by the top-level parser.
        assertThrows(CommandLine.UnmatchedArgumentException.class, () -> Main.getApp().parseArgs("not-a-real-mode"));
    }

    /**
     * Asserts that no errors occur when using the given minimal args,
     * and then asserts that removing any of the arguments after the
     * first (i.e., the mode name itself) raises an error and a missing parameter).
     *
     * @param minimalArgs Minimal argument list - first element is mode name, remaining are arguments
     *                    for that mode.
     */
    public void checkMinimalRequiredModeArgs(final String[] minimalArgs) {
        // The full minimal argument list must parse cleanly.
        assertDoesNotThrow(() -> Main.getApp().parseArgs(minimalArgs));
        // check that for this mode (element 0), removing any argument causes a CLI parse error
        for (int pos = 1; pos < minimalArgs.length; pos++) {
            final List<String> invalidParameters = Arrays.stream(minimalArgs).collect(Collectors.toList());
            invalidParameters.remove(pos);
            assertThrows(CommandLine.MissingParameterException.class, () ->
                    Main.getApp().parseArgs(invalidParameters.toArray(String[]::new)));
        }
    }

    /** Encrypt mode requires the input, collaboration id, and schema arguments. */
    @Test
    public void encryptWithRequiredArgs() {
        final String[] parameters = {"encrypt", "input", "--id=00000000-1111-2222-3333-444444444444", "--schema=schema"};
        checkMinimalRequiredModeArgs(parameters);
    }

    /** Decrypt mode requires the input and collaboration id arguments. */
    @Test
    public void decryptWithRequiredArgs() {
        final String[] parameters = {"decrypt", "input", "--id=00000000-1111-2222-3333-444444444444"};
        checkMinimalRequiredModeArgs(parameters);
    }

    /** Schema mode requires the input plus one of the sub-mode flags (template or interactive). */
    @ParameterizedTest
    @ValueSource(strings = {"-t", "--template", "-i", "--interactive"})
    public void schemaWithRequiredArgs(final String modeFlag) {
        final String[] parameters = {"schema", "input", modeFlag};
        checkMinimalRequiredModeArgs(parameters);
    }

    @Test
    public void schemaGenModesExclusiveArgs() {
        final String[] parameters = {"schema", "input", "-i", "-t"};
        // parsing with both -i and -t errors due to those being mutually exclusive
        assertThrows(CommandLine.MutuallyExclusiveArgsException.class, () -> Main.getApp().parseArgs(parameters));
        // and simply dropping one fixes things
        assertDoesNotThrow(() -> Main.getApp().parseArgs(Arrays.copyOfRange(parameters, 0, parameters.length - 1)));
    }
}
| 2,414 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/EncryptCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
 * Class for conveniently generating various command line argument
 * combinations for the `encrypt` command.
 */
@Setter
public final class EncryptCliConfigTestUtility {
    /**
     * Schema file location.
     */
    private String schema;

    /**
     * Collaboration ID to use for computing shared secret keys.
     */
    private String collaborationId;

    /**
     * Input file location.
     */
    @Getter
    private String input;

    /**
     * Value used in the input file to represent {@code null} in the CSV data.
     */
    private String csvInputNullValue;

    /**
     * Value to use in the output file to represent {@code null} in the CSV data.
     */
    private String csvOutputNullValue;

    /**
     * Location to write the output file.
     */
    @Getter
    private String output;

    /**
     * Whether the output file should be overwritten if it already exists.
     */
    private boolean overwrite;

    /**
     * Whether encryption will actually be run or only the configuration will be validated.
     */
    private boolean dryRun;

    /**
     * Whether plaintext values are allowed.
     */
    private boolean allowCleartext;

    /**
     * Whether duplicate values are allowed in fingerprint columns.
     */
    private boolean allowDuplicates;

    /**
     * Whether columns with different names should be allowed in a join statement.
     */
    private boolean allowJoinsOnColumnsWithDifferentNames;

    /**
     * Whether {@code null} values should be preserved during encryption.
     */
    private boolean preserveNulls;

    /**
     * Whether a stacktrace should be printed.
     */
    private boolean enableStackTraces;

    /**
     * Input file data type.
     */
    private FileFormat fileFormat;

    /**
     * AWS CLI profile.
     */
    private String profile;

    /**
     * AWS region.
     */
    private String region;

    /**
     * Hidden default constructor so static instance creators are used.
     */
    private EncryptCliConfigTestUtility() {
    }

    /**
     * Default test values for encryption args to use with tests.
     *
     * @return Default test values
     */
    public static EncryptCliConfigTestUtility defaultTestArgs() {
        final var args = new EncryptCliConfigTestUtility();
        args.input = "mySourceFile";
        args.schema = "mySchema";
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.allowCleartext = true;
        args.overwrite = true;
        args.enableStackTraces = true;
        return args;
    }

    /**
     * Creates a test configuration for a dry run. Skips all data processing and validates settings.
     *
     * @param file Input file to use for the dry run (or {@code null} for a placeholder)
     * @param schema Schema file to use for the dry run (or {@code null} for a placeholder)
     * @return Default dry run configuration with specified files
     */
    public static EncryptCliConfigTestUtility defaultDryRunTestArgs(final String file, final String schema) {
        final var args = new EncryptCliConfigTestUtility();
        args.input = (file == null) ? "mySourceFile" : file;
        args.schema = (schema == null) ? "mySchema" : schema;
        args.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        args.dryRun = true;
        args.overwrite = true;
        args.allowCleartext = true;
        args.enableStackTraces = true;
        return args;
    }

    /**
     * Empty CLI configuration.
     *
     * @return Configuration instance with no set values
     */
    public static EncryptCliConfigTestUtility blankTestArgs() {
        return new EncryptCliConfigTestUtility();
    }

    /**
     * Create an instance of {@code ClientSettings} using the specified values.
     *
     * @return {@link ClientSettings} using values stored in this instance
     */
    public ClientSettings getClientSettings() {
        return ClientSettings.builder()
                .preserveNulls(preserveNulls)
                .allowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames)
                .allowDuplicates(allowDuplicates)
                .allowCleartext(allowCleartext)
                .build();
    }

    /**
     * Appends {@code option + value} to {@code args} when the value is set.
     *
     * @param args Argument list being built
     * @param option Option prefix including the trailing {@code =}
     * @param value Option value, or {@code null} to skip
     */
    private static void addValued(final List<String> args, final String option, final String value) {
        if (value != null) {
            args.add(option + value);
        }
    }

    /**
     * Appends a boolean flag to {@code args} when enabled.
     *
     * @param args Argument list being built
     * @param flag Flag text
     * @param enabled Whether the flag should be emitted
     */
    private static void addFlag(final List<String> args, final String flag, final boolean enabled) {
        if (enabled) {
            args.add(flag);
        }
    }

    /**
     * Converts the specified command line parameters to a list.
     *
     * @return List of command line parameters
     * @see EncryptCliConfigTestUtility#getCliArgsWithoutMode
     */
    public List<String> getCliArgs() {
        final List<String> args = new ArrayList<>();
        args.add("encrypt");
        if (input != null) {
            args.add(input);
        }
        addValued(args, "--schema=", schema);
        addValued(args, "--id=", collaborationId);
        addValued(args, "--csvInputNULLValue=", csvInputNullValue);
        addValued(args, "--csvOutputNULLValue=", csvOutputNullValue);
        addValued(args, "--output=", output);
        addFlag(args, "--overwrite", overwrite);
        addFlag(args, "--dryRun", dryRun);
        addFlag(args, "--enableStackTraces", enableStackTraces);
        if (fileFormat != null) {
            args.add("--fileFormat=" + fileFormat);
        }
        addValued(args, "--profile=", profile);
        addValued(args, "--region=", region);
        return args;
    }

    /**
     * Converts the specified command line parameters to a list without including the CLI mode parameter.
     *
     * @return List of command line parameters.
     * @see EncryptCliConfigTestUtility#getCliArgs
     */
    public List<String> getCliArgsWithoutMode() {
        final List<String> allArgs = getCliArgs();
        return new ArrayList<>(allArgs.subList(1, allArgs.size()));
    }

    /**
     * Converts the specified command line parameters to an array without including the CLI mode parameter.
     *
     * @return Array of command line parameters
     */
    public String[] toArrayWithoutMode() {
        return getCliArgsWithoutMode().toArray(new String[0]);
    }
}
| 2,415 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/SchemaModeTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.spark.utils.StringTestUtility;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.stream.Collectors;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class SchemaModeTest {
    // Number of columns expected in the sample data files these tests read.
    private static final int SAMPLE_DATA_COLUMN_COUNT = 9;

    // Literal placeholder text built from every ColumnType joined by "|" and wrapped in brackets.
    // NOTE(review): StringTestUtility.countMatches quotes its search string, so this is matched as a
    // verbatim substring, not a regex — presumably the generated template contains this exact
    // placeholder for "any column type"; confirm against SchemaMode's template output.
    private static final String ALL_COLUMN_TYPES =
            "[" + Arrays.stream(ColumnType.values())
                    .map(ColumnType::toString)
                    .collect(Collectors.joining("|")) + "]";

    // Same placeholder with CLEARTEXT removed; expected when restrictive settings are in effect.
    private static final String ALL_COLUMN_TYPES_SANS_CLEARTEXT =
            "[" + Arrays.stream(ColumnType.values())
                    .filter(c -> c != ColumnType.CLEARTEXT)
                    .map(ColumnType::toString)
                    .collect(Collectors.joining("|")) + "]";

    // Spark session shared by every test in this class.
    private final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession();

    // Output location for the generated schema; reset before each test.
    private Path schemaPath;
    @BeforeEach
    public void setup() throws IOException {
        // Resolve (without creating) a fresh schema output path under a new temp dir for each test.
        schemaPath = FileTestUtility.resolve("schema.json");
    }
// Generate a template without settings and shallowly check content contains expected entries
private void runTemplateGeneratorNoSettings(final String inputFile,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.build();
assertEquals(0, SchemaMode.getApp(null, sparkSession).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(SAMPLE_DATA_COLUMN_COUNT,
StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
}
// Generate a template with permissive settings and shallowly check content contains expected entries
private void runTemplateGeneratorPermissiveSettings(final String inputFile,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
.build();
final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(SAMPLE_DATA_COLUMN_COUNT,
StringTestUtility.countMatches(ALL_COLUMN_TYPES, contents));
}
// Generate a template with restrictive settings and shallowly check content contains expected entries
private void runTemplateGeneratorRestrictiveSettings(final String inputFile,
final int expectedTargetColumnCount,
final boolean hasHeaderRow) throws IOException {
final var args = SchemaCliConfigTestUtility.builder()
.input(inputFile)
.output(schemaPath.toString())
.subMode("--template")
.noHeaders(!hasHeaderRow)
.overwrite(true)
.collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
.build();
final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
assertTrue(Files.exists(schemaPath));
assertTrue(Files.size(schemaPath) > 0);
final String contents = Files.readString(schemaPath);
assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
StringTestUtility.countMatches("sourceHeader", contents));
assertEquals(expectedTargetColumnCount,
StringTestUtility.countMatches("targetHeader", contents));
assertEquals(expectedTargetColumnCount,
StringTestUtility.countMatches(ALL_COLUMN_TYPES_SANS_CLEARTEXT, contents));
}
// Run interactive schema gen without settings and check it returns results
// and shallowly check content contains expected entries
/**
 * Runs interactive schema generation with no collaboration settings and shallowly verifies the
 * generated schema: a header row flag matching the input, and one cleartext target column per
 * source column.
 *
 * @param inputFile    path to the sample data file to generate a schema for
 * @param hasHeaderRow whether the input file's first row is a header row
 * @throws IOException if the generated schema file cannot be read
 */
private void runInteractiveGeneratorNoSettings(final String inputFile,
                                               final boolean hasHeaderRow) throws IOException {
    final var args = SchemaCliConfigTestUtility.builder()
            .input(inputFile)
            .output(schemaPath.toString())
            .subMode("--interactive")
            .noHeaders(!hasHeaderRow)
            .overwrite(true)
            .build();
    // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
    // picking a number)
    final int columnCountUpperBound = 100;
    // user input which repeatedly says the source column in question should generate one cleartext column
    // with a trivial name
    final StringBuilder inputBuilder = new StringBuilder();
    for (int i = 0; i < columnCountUpperBound; i++) {
        // 1 target column
        inputBuilder.append("1\n");
        // target column type
        inputBuilder.append("cleartext\n");
        // target column name
        inputBuilder.append("column").append(i).append('\n');
    }
    // Feed the scripted answers to the interactive prompt via stdin.
    final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
    System.setIn(new BufferedInputStream(userInput));
    assertEquals(0, Main.getApp().execute(args.toArray()));
    assertTrue(schemaPath.toFile().exists());
    assertTrue(schemaPath.toFile().length() > 0);
    // Shallow content checks: header row flag, source headers only when input had headers,
    // and a cleartext entry for every column.
    final String contents = Files.readString(schemaPath);
    assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
    assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
            StringTestUtility.countMatches("sourceHeader", contents));
    assertEquals(SAMPLE_DATA_COLUMN_COUNT,
            StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
}
// Run interactive schema gen with permissive settings and check it returns results
// and shallowly check content contains expected entries
/**
 * Runs interactive schema generation against a collaboration with permissive (low assurance)
 * settings and shallowly verifies the generated schema allows one cleartext target column per
 * source column.
 *
 * @param inputFile    path to the sample data file to generate a schema for
 * @param hasHeaderRow whether the input file's first row is a header row
 * @throws IOException if the generated schema file cannot be read
 */
private void runInteractiveGeneratorPermissiveSettings(final String inputFile,
                                                       final boolean hasHeaderRow) throws IOException {
    final var args = SchemaCliConfigTestUtility.builder()
            .input(inputFile)
            .output(schemaPath.toString())
            .subMode("--interactive")
            .noHeaders(!hasHeaderRow)
            .overwrite(true)
            .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
            .build();
    // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
    // picking a number)
    final int columnCountUpperBound = 100;
    // user input which repeatedly says the source column in question should generate one cleartext column
    // with a trivial name
    final StringBuilder inputBuilder = new StringBuilder();
    for (int i = 0; i < columnCountUpperBound; i++) {
        // 1 target column
        inputBuilder.append("1\n");
        // target column type
        inputBuilder.append("cleartext\n");
        // target column name
        inputBuilder.append("column").append(i).append('\n');
    }
    // Feed the scripted answers to the interactive prompt via stdin.
    final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
    System.setIn(new BufferedInputStream(userInput));
    // Mocked Clean Rooms collaboration reporting permissive settings (cleartext allowed).
    final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
    assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
    assertTrue(schemaPath.toFile().exists());
    assertTrue(schemaPath.toFile().length() > 0);
    // Shallow content checks: header row flag, source headers only when input had headers,
    // and a cleartext entry for every column.
    final String contents = Files.readString(schemaPath);
    assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
    assertEquals(hasHeaderRow ? SAMPLE_DATA_COLUMN_COUNT : 0,
            StringTestUtility.countMatches("sourceHeader", contents));
    assertEquals(SAMPLE_DATA_COLUMN_COUNT,
            StringTestUtility.countMatches("\"" + ColumnType.CLEARTEXT + "\"", contents));
}
// Run interactive schema gen with restrictive settings and check it returns results
// and shallowly check content contains expected entries
/**
 * Runs interactive schema generation against a collaboration with restrictive (high assurance)
 * settings and shallowly verifies the generated schema: cleartext is rejected by the prompt, so
 * every target column ends up as a fingerprint column.
 *
 * @param inputFile                 path to the sample data file to generate a schema for
 * @param expectedTargetColumnCount number of target columns expected in the generated schema
 * @param hasHeaderRow              whether the input file's first row is a header row
 * @throws IOException if the generated schema file cannot be read
 */
private void runInteractiveGeneratorRestrictiveSettings(final String inputFile,
                                                        final int expectedTargetColumnCount,
                                                        final boolean hasHeaderRow) throws IOException {
    final var args = SchemaCliConfigTestUtility.builder()
            .input(inputFile)
            .output(schemaPath.toString())
            .subMode("--interactive")
            .noHeaders(!hasHeaderRow)
            .overwrite(true)
            .collaborationId(GeneralTestUtility.EXAMPLE_SALT.toString())
            .build();
    // number greater than test file column counts (test will fail if too low, so no incorrectness risk in
    // picking a number)
    final int columnCountUpperBound = 100;
    // user input which repeatedly says the source column in question should generate one cleartext column
    // with a trivial name
    final StringBuilder inputBuilder = new StringBuilder();
    for (int i = 0; i < columnCountUpperBound; i++) {
        // 1 target column
        inputBuilder.append("1\n");
        // target column type, will fail due to restrictive settings
        inputBuilder.append("cleartext\n");
        // target column type, will succeed
        inputBuilder.append("fingerprint\n");
        // target column name
        inputBuilder.append("column").append(i).append('\n');
        // skip suffix
        inputBuilder.append("\n");
    }
    // Feed the scripted answers to the interactive prompt via stdin.
    final var userInput = new ByteArrayInputStream(inputBuilder.toString().getBytes(StandardCharsets.UTF_8));
    System.setIn(new BufferedInputStream(userInput));
    // Mocked Clean Rooms collaboration reporting restrictive settings (no cleartext allowed).
    final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.highAssuranceMode());
    assertEquals(0, SchemaMode.getApp(cleanRoomsDao, sparkSession).execute(args.toArrayWithoutMode()));
    assertTrue(schemaPath.toFile().exists());
    assertTrue(schemaPath.toFile().length() > 0);
    // Shallow content checks: header row flag, no cleartext columns, and a fingerprint entry
    // for every expected target column.
    final String contents = Files.readString(schemaPath);
    assertTrue(contents.contains("\"headerRow\": " + hasHeaderRow));
    assertEquals(hasHeaderRow ? expectedTargetColumnCount : 0,
            StringTestUtility.countMatches("sourceHeader", contents));
    assertEquals(expectedTargetColumnCount,
            StringTestUtility.countMatches("targetHeader", contents));
    assertEquals(0,
            StringTestUtility.countMatches(ColumnType.CLEARTEXT.toString(), contents));
    assertEquals(expectedTargetColumnCount,
            StringTestUtility.countMatches("\"" + ColumnType.FINGERPRINT + "\"", contents));
}
// Template-mode schema generation tests across CSV/Parquet inputs, with and without header rows,
// under no, permissive, and restrictive collaboration settings.
@Test
public void schemaTemplateCsvTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaTemplateCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaTemplateWithPermissiveSettingsCsvTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaTemplateWithPermissiveSettingsCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaTemplateWithRestrictiveSettingsCsvTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsCsvNoHeadersTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv", SAMPLE_DATA_COLUMN_COUNT, false);
}

@Test
public void schemaTemplateParquetTest() throws IOException {
    runTemplateGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaTemplateWithPermissiveSettingsParquetTest() throws IOException {
    runTemplateGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsParquetTest() throws IOException {
    runTemplateGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaTemplateWithRestrictiveSettingsParquetMixedDataTest() throws IOException {
    // only 1 column is a string, so we only expect 1 target column
    runTemplateGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 1, true);
}
// Interactive-mode schema generation tests: no settings and permissive settings, CSV inputs.
@Test
public void schemaInteractiveCsvTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

// Check that interactive schema command returns results and shallowly check content contains expected entries
@Test
public void schemaInteractiveCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/csv/data_sample_no_headers.csv", false);
}

@Test
public void schemaInteractiveParquetTest() throws IOException {
    runInteractiveGeneratorNoSettings("../samples/parquet/data_sample.parquet", true);
}

@Test
public void schemaInteractivePermissiveSettingsCsvTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_without_quotes.csv", true);
}

@Test
public void schemaInteractivePermissiveSettingsCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/csv/data_sample_no_headers.csv", false);
}
/**
 * Interactive schema generation for Parquet input under permissive collaboration settings.
 *
 * <p>Bug fix: this test previously called {@code runInteractiveGeneratorNoSettings} (a copy-paste
 * of {@code schemaInteractiveParquetTest}), so the permissive-settings code path was never
 * exercised for Parquet input despite the test's name. It now calls the permissive-settings
 * generator, matching the CSV variants above.
 *
 * @throws IOException if the generated schema file cannot be read
 */
@Test
public void schemaInteractivePermissiveSettingsParquetTest() throws IOException {
    runInteractiveGeneratorPermissiveSettings("../samples/parquet/data_sample.parquet", true);
}
// Interactive-mode schema generation tests under restrictive settings: all columns must be
// fingerprint, so only string-typed source columns can produce target columns.
@Test
public void schemaInteractiveRestrictiveSettingsCsvTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_without_quotes.csv", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaInteractiveRestrictiveSettingsCsvNoHeadersTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/csv/data_sample_no_headers.csv", SAMPLE_DATA_COLUMN_COUNT, false);
}

@Test
public void schemaInteractiveRestrictiveSettingsParquetTest() throws IOException {
    runInteractiveGeneratorRestrictiveSettings("../samples/parquet/data_sample.parquet", SAMPLE_DATA_COLUMN_COUNT, true);
}

@Test
public void schemaInteractiveRestrictiveSettingsParquetMixedDataTest() throws IOException {
    // Only 1 column is of type string, so we expect 1 target column only
    runInteractiveGeneratorRestrictiveSettings("../samples/parquet/rows_100_groups_10_prim_data.parquet", 1, true);
}
}
| 2,416 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainCsvSingleRowRoundTripTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.FingerprintTransformer;
import com.amazonaws.c3r.SealedTransformer;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import static java.util.Map.entry;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/*
* A test class with a single row of data containing self-descriptively named column entries
 * across the gamut of possible kinds of data that could appear. Intended to act
* as easy to audit unit tests for round tripping through the C3R with various settings and CSV input/output.
*/
public class MainCsvSingleRowRoundTripTest {
    // ColumnSchema Name -> ColumnSchema Value mappings used for convenient testing data
    // written out to a CSV file and then parsed in
    private final List<Map.Entry<String, String>> exampleCsvEntries = List.of(
            Map.entry("foo", "foo"),
            Map.entry("quoted-foo", "\"foo\""),
            Map.entry("quoted-foo-newline-bar", "\"foo\nbar\""),
            Map.entry("blank", ""), // `,,`
            Map.entry("1space", " "), // `, ,`
            Map.entry("quoted-blank", "\"\""),
            Map.entry("quoted-1space", "\" \"")
    );

    // CLI argument holders, rebuilt fresh for each test in setup().
    private EncryptCliConfigTestUtility encArgs;

    private DecryptCliConfigTestUtility decArgs;

    // Optional custom NULL encodings; when non-null they are passed through to the CLI flags.
    private String encCsvInputNull;

    private String encCsvOutputNull;

    private String decCsvInputNull;

    private String decCsvOutputNull;

    // Path of the single-row CSV input file generated in setup().
    private Path input;

    /**
     * Builds a single column schema mapping a source header to a same-named target header.
     *
     * @param headerName source and target header name
     * @param type       target column type
     * @param pad        padding to apply; only used when {@code type} is {@code SEALED}
     * @return the constructed column schema
     */
    private ColumnSchema createColumn(final String headerName, final ColumnType type, final Pad pad) {
        final var columnBuilder = ColumnSchema.builder()
                .sourceHeader(new ColumnHeader(headerName))
                .targetHeader(new ColumnHeader(headerName))
                .type(type);
        if (type == ColumnType.SEALED) {
            columnBuilder.pad(pad);
        }
        return columnBuilder.build();
    }

    // Create a schema where all columns have the same type and padding.
    private TableSchema createMonoSchema(final ColumnType type, final Pad pad) {
        // Guard against a malformed test: padding is only meaningful for sealed columns.
        if (type != ColumnType.SEALED && pad != null) {
            throw new C3rRuntimeException("Bad test! Can't pad non-sealed columns!");
        }
        return new MappedTableSchema(exampleCsvEntries.stream()
                .map(entry -> createColumn(entry.getKey(), type, pad))
                .collect(Collectors.toList())
        );
    }
    /**
     * Writes the single-row example CSV to a temp file and resets CLI argument holders and
     * NULL-encoding overrides before each test.
     *
     * @throws IOException if the temp input file cannot be written
     */
    @BeforeEach
    public void setup() throws IOException {
        input = FileTestUtility.createTempFile("csv-values", ".csv");
        final String headerRow = exampleCsvEntries.stream().map(Map.Entry::getKey).collect(Collectors.joining(","));
        final String valueRow = exampleCsvEntries.stream().map(Map.Entry::getValue).collect(Collectors.joining(","));
        Files.writeString(input,
                String.join("\n",
                        headerRow,
                        valueRow));
        encArgs = EncryptCliConfigTestUtility.blankTestArgs();
        decArgs = DecryptCliConfigTestUtility.blankTestArgs();
        encCsvInputNull = null;
        encCsvOutputNull = null;
        decCsvInputNull = null;
        decCsvOutputNull = null;
    }

    /**
     * Encrypts the example CSV with a schema where every column has the given type/pad,
     * asserting the CLI exits successfully.
     *
     * @param type column type applied to every column
     * @param pad  padding for sealed columns ({@code null} otherwise)
     * @return path to the merged encrypted CSV output
     * @throws IOException if schema or output files cannot be written/read
     */
    private String encrypt(final ColumnType type, final Pad pad) throws IOException {
        final String output = FileTestUtility.createTempDir().toString();
        final Path schemaPath = FileTestUtility.createTempFile("schema", ".json");
        schemaPath.toFile().deleteOnExit();
        final var writer = Files.newBufferedWriter(schemaPath, StandardCharsets.UTF_8);
        writer.write(GsonUtil.toJson(createMonoSchema(type, pad)));
        writer.close();
        encArgs.setInput(input.toString());
        encArgs.setAllowCleartext(true);
        encArgs.setEnableStackTraces(true);
        encArgs.setSchema(schemaPath.toString());
        encArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
        encArgs.setOutput(output);
        encArgs.setOverwrite(true);
        // Only forward NULL encodings that a test explicitly configured.
        if (encCsvInputNull != null) {
            encArgs.setCsvInputNullValue(encCsvInputNull);
        }
        if (encCsvOutputNull != null) {
            encArgs.setCsvOutputNullValue(encCsvOutputNull);
        }
        // Mocked Clean Rooms collaboration returning whatever client settings the args specify.
        final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
        final int exitCode = EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession())
                .execute(encArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        // Spark writes partitioned output; merge into a single CSV for row-level assertions.
        return CsvTestUtility.mergeOutput(Path.of(output)).toString();
    }

    // Convenience wrappers fixing the column type for all columns.
    private String encryptAllColumnsCleartext() throws IOException {
        return encrypt(ColumnType.CLEARTEXT, null);
    }

    private String encryptAllColumnsSealed() throws IOException {
        return encrypt(ColumnType.SEALED, Pad.DEFAULT);
    }

    private String encryptAllColumnsFingerprint() throws IOException {
        return encrypt(ColumnType.FINGERPRINT, null);
    }
    /**
     * Decrypts the given encrypted CSV, asserting the CLI exits successfully.
     *
     * @param inPath path to the encrypted CSV input
     * @return path to the merged decrypted CSV output
     * @throws IOException if output files cannot be written/read
     */
    private String decrypt(final String inPath) throws IOException {
        final String output = FileTestUtility.createTempDir().toString();
        decArgs.setInput(inPath);
        decArgs.setFailOnFingerprintColumns(false);
        decArgs.setEnableStackTraces(true);
        decArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
        decArgs.setOutput(output);
        decArgs.setOverwrite(true);
        // Only forward NULL encodings that a test explicitly configured.
        if (decCsvInputNull != null) {
            decArgs.setCsvInputNullValue(decCsvInputNull);
        }
        if (decCsvOutputNull != null) {
            decArgs.setCsvOutputNullValue(decCsvOutputNull);
        }
        final int exitCode = DecryptMode.getApp(SparkSessionTestUtility.initSparkSession()).execute(decArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        // Spark writes partitioned output; merge into a single CSV for row-level assertions.
        return CsvTestUtility.mergeOutput(Path.of(output)).toString();
    }

    // Reads the CSV at the given path and asserts it contains exactly one data row, returning it.
    private Map<String, String> readSingleCsvRow(final String path) {
        final var rows = CsvTestUtility.readRows(path);
        assertEquals(1, rows.size());
        return rows.get(0);
    }

    /**
     * Round-trips the example CSV through cleartext encryption and decryption, asserting the
     * single data row matches the given predicates after each step.
     *
     * @param expectedEncRow per-column predicates for the row after encryption
     * @param expectedDecRow per-column predicates for the row after decryption
     * @throws IOException if intermediate files cannot be written/read
     */
    public void validateCleartextRoundTripEncDecRowContent(final Map<String, Predicate<String>> expectedEncRow,
                                                           final Map<String, Predicate<String>> expectedDecRow) throws IOException {
        final String encryptedPath = encryptAllColumnsCleartext();
        final var rowPostEncryption = readSingleCsvRow(encryptedPath);
        GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption, expectedEncRow);
        final String decryptedPath = decrypt(encryptedPath);
        final var rowPostDecryption = readSingleCsvRow(decryptedPath);
        GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption, expectedDecRow);
    }

    /**
     * Round-trips the example CSV through sealed encryption and decryption, asserting every
     * encrypted value carries the sealed descriptor prefix and the decrypted row matches the
     * given predicates.
     *
     * @param expectedDecRow per-column predicates for the row after decryption
     * @throws IOException if intermediate files cannot be written/read
     */
    public void validateSealedRoundTripDecRowContent(final Map<String, Predicate<String>> expectedDecRow) throws IOException {
        final String encryptedPath = encryptAllColumnsSealed();
        final var rowPostEncryption = readSingleCsvRow(encryptedPath);
        // Map each value to "starts with sealed prefix"; dropWhile removes leading trues, so the
        // remaining set is empty iff every value had the prefix.
        assertTrue(rowPostEncryption.values().stream().map((val) -> val.startsWith(SealedTransformer.DESCRIPTOR_PREFIX_STRING))
                .dropWhile((val) -> val).collect(Collectors.toSet()).isEmpty());
        final String decryptedPath = decrypt(encryptedPath);
        final var rowPostDecryption = readSingleCsvRow(decryptedPath);
        GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption, expectedDecRow);
    }
    // Cleartext round trip with default NULL handling on both encrypt and decrypt:
    // values pass through unchanged.
    @Test
    public void defaultEncNulls_defaultDecNulls_EncDec_CleartextTest() throws IOException {
        final Map<String, Predicate<String>> expectedEncRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("foo")),
                entry("quoted-foo", (val) -> val.equals("foo")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateCleartextRoundTripEncDecRowContent(expectedEncRow, expectedEncRow);
    }

    // Cleartext round trip with custom NULL handling on encrypt only:
    // `foo` is read as NULL and written back out as `bar`.
    @Test
    public void customEncNulls_EncDec_CleartextTest() throws IOException {
        encCsvInputNull = "foo";
        encCsvOutputNull = "bar";
        final Map<String, Predicate<String>> expectedEncRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("bar")),
                entry("quoted-foo", (val) -> val.equals("bar")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateCleartextRoundTripEncDecRowContent(expectedEncRow, expectedEncRow);
    }

    // Cleartext round trip with custom NULL handling on decrypt only:
    // decrypt reads `foo` as NULL and writes it as `bar`.
    @Test
    public void customNulls_Dec_CleartextTest() throws IOException {
        decCsvInputNull = "foo";
        decCsvOutputNull = "bar";
        final Map<String, Predicate<String>> expectedEncryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("foo")),
                entry("quoted-foo", (val) -> val.equals("foo")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("bar")),
                entry("quoted-foo", (val) -> val.equals("bar")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateCleartextRoundTripEncDecRowContent(expectedEncryptRow, expectedDecryptRow);
    }
    // Cleartext round trip with distinct custom NULL handling on both encrypt and decrypt:
    // `foo` -> `bar` on encrypt, then `bar` -> `baz` on decrypt.
    @Test
    public void customNulls_EncDec_CleartextTest() throws IOException {
        encCsvInputNull = "foo";
        encCsvOutputNull = "bar";
        decCsvInputNull = "bar";
        decCsvOutputNull = "baz";
        final Map<String, Predicate<String>> expectedEncryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("bar")),
                entry("quoted-foo", (val) -> val.equals("bar")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("baz")),
                entry("quoted-foo", (val) -> val.equals("baz")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                // spaces are trimmed on unquoted input, so we again get `""` i.e. NULL
                entry("1space", (val) -> val.equals("")),
                // by default, a blank and a quoted blank both are treated as NULL
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateCleartextRoundTripEncDecRowContent(expectedEncryptRow, expectedDecryptRow);
    }

    // Sealed round trip with default NULL handling: decrypted values match the originals.
    @Test
    public void defaultEncNulls_defaultDecNulls_EncDec_SealedTest() throws IOException {
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("foo")),
                entry("quoted-foo", (val) -> val.equals("foo")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                entry("1space", (val) -> val.equals("")),
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateSealedRoundTripDecRowContent(expectedDecryptRow);
    }

    // Sealed round trip with custom NULLs on encrypt but default NULL output on decrypt.
    @Test
    public void customEncNulls_defaultDecOutNull_EncDec_SealedTest() throws IOException {
        encCsvInputNull = "foo";
        encCsvOutputNull = "bar";
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                // encrypted as NULL
                entry("foo", (val) -> val.equals("")),
                // encrypted as NULL
                entry("quoted-foo", (val) -> val.equals("")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                // written as `,"",` since default NULL encoding `,,` is being used
                entry("blank", (val) -> val.equals("\"\"")),
                // written as `,"",` since default NULL encoding `,,` is being used
                entry("1space", (val) -> val.equals("\"\"")),
                // written as `,"",` since default NULL encoding `,,` is being used
                entry("quoted-blank", (val) -> val.equals("\"\"")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateSealedRoundTripDecRowContent(expectedDecryptRow);
    }

    // Sealed round trip with custom NULLs on encrypt and a custom NULL output value on decrypt.
    @Test
    public void customEncNulls_customDecOutNull_EncDec_SealedTest() throws IOException {
        encCsvInputNull = "foo";
        encCsvOutputNull = "bar";
        decCsvOutputNull = "baz";
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("baz")),
                entry("quoted-foo", (val) -> val.equals("baz")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                entry("1space", (val) -> val.equals("")),
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateSealedRoundTripDecRowContent(expectedDecryptRow);
    }

    // Sealed round trip with default encrypt NULLs but custom decrypt NULLs:
    // empty values decode to the custom output value `baz`.
    @Test
    public void defaultEncNulls_customDecNulls_EncDec_SealedTest() throws IOException {
        decCsvInputNull = "";
        decCsvOutputNull = "baz";
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("foo")),
                entry("quoted-foo", (val) -> val.equals("foo")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("baz")),
                entry("1space", (val) -> val.equals("baz")),
                entry("quoted-blank", (val) -> val.equals("baz")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateSealedRoundTripDecRowContent(expectedDecryptRow);
    }

    // Sealed round trip with custom NULLs on both sides, including values that themselves
    // require CSV quoting (comma in the decrypt output NULL value).
    @Test
    public void customEncNulls_customDecNulls_EncDec_SealedTest() throws IOException {
        encCsvInputNull = "foo";
        encCsvOutputNull = "Aliens";
        decCsvInputNull = " ";
        decCsvOutputNull = "Zombies, run!";
        final Map<String, Predicate<String>> expectedDecryptRow = Map.ofEntries(
                entry("foo", (val) -> val.equals("\"Zombies, run!\"")),
                entry("quoted-foo", (val) -> val.equals("\"Zombies, run!\"")),
                entry("quoted-foo-newline-bar", (val) -> val.equals("\"foo\nbar\"")),
                entry("blank", (val) -> val.equals("")),
                entry("1space", (val) -> val.equals("")),
                entry("quoted-blank", (val) -> val.equals("")),
                // quotes do not preserve leading/trailing space
                entry("quoted-1space", (val) -> val.equals("\" \""))
        );
        validateSealedRoundTripDecRowContent(expectedDecryptRow);
    }
    /**
     * Fingerprint round trip with default NULL handling: every value gets a fingerprint prefix,
     * equal non-NULL values collide iff joins on differently named columns are allowed, NULLs
     * always encode uniquely, and decryption leaves fingerprint values untouched.
     *
     * @param allowJoinsOnColumnsWithDifferentNames whether equal values in different columns
     *                                              should produce equal fingerprints
     * @throws IOException if intermediate files cannot be written/read
     */
    public void defaultNull_EncDec_Fingerprint(final boolean allowJoinsOnColumnsWithDifferentNames) throws IOException {
        encArgs.setAllowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames);
        final String encryptedPath = encryptAllColumnsFingerprint();
        final var rowPostEncryption = readSingleCsvRow(encryptedPath);
        final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
        GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
                entry("foo", isFingerprintEncrypted),
                entry("quoted-foo", isFingerprintEncrypted),
                entry("quoted-foo-newline-bar", isFingerprintEncrypted),
                entry("blank", isFingerprintEncrypted),
                entry("1space", isFingerprintEncrypted),
                entry("quoted-blank", isFingerprintEncrypted),
                entry("quoted-1space", isFingerprintEncrypted)
        );
        // check non-NULL values (`foo` and `"foo"`) get the same encoding
        // iff allowJoinsOnColumnsWithDifferentNames is true
        if (allowJoinsOnColumnsWithDifferentNames) {
            assertEquals(
                    rowPostEncryption.get("foo"),
                    rowPostEncryption.get("quoted-foo"));
        } else {
            assertNotEquals(
                    rowPostEncryption.get("foo"),
                    rowPostEncryption.get("quoted-foo"));
        }
        // ensure we always transform NULL to unique values to preserve
        // the "uniqueness" of NULLs w.r.t. SQL semantics
        assertNotEquals(
                rowPostEncryption.get("blank"),
                rowPostEncryption.get("1space"));
        assertNotEquals(
                rowPostEncryption.get("blank"),
                rowPostEncryption.get("quoted-blank"));
        // fingerprint values don't get decrypted
        final String decryptedPath = decrypt(encryptedPath);
        final var rowPostDecryption = readSingleCsvRow(decryptedPath);
        GeneralTestUtility.assertRowEntryPredicates(rowPostDecryption,
                entry("foo", isFingerprintEncrypted),
                entry("quoted-foo", isFingerprintEncrypted),
                entry("quoted-foo-newline-bar", isFingerprintEncrypted),
                entry("blank", isFingerprintEncrypted),
                entry("1space", isFingerprintEncrypted),
                entry("quoted-blank", isFingerprintEncrypted),
                entry("quoted-1space", isFingerprintEncrypted)
        );
    }

    @Test
    public void defaultNull_EncDec_allowJoinsOnColumnsWithDifferentNamesIsTrue_FingerprintTest() throws IOException {
        defaultNull_EncDec_Fingerprint(true);
    }

    @Test
    public void defaultNull_EncDec_allowJoinsOnColumnsWithDifferentNamesIsFalse_FingerprintTest() throws IOException {
        defaultNull_EncDec_Fingerprint(false);
    }

    /**
     * Fingerprint encryption with the empty string as the input NULL value: NULLs still encode
     * uniquely, and an unquoted blank (`,,`) is distinct from a quoted blank (`,"",`).
     *
     * @param allowJoinsOnColumnsWithDifferentNames whether equal values in different columns
     *                                              should produce equal fingerprints
     * @throws IOException if intermediate files cannot be written/read
     */
    public void blankEncNull_Fingerprint(final boolean allowJoinsOnColumnsWithDifferentNames) throws IOException {
        encCsvInputNull = "";
        encArgs.setAllowJoinsOnColumnsWithDifferentNames(allowJoinsOnColumnsWithDifferentNames);
        final String encryptedPath = encryptAllColumnsFingerprint();
        final var rowPostEncryption = readSingleCsvRow(encryptedPath);
        final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
        GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
                entry("foo", isFingerprintEncrypted),
                entry("quoted-foo", isFingerprintEncrypted),
                entry("quoted-foo-newline-bar", isFingerprintEncrypted),
                entry("blank", isFingerprintEncrypted),
                entry("1space", isFingerprintEncrypted),
                entry("quoted-blank", isFingerprintEncrypted),
                entry("quoted-1space", isFingerprintEncrypted)
        );
        // check that NULL values never get the same encoding
        // (preserving NULL "uniqueness" for fingerprint columns)
        assertNotEquals(
                rowPostEncryption.get("blank"),
                rowPostEncryption.get("1space"));
        // check that `,,` and `,"",` get different encoding when the user
        // specifies `""` as the input NULL value
        assertNotEquals(
                rowPostEncryption.get("blank"),
                rowPostEncryption.get("quoted-blank"));
    }

    @Test
    public void blankEncNull_Enc_allowJoinsOnColumnsWithDifferentNamesIsTrue_FingerprintTest() throws IOException {
        blankEncNull_Fingerprint(true);
    }

    @Test
    public void blankEncNull_Enc_allowJoinsOnColumnsWithDifferentNamesIsFalse_FingerprintTest() throws IOException {
        blankEncNull_Fingerprint(false);
    }
    // Fingerprint encryption with `""` (literal quoted-empty) as the input NULL value:
    // quoted blanks and quoted spaces encode differently, and unquoted vs quoted blanks differ.
    @Test
    public void emptyQuotesEncNull_FingerprintTest() throws IOException {
        encCsvInputNull = "\"\"";
        final String encryptedPath = encryptAllColumnsFingerprint();
        final var rowPostEncryption = readSingleCsvRow(encryptedPath);
        final Predicate<String> isFingerprintEncrypted = (val) -> val.startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING);
        GeneralTestUtility.assertRowEntryPredicates(rowPostEncryption,
                entry("foo", isFingerprintEncrypted),
                entry("quoted-foo", isFingerprintEncrypted),
                entry("quoted-foo-newline-bar", isFingerprintEncrypted),
                entry("blank", isFingerprintEncrypted),
                entry("1space", isFingerprintEncrypted),
                entry("quoted-blank", isFingerprintEncrypted),
                entry("quoted-1space", isFingerprintEncrypted)
        );
        // check that `,"",` and `," ",` get different encodings
        assertNotEquals(
                rowPostEncryption.get("quoted-blank"),
                rowPostEncryption.get("quoted-1space"));
        // check that `,,` and `,"",` get different encoding when the user
        // specifies `""` as the input NULL value
        assertNotEquals(
                rowPostEncryption.get("blank"),
                rowPostEncryption.get("quoted-blank"));
    }
}
| 2,417 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainEnvVarKeyValidTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Verifies that both the encrypt and decrypt CLI modes succeed (exit code 0) when the secret key
 * is supplied via the environment, using dry-run arguments over the bundled sample files.
 */
public class MainEnvVarKeyValidTest {
    private static final String ENC_INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";

    private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";

    private static final String DEC_INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    // Decrypt-mode CLI arguments and command, rebuilt per test in setup().
    private DecryptCliConfigTestUtility decArgs;

    private CommandLine decMain;

    // Encrypt-mode CLI arguments and command, rebuilt per test in setup().
    private EncryptCliConfigTestUtility encArgs;

    private CommandLine encMain;

    // Executes the encrypt command with the configured arguments and returns its exit code.
    public int runEncryptMainWithCliArgs() {
        final String[] cliArgs = encArgs.toArrayWithoutMode();
        return encMain.execute(cliArgs);
    }

    // Executes the decrypt command with the configured arguments and returns its exit code.
    public int runDecryptMainWithCliArgs() {
        final String[] cliArgs = decArgs.toArrayWithoutMode();
        return decMain.execute(cliArgs);
    }

    /**
     * Builds dry-run encrypt and decrypt commands over the sample inputs, backed by a mocked
     * Clean Rooms DAO and a shared Spark session.
     *
     * @throws IOException if the temporary output directory cannot be created
     */
    @BeforeEach
    public void setup() throws IOException {
        final String outputDir = FileTestUtility.createTempDir().toString();
        encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(ENC_INPUT_PATH, SCHEMA_PATH);
        encArgs.setOutput(outputDir);
        // Mocked collaboration metadata mirrors whatever client settings the encrypt args carry.
        final CleanRoomsDao mockCleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(mockCleanRoomsDao.getCollaborationDataEncryptionMetadata(any()))
                .thenReturn(encArgs.getClientSettings());
        final SparkSession session = SparkSessionTestUtility.initSparkSession();
        encMain = EncryptMode.getApp(mockCleanRoomsDao, session);
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(DEC_INPUT_PATH);
        decArgs.setOutput(outputDir);
        decMain = DecryptMode.getApp(session);
    }

    @Test
    public void validateEncryptSecretKeyInvalidTest() {
        assertEquals(0, runEncryptMainWithCliArgs());
    }

    @Test
    public void validateDecryptSecretKeyInvalidTest() {
        assertEquals(0, runDecryptMainWithCliArgs());
    }
}
| 2,418 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainPerfTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.spark.utils.TableGeneratorTestUtility;
import com.amazonaws.c3r.spark.utils.TimingResultTestUtility;
import com.univocity.parsers.csv.CsvParser;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Path;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * Round-trip performance smoke tests: encrypt and then decrypt generated CSV
 * tables of various shapes, recording coarse wall-clock timings and file sizes.
 */
public class MainPerfTest {
    // Encrypt-side CLI arguments shared by each round trip.
    private EncryptCliConfigTestUtility encArgs;
    // Decrypt-side CLI arguments shared by each round trip.
    private DecryptCliConfigTestUtility decArgs;
    @BeforeEach
    public void setup() {
        encArgs = EncryptCliConfigTestUtility.defaultTestArgs();
        encArgs.setAllowDuplicates(true);
        decArgs = DecryptCliConfigTestUtility.defaultTestArgs();
        decArgs.setFailOnFingerprintColumns(false);
    }
    /**
     * Encrypts and decrypts a generated CSV table {@code repetitions} times and
     * gathers average timings plus input/output sizes.
     *
     * @param repetitions Number of encrypt (and decrypt) passes to average over
     * @param entrySize   Characters per generated cell value
     * @param columnCount Number of columns in the generated table
     * @param rowCount    Number of data rows in the generated table
     * @return Aggregated timing and size measurements
     * @throws IOException If generated data or temp directories can't be created
     */
    public TimingResultTestUtility timeCsvRoundTrips(final int repetitions, final int entrySize, final int columnCount, final long rowCount)
            throws IOException {
        final String schemaPath = TableGeneratorTestUtility.generateSchema(columnCount, rowCount).toString();
        final Path dataPath = TableGeneratorTestUtility.generateCsv(entrySize, columnCount, rowCount);
        final long inputSizeBytes = dataPath.toFile().length();
        final Path marshalledPath = FileTestUtility.createTempDir();
        final Path unmarshalledPath = FileTestUtility.createTempDir();
        encArgs.setInput(dataPath.toString());
        encArgs.setSchema(schemaPath);
        encArgs.setOutput(marshalledPath.toString());
        // Mock Clean Rooms so collaboration settings come from the test args, not the network.
        final CleanRoomsDao cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
        // Time the encrypt passes (coarse: whole-second resolution per pass).
        long totalMarshalTimeSec = 0;
        for (int i = 0; i < repetitions; i++) {
            final long startTimeMs = System.currentTimeMillis();
            final int exitCode = EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession())
                    .execute(encArgs.toArrayWithoutMode());
            final long endTimeMs = System.currentTimeMillis();
            totalMarshalTimeSec = totalMarshalTimeSec + ((endTimeMs - startTimeMs) / 1000);
            assertEquals(0, exitCode);
        }
        // Spark writes partitioned output; merge it into one file for sizing and decryption.
        final Path mergedMarshalledData = CsvTestUtility.mergeOutput(marshalledPath);
        final long marshalledSizeBytes = mergedMarshalledData.toFile().length();
        decArgs.setFailOnFingerprintColumns(false);
        decArgs.setInput(mergedMarshalledData.toString());
        decArgs.setOutput(unmarshalledPath.toString());
        // printCliArgs();
        // Time the decrypt passes.
        long totalUnmarshalTimeSec = 0;
        for (int i = 0; i < repetitions; i++) {
            final long startTimeMs = System.currentTimeMillis();
            final int exitCode = DecryptMode.getApp(SparkSessionTestUtility.initSparkSession()).execute(decArgs.toArrayWithoutMode());
            final long endTimeMs = System.currentTimeMillis();
            totalUnmarshalTimeSec = totalUnmarshalTimeSec + ((endTimeMs - startTimeMs) / 1000);
            assertEquals(0, exitCode);
        }
        final Path mergedUnmarshalledData = CsvTestUtility.mergeOutput(unmarshalledPath);
        final long unmarshalledSizeBytes = mergedUnmarshalledData.toFile().length();
        // Sanity check the round-tripped data: every row has the expected width and the
        // full row count survived the round trip.
        final CsvParser parser = CsvTestUtility.getCsvParser(mergedUnmarshalledData.toString(), columnCount);
        parser.parseNext(); // skip the header
        long readRows = 0;
        String[] row = parser.parseNext();
        while (row != null) {
            assertEquals(columnCount, row.length);
            readRows++;
            row = parser.parseNext();
        }
        assertEquals(rowCount, readRows);
        return TimingResultTestUtility.builder()
                .charsPerEntry(entrySize)
                .columnCount(columnCount)
                .rowCount(rowCount)
                .inputSizeBytes(inputSizeBytes)
                .marshalTimeSec(totalMarshalTimeSec / repetitions)
                .marshalledSizeBytes(marshalledSizeBytes)
                .unmarshalTimeSec(totalUnmarshalTimeSec / repetitions)
                .unmarshalledSizeBytes(unmarshalledSizeBytes)
                .build();
    }
    // Smoke test: a small grid of column/row sizes, one repetition each.
    @Test
    public void timeVariousColRowSizes() throws IOException {
        final int[] columnCounts = {3, 6};
        final long[] rowCounts = {100, 1000};
        final int repetitions = 1;
        final int entrySize = 20;
        for (var nCols : columnCounts) {
            for (var nRows : rowCounts) {
                timeCsvRoundTrips(repetitions, entrySize, nCols, nRows);
            }
        }
    }
}
| 2,419 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/SchemaCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.io.FileFormat;
import lombok.Builder;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
* Class for conveniently generating various command line argument
* combinations for the `schema` command.
*/
@Builder
@Setter
public class SchemaCliConfigTestUtility {
    /**
     * Sub-mode flag: template (`--template`) or interactive schema creation.
     */
    @Builder.Default
    private String subMode = "--template";
    /**
     * Data file the schema is derived from.
     */
    @Builder.Default
    private String input = "mySourceFile";
    /**
     * Where the generated schema is written.
     */
    @Builder.Default
    @Getter
    private String output = null;
    /**
     * Whether an existing output file may be overwritten.
     */
    @Builder.Default
    private boolean overwrite = false;
    /**
     * Console/log verbosity level.
     */
    @Builder.Default
    private String verbosity = null;
    /**
     * Whether stack traces are printed on error.
     */
    @Builder.Default
    private boolean enableStackTraces = true;
    /**
     * Input data file format.
     */
    private FileFormat fileFormat;
    /**
     * Whether the data file lacks a header row.
     */
    @Builder.Default
    private boolean noHeaders = false;
    /**
     * Collaboration ID.
     */
    private String collaborationId;
    /**
     * AWS CLI profile.
     */
    private String profile;
    /**
     * AWS region.
     */
    private String region;

    /**
     * Appends {@code flag + value} to {@code args} when {@code value} is non-null.
     */
    private static void addOption(final List<String> args, final String flag, final Object value) {
        if (value != null) {
            args.add(flag + value);
        }
    }

    /**
     * Appends {@code flag} to {@code args} when {@code enabled} is true.
     */
    private static void addSwitch(final List<String> args, final boolean enabled, final String flag) {
        if (enabled) {
            args.add(flag);
        }
    }

    /**
     * Converts the specified command line parameters to a list.
     *
     * @return List of command line parameters
     * @see SchemaCliConfigTestUtility#toListWithoutMode
     */
    public List<String> toList() {
        // List.of rejects a null input value, matching the historical behavior.
        final List<String> cliArgs = new ArrayList<>(List.of("schema", input));
        cliArgs.add(subMode);
        addOption(cliArgs, "--output=", output);
        addSwitch(cliArgs, overwrite, "--overwrite");
        addOption(cliArgs, "--verbosity=", verbosity);
        addSwitch(cliArgs, enableStackTraces, "--enableStackTraces");
        addOption(cliArgs, "--fileFormat=", fileFormat);
        addSwitch(cliArgs, noHeaders, "--noHeaders");
        addOption(cliArgs, "--id=", collaborationId);
        addOption(cliArgs, "--profile=", profile);
        addOption(cliArgs, "--region=", region);
        return cliArgs;
    }

    /**
     * Converts the specified command line parameters to a list without the leading CLI mode token.
     *
     * @return List of command line parameters
     * @see SchemaCliConfigTestUtility#toList
     */
    public List<String> toListWithoutMode() {
        final List<String> cliArgs = toList();
        cliArgs.remove(0);
        return cliArgs;
    }

    /**
     * Converts the specified command line parameters to an array.
     *
     * @return Array of command line parameters
     * @see SchemaCliConfigTestUtility#toArrayWithoutMode
     */
    public String[] toArray() {
        return toList().toArray(String[]::new);
    }

    /**
     * Converts the specified command line parameters to an array without the leading CLI mode token.
     *
     * @return Array of command line parameters
     * @see SchemaCliConfigTestUtility#toArray
     */
    public String[] toArrayWithoutMode() {
        return toListWithoutMode().toArray(String[]::new);
    }
}
| 2,420 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/EncryptModeDryRunTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.stubbing.Answer;
import picocli.CommandLine;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class EncryptModeDryRunTest {
private static final String INPUT_PATH = "../samples/csv/data_sample_without_quotes.csv";
private static final String SCHEMA_PATH = "../samples/schema/config_sample.json";
private EncryptCliConfigTestUtility encArgs;
private EncryptMode main;
private CleanRoomsDao mockCleanRoomsDao;
@BeforeEach
public void setup() throws IOException {
final String output = FileTestUtility.createTempDir().toString();
encArgs = EncryptCliConfigTestUtility.defaultDryRunTestArgs(INPUT_PATH, SCHEMA_PATH);
encArgs.setOutput(output);
mockCleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
when(mockCleanRoomsDao.getCollaborationDataEncryptionMetadata(any()))
.thenAnswer((Answer<ClientSettings>) invocation -> encArgs.getClientSettings());
main = new EncryptMode(mockCleanRoomsDao, SparkSessionTestUtility.initSparkSession());
}
public int runMainWithCliArgs() {
return new CommandLine(main).execute(encArgs.toArrayWithoutMode());
}
@Test
public void minimumViableArgsTest() {
assertEquals(0, runMainWithCliArgs());
assertEquals(SCHEMA_PATH, main.getRequiredArgs().getSchema());
assertEquals(INPUT_PATH, main.getRequiredArgs().getInput());
assertEquals(GeneralTestUtility.EXAMPLE_SALT, main.getRequiredArgs().getId());
}
@Test
public void validateInputBlankTest() {
encArgs.setInput("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateConfigBlankTest() {
encArgs.setSchema("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateCollaborationIdBlankTest() {
encArgs.setCollaborationId("");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void validateCollaborationIdInvalidUuidTest() {
encArgs.setCollaborationId("123456");
assertNotEquals(0, runMainWithCliArgs());
}
@Test
public void getTargetFileEmptyTest() {
encArgs.setOutput("");
assertNotEquals(0, runMainWithCliArgs());
}
private void checkBooleans(final Function<Boolean, Boolean> action) {
assertEquals(true, action.apply(true));
assertEquals(false, action.apply(false));
}
@Test
public void allowCleartextFlagTest() {
checkBooleans(b -> {
encArgs.setAllowCleartext(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowCleartext();
});
}
@Test
public void allowDuplicatesFlagTest() {
checkBooleans(b -> {
encArgs.setAllowDuplicates(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowDuplicates();
});
}
@Test
public void allowJoinsOnColumnsWithDifferentNamesFlagTest() {
checkBooleans(b -> {
encArgs.setAllowJoinsOnColumnsWithDifferentNames(b);
runMainWithCliArgs();
return main.getClientSettings().isAllowJoinsOnColumnsWithDifferentNames();
});
}
@Test
public void preserveNullsFlagTest() {
checkBooleans(b -> {
encArgs.setPreserveNulls(b);
runMainWithCliArgs();
return main.getClientSettings().isPreserveNulls();
});
}
@Test
public void inputFileFormatTest() throws IOException {
final String input = FileTestUtility.createTempFile("input", ".unknown").toString();
encArgs.setInput(input);
assertNotEquals(0, runMainWithCliArgs());
encArgs.setFileFormat(FileFormat.CSV);
assertEquals(0, runMainWithCliArgs());
}
@Test
public void noProfileOrRegionFlagsTest() {
main = new EncryptMode(mockCleanRoomsDao, SparkSessionTestUtility.initSparkSession());
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertNull(main.getOptionalArgs().getProfile());
assertNull(mockCleanRoomsDao.getRegion());
}
@Test
public void profileFlagTest() throws IOException {
// Ensure that passing a value via the --profile flag is given to the CleanRoomsDao builder's `profile(..)` method.
final String myProfileName = "my-profile-name";
assertNotEquals(myProfileName, mockCleanRoomsDao.toString());
when(mockCleanRoomsDao.withRegion(any())).thenThrow(new RuntimeException("test failure - region should have have been set"));
encArgs.setProfile(myProfileName);
main = new EncryptMode(mockCleanRoomsDao, SparkSessionTestUtility.initSparkSession());
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertEquals(myProfileName, main.getOptionalArgs().getProfile());
assertEquals(myProfileName, main.getCleanRoomsDao().getProfile());
}
@Test
public void regionFlagTest() {
final String myRegion = "collywobbles";
encArgs.setRegion(myRegion);
main = new EncryptMode(mockCleanRoomsDao, SparkSessionTestUtility.initSparkSession());
new CommandLine(main).execute(encArgs.toArrayWithoutMode());
assertEquals(myRegion, main.getOptionalArgs().getRegion());
assertEquals(myRegion, main.getCleanRoomsDao().getRegion());
}
/*
* Add an extra column to a known valid schema and make sure it's not accepted because it doesn't have the same number
* of columns as the csv file. Easiest to run through the CLI since we need the CSV parser for verification.
*/
@Test
public void tooManyColumnsPositionalSchemaTest() throws IOException {
final String tempJson = FileUtil.readBytes("../samples/schema/config_sample_no_headers.json");
final int closeOuter = tempJson.lastIndexOf("]");
final String json = tempJson.substring(0, closeOuter - 1) + ", [] ] }";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final EncryptCliConfigTestUtility args =
EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv/data_sample_without_quotes.csv",
schema.toString());
args.setDryRun(false);
final var inputArgs = args.toArrayWithoutMode();
assertEquals(Main.FAILURE, EncryptMode.getApp(null, SparkSessionTestUtility.initSparkSession()).execute(inputArgs));
}
/*
* Remove a column to a known valid schema and make sure it's not accepted because it doesn't have the same number
* of columns as the csv file. Easiest to run through the CLI since we need the CSV parser for verification.
*/
@Test
public void tooFewColumnsPositionalSchemaTest() throws IOException {
final String tempJson = FileUtil.readBytes("../samples/schema/config_sample_no_headers.json");
final int lastElementStart = tempJson.lastIndexOf("],");
final String json = tempJson.substring(0, lastElementStart - 1) + "]]}";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final var args = EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv/data_sample_no_headers" +
".csv", schema.toString());
args.setDryRun(false);
final var inputArgs = args.toArrayWithoutMode();
assertEquals(Main.FAILURE, EncryptMode.getApp(null, SparkSessionTestUtility.initSparkSession()).execute(inputArgs));
}
/*
* Make sure only the columns with ColumnSchemas are included in the output. Easiest to run through the CLI since we need
* the CSV parser for verification.
*/
@Test
public void notAllColumnsUsedTest() throws IOException {
final String json = "{ \"headerRow\": false, \"columns\": [" +
"[{\"targetHeader\":\"firstname\", \"type\": \"cleartext\"}]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]," +
"[]" +
"]}";
final Path schema = FileTestUtility.createTempFile("schema", ".json");
Files.writeString(schema, json);
final EncryptCliConfigTestUtility args =
EncryptCliConfigTestUtility.defaultDryRunTestArgs("../samples/csv/data_sample_without_quotes.csv", schema.toString());
final String output = FileTestUtility.createTempDir().toString();
args.setOutput(output);
args.setDryRun(false);
assertEquals(Main.SUCCESS, CliTestUtility.runWithoutCleanRooms(args));
final Path mergedOutput = CsvTestUtility.mergeOutput(Path.of(output));
final List<Map<String, String>> rows = CsvTestUtility.readRows(mergedOutput.toString());
assertTrue(rows.size() > 0);
for (Map<String, String> row : rows) {
assertEquals(1, row.size());
assertTrue(row.containsKey("firstname"));
}
}
} | 2,421 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/DecryptCliConfigTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import lombok.Getter;
import lombok.Setter;
import java.util.ArrayList;
import java.util.List;
/**
* Class for conveniently generating various command line argument
* combinations for the `decrypt` command.
*/
@Setter
public final class DecryptCliConfigTestUtility {
    /**
     * Collaboration ID to use for computing shared secret keys.
     */
    private String collaborationId;
    /**
     * Input file location.
     */
    @Getter
    private String input;
    /**
     * Value used in the input file to represent {@code null} in the CSV data.
     */
    private String csvInputNullValue;
    /**
     * Value to use in the output file to represent {@code null} in the CSV data.
     */
    private String csvOutputNullValue;
    /**
     * Location to write the output file.
     */
    @Getter
    private String output;
    /**
     * Whether the output file should be overwritten if it already exists.
     */
    private boolean overwrite;
    /**
     * Whether encryption will actually be run or only the configuration will be validated.
     */
    private boolean dryRun;
    /**
     * Whether to fail if a fingerprint column is seen in the data file.
     */
    private boolean failOnFingerprintColumns;
    /**
     * Whether a stacktrace should be printed.
     */
    private boolean enableStackTraces;
    /**
     * Input file data type.
     */
    private FileFormat fileFormat;

    /**
     * Hidden default constructor so static instance creators are used.
     */
    private DecryptCliConfigTestUtility() {
    }

    /**
     * Default test values for decryption args to use with tests.
     *
     * @return Default test values
     */
    public static DecryptCliConfigTestUtility defaultTestArgs() {
        final DecryptCliConfigTestUtility config = new DecryptCliConfigTestUtility();
        config.collaborationId = GeneralTestUtility.EXAMPLE_SALT.toString();
        config.input = "mySourceFile";
        config.overwrite = true;
        config.enableStackTraces = true;
        return config;
    }

    /**
     * Creates a test configuration for a dry run. Skips all data processing and validates settings.
     *
     * @param file Input file to use for the dry run, or {@code null} for the placeholder name
     * @return Default dry run configuration
     */
    public static DecryptCliConfigTestUtility defaultDryRunTestArgs(final String file) {
        // Dry-run args are the default args plus the dryRun flag and an optional input override.
        final DecryptCliConfigTestUtility config = defaultTestArgs();
        if (file != null) {
            config.input = file;
        }
        config.dryRun = true;
        return config;
    }

    /**
     * Empty CLI configuration.
     *
     * @return Configuration instance with no set values
     */
    public static DecryptCliConfigTestUtility blankTestArgs() {
        return new DecryptCliConfigTestUtility();
    }

    /**
     * Appends {@code flag + value} to {@code args} when {@code value} is non-null.
     */
    private static void appendOption(final List<String> args, final String flag, final Object value) {
        if (value != null) {
            args.add(flag + value);
        }
    }

    /**
     * Appends {@code flag} to {@code args} when {@code enabled} is true.
     */
    private static void appendSwitch(final List<String> args, final boolean enabled, final String flag) {
        if (enabled) {
            args.add(flag);
        }
    }

    /**
     * Converts the specified command line parameters to a list.
     *
     * @return List of command line parameters
     * @see DecryptCliConfigTestUtility#getCliArgsWithoutMode
     */
    public List<String> getCliArgs() {
        final List<String> cliArgs = new ArrayList<>();
        cliArgs.add("decrypt");
        if (input != null) {
            cliArgs.add(input);
        }
        appendOption(cliArgs, "--id=", collaborationId);
        appendOption(cliArgs, "--csvInputNULLValue=", csvInputNullValue);
        appendOption(cliArgs, "--csvOutputNULLValue=", csvOutputNullValue);
        appendOption(cliArgs, "--output=", output);
        appendSwitch(cliArgs, overwrite, "--overwrite");
        appendSwitch(cliArgs, dryRun, "--dryRun");
        appendSwitch(cliArgs, failOnFingerprintColumns, "--failOnFingerprintColumns");
        appendSwitch(cliArgs, enableStackTraces, "--enableStackTraces");
        appendOption(cliArgs, "--fileFormat=", fileFormat);
        return cliArgs;
    }

    /**
     * Converts the specified command line parameters to a list without the leading CLI mode token.
     *
     * @return List of command line parameters
     * @see DecryptCliConfigTestUtility#getCliArgs
     */
    public List<String> getCliArgsWithoutMode() {
        final List<String> cliArgs = getCliArgs();
        cliArgs.remove(0);
        return cliArgs;
    }

    /**
     * Converts the specified command line parameters to an array without the leading CLI mode token.
     *
     * @return Array of command line parameters
     */
    public String[] toArrayWithoutMode() {
        final List<String> cliArgs = getCliArgsWithoutMode();
        return cliArgs.toArray(new String[0]);
    }
}
| 2,422 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/DecryptModeDryRunTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
/**
 * Dry-run tests for {@link DecryptMode}: validate CLI argument handling
 * without performing a full decryption pass.
 */
public class DecryptModeDryRunTest {
    /** Sample encrypted input used for decrypt dry runs. */
    private static final String INPUT_PATH = "../samples/csv/marshalled_data_sample.csv";

    /** CLI arguments under test. */
    private DecryptCliConfigTestUtility decArgs;

    /** Command instance under test. */
    private DecryptMode main;

    /**
     * Sets up default dry-run arguments with a fresh temp output directory.
     *
     * @throws IOException If the temporary output directory can't be created
     */
    @BeforeEach
    public void setup() throws IOException {
        final String output = FileTestUtility.createTempDir().toString();
        decArgs = DecryptCliConfigTestUtility.defaultDryRunTestArgs(INPUT_PATH);
        decArgs.setOutput(output);
        main = new DecryptMode();
    }

    /**
     * Executes the decrypt command with the current arguments.
     *
     * @return Process exit code
     */
    public int runMainWithCliArgs() {
        return new CommandLine(main).execute(decArgs.toArrayWithoutMode());
    }

    @Test
    public void minimumViableArgsTest() {
        // Assert the exit code (it was previously ignored) for parity with
        // EncryptModeDryRunTest.minimumViableArgsTest.
        assertEquals(0, runMainWithCliArgs());
        assertEquals(INPUT_PATH, main.getRequiredArgs().getInput());
        assertEquals(GeneralTestUtility.EXAMPLE_SALT, main.getRequiredArgs().getId());
    }

    @Test
    public void validateInputBlankTest() {
        decArgs.setInput("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    @Test
    public void validateCollaborationIdBlankTest() {
        decArgs.setCollaborationId("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    @Test
    public void validateCollaborationIdInvalidUuidTest() {
        decArgs.setCollaborationId("123456");
        assertNotEquals(0, runMainWithCliArgs());
    }

    @Test
    public void getTargetFileEmptyTest() {
        decArgs.setOutput("");
        assertNotEquals(0, runMainWithCliArgs());
    }

    @Test
    public void inputFileFormatTest() throws IOException {
        // An unrecognized extension fails unless the format is given explicitly.
        final String input = FileTestUtility.createTempFile("input", ".unknown").toString();
        decArgs.setInput(input);
        assertNotEquals(0, runMainWithCliArgs());
        decArgs.setFileFormat(FileFormat.CSV);
        assertEquals(0, runMainWithCliArgs());
    }
}
| 2,423 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cli/MainTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.io.CsvTestUtility;
import com.amazonaws.c3r.spark.io.ParquetTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.stubbing.Answer;
import picocli.CommandLine;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public class MainTest {
private final String config = "../samples/schema/config_sample.json";
private EncryptCliConfigTestUtility encArgs;
private DecryptCliConfigTestUtility decArgs;
private CleanRoomsDao cleanRoomsDao;
private Path output;
    // Reset encryption and decryption command line arguments before each test
    @BeforeEach
    public void setup() throws IOException {
        // Fresh temp output directory per test so runs never collide.
        output = FileTestUtility.createTempDir();
        encArgs = EncryptCliConfigTestUtility.defaultTestArgs();
        encArgs.setSchema(config);
        encArgs.setAllowDuplicates(true);
        decArgs = DecryptCliConfigTestUtility.defaultTestArgs();
        decArgs.setFailOnFingerprintColumns(false);
        cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        // thenAnswer so the mocked Clean Rooms settings track later mutation of encArgs.
        when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenAnswer((Answer<ClientSettings>) (invocation) ->
                encArgs.getClientSettings());
    }
// Verify calling the command with no argument fails
@Test
public void noArgsUsageTest() {
final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
System.setErr(new PrintStream(consoleOutput));
final int exitCode = Main.getApp().execute();
assertEquals(2, exitCode);
assertTrue(consoleOutput.toString().toLowerCase().contains("missing required subcommand"));
}
// Make sure help is printed out
@Test
public void helpTest() {
ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
System.setErr(new PrintStream(consoleOutput));
consoleOutput = new ByteArrayOutputStream();
System.setOut(new PrintStream(consoleOutput));
Main.getApp().execute("--help");
assertTrue(consoleOutput.toString().toLowerCase().contains("usage"));
}
// Make sure a bad subcommand isn't accepted
@Test
public void validateCommandBadTest() {
final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
System.setErr(new PrintStream(consoleOutput));
final int exitCode = Main.getApp().execute("fly-to-the-moon");
assertEquals(2, exitCode);
}
    // Test to make sure quotes are removed
    @Test
    public void quotesAreRemovedTest() {
        // Each quoted value should come back unquoted after picocli parsing.
        final String[] args = {"C:\\User Name\\Here", "--schema=\"schema\"", "--id=\"" + GeneralTestUtility.EXAMPLE_SALT + "\"",
                "--overwrite=\"true\""};
        final CommandLine.ParseResult pr = EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession()).parseArgs(args);
        // The raw argument strings are preserved verbatim...
        final List<String> origArgs = pr.originalArgs();
        assertArrayEquals(args, origArgs.toArray(String[]::new));
        // ...while the matched/parsed values have their surrounding quotes stripped.
        final List<String> parsedArgs = pr.matchedArgs().stream()
                .map(CommandLine.Model.ArgSpec::getValue).map(Object::toString).collect(Collectors.toList());
        assertEquals("encrypt", pr.commandSpec().name());
        assertEquals("C:\\User Name\\Here", parsedArgs.get(0));
        assertEquals("schema", parsedArgs.get(1));
        assertEquals(GeneralTestUtility.EXAMPLE_SALT.toString(), parsedArgs.get(2));
        assertTrue(Boolean.parseBoolean(parsedArgs.get(3)));
    }
    // Check the encrypt command to make sure it works as expected
    @Test
    public void marshalTest() throws IOException {
        encArgs.setInput("../samples/csv/data_sample_without_quotes.csv");
        encArgs.setOutput(output.toString());
        final int exitCode = EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession())
                .execute(encArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        // Output is partitioned by Spark; validation merges and checks shape.
        validateCsvOutput(Path.of(encArgs.getInput()), output);
    }
    // Check the decrypt command to make sure it works as expected
    @Test
    public void unmarshalTest() throws IOException {
        decArgs.setInput("../samples/csv/marshalled_data_sample.csv");
        decArgs.setOutput(output.toString());
        final int exitCode = DecryptMode.getApp(SparkSessionTestUtility.initSparkSession()).execute(decArgs.toArrayWithoutMode());
        assertEquals(0, exitCode);
        // Output is partitioned by Spark; validation merges and checks shape.
        validateCsvOutput(Path.of(decArgs.getInput()), output);
    }
private void validateCsvOutput(final Path input, final Path output) throws IOException {
final long sourceLineCount;
try (Stream<String> source = Files.lines(input, StandardCharsets.UTF_8)) {
sourceLineCount = source.count();
}
final Path mergedOutput = CsvTestUtility.mergeOutput(output);
final List<String[]> outputData = CsvTestUtility.readContentAsArrays(mergedOutput.toString(), false);
assertFalse(outputData.isEmpty());
assertEquals(sourceLineCount, outputData.size());
// check each row in the result is the same size
final int columnCount = outputData.get(0).length;
for (var row : outputData) {
assertEquals(row.length, columnCount);
}
// check the number of output columns is as expected
assertEquals(11, columnCount);
}
/*
* Helper for basic round tripping tests - checks if the string will be interpreted as NULL
* by the C3R client with default settings, so we can check this _or_ equality for correctness
* depending on the input value.
*/
private boolean defaultNullString(final String string) {
return string.isBlank()
|| string.equals("\"\"");
}
/*
 * A "round trip" test that encrypts and then decrypts data, checking the values match or are still HMACed.
 *
 * The test takes an original input file and "explodes" it out, generating
 * 3 output columns for each input column such that each input column gets a
 * corresponding `cleartext` column, a `sealed` column, and a `fingerprint` column
 * as follows:
 * - Columns `[ColumnA, ...]` are transformed into
 * - columns `[ColumnA_cleartext, ColumnA_sealed, fingerprint, ...]` in the output.
 *
 * NOTE(review): relies on the instance fields `encArgs` and `decArgs`, which are
 * declared/initialized outside this method — presumably per-test CLI argument
 * fixtures; confirm against the test setup.
 */
public void clientRoundTripTest(final FileFormat fileFormat) throws IOException {
    // NOTE: We use a version of the sample data with enough quotes to make round trip
    // equalities work more simply
    final String originalPath;
    if (fileFormat == FileFormat.CSV) {
        originalPath = "../samples/csv/data_sample_with_quotes.csv";
    } else {
        originalPath = "../samples/parquet/data_sample.parquet";
    }
    // Separate temp dirs for the encrypted ("marshalled") and decrypted ("unmarshalled") outputs.
    final Path marshalledPath = FileTestUtility.createTempDir();
    final Path unmarshalledPath = FileTestUtility.createTempDir();
    encArgs.setInput(originalPath);
    encArgs.setOutput(marshalledPath.toString());
    // Schema that triples each input column (cleartext/sealed/fingerprint).
    encArgs.setSchema("../samples/schema/config_sample_x3.json");
    // preserveNulls=false so NULL handling is exercised with default client behavior.
    encArgs.setPreserveNulls(false);
    // Mock Clean Rooms so no network calls are made; return the test's own client settings.
    final var cleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
    when(cleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(encArgs.getClientSettings());
    int exitCode = EncryptMode.getApp(cleanRoomsDao, SparkSessionTestUtility.initSparkSession())
            .execute(encArgs.toArrayWithoutMode());
    // Exit code 0 means the encrypt CLI run succeeded.
    assertEquals(0, exitCode);
    // Spark writes partitioned output; merge the part files into a single file for decryption.
    final Path mergedMarshalledData;
    if (fileFormat == FileFormat.CSV) {
        mergedMarshalledData = CsvTestUtility.mergeOutput(marshalledPath);
    } else {
        mergedMarshalledData = ParquetTestUtility.mergeOutput(marshalledPath);
    }
    decArgs.setInput(mergedMarshalledData.toString());
    decArgs.setOutput(unmarshalledPath.toString());
    // Fingerprint columns cannot be decrypted; allow them to pass through instead of failing.
    decArgs.setFailOnFingerprintColumns(false);
    exitCode = DecryptMode.getApp(SparkSessionTestUtility.initSparkSession()).execute(decArgs.toArrayWithoutMode());
    assertEquals(0, exitCode);
    final Path mergedUnmarshalledData;
    if (fileFormat == FileFormat.CSV) {
        mergedUnmarshalledData = CsvTestUtility.mergeOutput(unmarshalledPath);
    } else {
        mergedUnmarshalledData = ParquetTestUtility.mergeOutput(unmarshalledPath);
    }
    final String outputData = FileUtil.readBytes(mergedUnmarshalledData.toString());
    assertFalse(outputData.isBlank());
    final List<String[]> preRows;
    final List<String[]> postRows;
    if (fileFormat == FileFormat.CSV) {
        preRows = CsvTestUtility.readContentAsArrays(originalPath, false);
        postRows = CsvTestUtility.readContentAsArrays(mergedUnmarshalledData.toString(), false);
    } else {
        preRows = ParquetTestUtility.readContentAsStringArrays(originalPath);
        postRows = ParquetTestUtility.readContentAsStringArrays(mergedUnmarshalledData.toString());
    }
    // number of rows should be the same
    assertEquals(preRows.size(), postRows.size());
    // drop header row if source is a CSV file
    if (fileFormat == FileFormat.CSV) {
        preRows.remove(0);
        postRows.remove(0);
    }
    // IMPORTANT! The original data should have no duplicates in the first row,
    // so we can sort the data to easily compare it.
    preRows.sort(Comparator.comparing(row -> row[0]));
    postRows.sort(Comparator.comparing(row -> row[0]));
    // check that the cleartext and sealed columns returned
    // the same results back but the fingerprint column is still HMACed
    for (int i = 0; i < preRows.size(); i++) {
        final var preRow = preRows.get(i);
        final var postRow = postRows.get(i);
        // Each input column expands to exactly 3 output columns.
        assertEquals(preRow.length * 3, postRow.length);
        for (int j = 0; j < preRow.length; j++) {
            // Output columns for input column j sit at positions j*3 (cleartext),
            // j*3+1 (sealed), and j*3+2 (fingerprint).
            if (defaultNullString(preRow[j])) {
                // NULL inputs round trip back to a NULL-equivalent value.
                assertTrue(defaultNullString(postRow[j * 3]));
                assertTrue(defaultNullString(postRow[j * 3 + 1]));
            } else {
                assertEquals(preRow[j], postRow[j * 3]);
                assertEquals(preRow[j], postRow[j * 3 + 1]);
            }
            // Fingerprint column stays HMACed, so it must differ from the original.
            assertNotEquals(preRow[j], postRow[j * 3 + 2]);
        }
    }
}
// Encrypt-then-decrypt round trip using the CSV file format.
@Test
public void csvRoundTripTest() throws IOException {
    clientRoundTripTest(FileFormat.CSV);
}
// Encrypt-then-decrypt round trip using the Parquet file format.
@Test
public void parquetRoundTripTest() throws IOException {
    clientRoundTripTest(FileFormat.PARQUET);
}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.cleanrooms.CleanRoomsDaoTestUtility;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.GeneralTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.sql.SparkSession;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import picocli.CommandLine;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
/**
 * CLI-level tests for C3R schema mode, exercising argument validation, output-file
 * handling, file-format detection, and AWS profile/region plumbing against the
 * bundled sample data. The Clean Rooms client is mocked so no network calls occur.
 */
public class SchemaModeDryRunTest {
    // Sample input files shipped with the repository.
    private static final String INPUT_CSV_PATH = "../samples/csv/data_sample_without_quotes.csv";
    private static final String INPUT_PARQUET_PATH = "../samples/parquet/data_sample.parquet";
    private final SparkSession sparkSession = SparkSessionTestUtility.initSparkSession();
    // Mutable CLI argument fixture; rebuilt before each test.
    private SchemaCliConfigTestUtility schemaArgs;
    // The command under test; populated by runMainWithCliArgs.
    private SchemaMode main;
    // Mocked Clean Rooms client returning low-assurance settings.
    private CleanRoomsDao mockCleanRoomsDao;
    @BeforeEach
    public void setup() throws IOException {
        final String output = FileTestUtility.createTempFile("schema", ".json").toString();
        schemaArgs = SchemaCliConfigTestUtility.builder().overwrite(true).input(INPUT_CSV_PATH)
                .output(output).build();
        mockCleanRoomsDao = CleanRoomsDaoTestUtility.generateMockDao();
        when(mockCleanRoomsDao.getCollaborationDataEncryptionMetadata(any())).thenReturn(ClientSettings.lowAssuranceMode());
    }
    /**
     * Runs schema mode with the current {@code schemaArgs} and asserts on the exit code.
     *
     * @param passes true if the run is expected to succeed (exit code 0), false otherwise
     */
    public void runMainWithCliArgs(final boolean passes) {
        main = new SchemaMode(mockCleanRoomsDao, sparkSession);
        final int exitCode = new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        if (passes) {
            assertEquals(0, exitCode);
        } else {
            assertNotEquals(0, exitCode);
        }
    }
    // Default fixture args alone should be sufficient for a successful run.
    @Test
    public void minimumViableArgsTest() {
        runMainWithCliArgs(true);
        assertEquals(INPUT_CSV_PATH, main.getRequiredArgs().getInput());
    }
    // With no --output, the schema is written as <input-name>.json in the working directory.
    @Test
    public void defaultOutputFileTest() {
        final File sourceFile = new File(INPUT_CSV_PATH);
        final File targetFile = new File(sourceFile.getName() + ".json");
        targetFile.deleteOnExit();
        schemaArgs.setOutput(null);
        runMainWithCliArgs(true);
        assertNull(main.getOptionalArgs().getOutput());
        assertTrue(targetFile.exists());
        assertTrue(targetFile.length() > 0);
        // assert sourceFile directory is stripped and targetFile is associated with the working directory.
        assertNotNull(sourceFile.getParentFile());
        assertNull(targetFile.getParentFile());
        assertTrue(targetFile.getAbsolutePath().contains(FileUtil.CURRENT_DIR));
    }
    // An explicit --output path is honored verbatim.
    @Test
    public void specifiedOutputFileTest() {
        final File schemaOutput = new File("output.json");
        schemaOutput.deleteOnExit();
        schemaArgs.setOutput("output.json");
        runMainWithCliArgs(true);
        assertEquals("output.json", main.getOptionalArgs().getOutput());
    }
    // An option-like token in the input position must be rejected.
    @Test
    public void validateInputBlankTest() {
        schemaArgs.setInput("--invalid");
        runMainWithCliArgs(false);
    }
    // An empty output path must be rejected.
    @Test
    public void getTargetFileEmptyTest() {
        schemaArgs.setOutput("");
        runMainWithCliArgs(false);
    }
    // An unrecognized verbosity level must be rejected.
    @Test
    public void validateBadLogLevelErrorTest() {
        schemaArgs.setVerbosity("SUPER-LOUD-PLEASE");
        runMainWithCliArgs(false);
    }
    // Interactive mode whose stdin ends prematurely should fail without writing schema content.
    @Test
    public void schemaInteractiveTerminatedInputTest() throws IOException {
        final Path schemaPath = Files.createTempFile("schema", ".json");
        schemaPath.toFile().deleteOnExit();
        schemaArgs.setOutput(schemaPath.toAbsolutePath().toString());
        schemaArgs.setSubMode("--interactive");
        final var args = schemaArgs.toList();
        args.remove(0);
        // user input which ends unexpectedly during interactive CLI session
        final var userInput = new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8));
        System.setIn(new BufferedInputStream(userInput));
        final int exitCode = new CommandLine(new SchemaMode(mockCleanRoomsDao, sparkSession)).execute(args.toArray(new String[0]));
        assertNotEquals(0, exitCode);
        assertTrue(schemaPath.toFile().exists());
        // File was created but nothing was written to it.
        assertEquals(0, schemaPath.toFile().length());
    }
    // An unknown sub-mode flag should fail with a picocli "Unknown option" message on stderr.
    @Test
    public void testInvalidModeSetting() {
        final ByteArrayOutputStream consoleOutput = new ByteArrayOutputStream();
        final PrintStream pErr = new PrintStream(consoleOutput);
        System.setErr(pErr);
        schemaArgs.setSubMode("--invalidMode");
        runMainWithCliArgs(false);
        final String expected = "Unknown option: '--invalidMode'";
        assertTrue(consoleOutput.toString(StandardCharsets.UTF_8).contains(expected));
    }
    // Omitting the (-t | -i) sub-mode entirely should produce a missing-argument error.
    @Test
    public void testMissingModeSettings() {
        final ByteArrayOutputStream nullConsoleOutput = new ByteArrayOutputStream();
        final PrintStream pNullErr = new PrintStream(nullConsoleOutput);
        System.setErr(pNullErr);
        assertDoesNotThrow(() -> new CommandLine(new SchemaMode(mockCleanRoomsDao, sparkSession))
                .execute("--output=" + schemaArgs.getOutput(), INPUT_CSV_PATH));
        assertTrue(nullConsoleOutput.toString(StandardCharsets.UTF_8)
                .startsWith("Error: Missing required argument (specify one of these):"
                        + " (-t | -i)"));
    }
    // Unknown file extension with no explicit format flag must be rejected.
    @Test
    public void unknownFileFormatTest() throws IOException {
        final String schemaUnknownExtensionPath = FileTestUtility.createTempFile("schema", ".unknown").toString();
        schemaArgs.setInput(schemaUnknownExtensionPath);
        schemaArgs.setFileFormat(null);
        runMainWithCliArgs(false);
    }
    // Explicit CSV format flag with CSV input succeeds.
    @Test
    public void supportedFileFormatFlagCsvTest() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setFileFormat(FileFormat.CSV);
        runMainWithCliArgs(true);
    }
    // Format flag that contradicts the actual file content must be rejected.
    @Test
    public void unsupportedFileFormatFlagTest() throws IOException {
        final String schemaUnsupportedExtensionPath = FileTestUtility.createTempFile("schema", ".unsupported").toString();
        schemaArgs.setInput(schemaUnsupportedExtensionPath);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        runMainWithCliArgs(false);
    }
    // Explicit Parquet format flag with Parquet input succeeds.
    @Test
    public void supportedFileFormatFlagParquetTest() {
        schemaArgs.setInput(INPUT_PARQUET_PATH);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        runMainWithCliArgs(true);
    }
    // CSV may legitimately lack a header row.
    @Test
    public void noHeadersCsvTest() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setFileFormat(FileFormat.CSV);
        schemaArgs.setNoHeaders(true);
        runMainWithCliArgs(true);
    }
    // --noHeaders is meaningless for Parquet (schema is embedded) and must be rejected.
    @Test
    public void noHeadersParquetTest() {
        schemaArgs.setInput(INPUT_PARQUET_PATH);
        schemaArgs.setFileFormat(FileFormat.PARQUET);
        schemaArgs.setNoHeaders(true);
        runMainWithCliArgs(false);
    }
    // Collaboration IDs must be valid UUIDs.
    @Test
    public void testInvalidIdFormat() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setCollaborationId("invalidCollaborationId");
        runMainWithCliArgs(false);
    }
    // A well-formed UUID collaboration ID is accepted.
    @Test
    public void testValidId() {
        schemaArgs.setInput(INPUT_CSV_PATH);
        schemaArgs.setCollaborationId(GeneralTestUtility.EXAMPLE_SALT.toString());
        runMainWithCliArgs(true);
    }
    @Test
    public void noProfileOrRegionFlagsTest() {
        // Ensure that if no profile or region flag are passed, then the CleanRoomsDao are not constructed
        // with any explicit values for them (i.e., ensuring the defaults are used)
        main = new SchemaMode(mockCleanRoomsDao, sparkSession);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertNull(main.getOptionalArgs().getProfile());
        assertNull(main.getCleanRoomsDao().getRegion());
    }
    @Test
    public void profileFlagTest() throws IOException {
        // Ensure that passing a value via the --profile flag is given to the CleanRoomsDao builder's `profile(..)` method.
        final String myProfileName = "my-profile-name";
        assertNotEquals(myProfileName, mockCleanRoomsDao.toString());
        schemaArgs.setProfile(myProfileName);
        schemaArgs.setCollaborationId(UUID.randomUUID().toString());
        main = new SchemaMode(mockCleanRoomsDao, sparkSession);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertEquals(myProfileName, main.getOptionalArgs().getProfile());
        assertEquals(myProfileName, main.getCleanRoomsDao().getProfile());
    }
    // The --region flag must propagate to both the parsed args and the CleanRoomsDao.
    @Test
    public void regionFlagTest() {
        final String myRegion = "collywobbles";
        schemaArgs.setRegion(myRegion);
        schemaArgs.setCollaborationId(UUID.randomUUID().toString());
        main = new SchemaMode(mockCleanRoomsDao, sparkSession);
        new CommandLine(main).execute(schemaArgs.toArrayWithoutMode());
        assertEquals(myRegion, main.getOptionalArgs().getRegion());
        assertEquals(myRegion, main.getCleanRoomsDao().getRegion());
    }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.action;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.config.SparkDecryptConfig;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetReader;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests for {@code SparkUnmarshaller}: data is encrypted with {@code SparkMarshaller}
 * and then decrypted, verifying cleartext/sealed columns round trip while fingerprint
 * columns remain HMACed. Fixtures are static and built once — starting Spark sessions
 * per test is expensive.
 */
public class SparkUnmarshallerTest {
    // Shared, immutable source dataset read from the sample CSV.
    private static Dataset<Row> dataset;
    // Column insights derived from the sample schema. NOTE(review): mutated in place by
    // SparkMarshaller.populateColumnPositions within the tests below.
    private static List<ColumnInsight> columnInsights;
    private static SparkSession session;
    private static SparkEncryptConfig encryptConfig;
    private static SparkDecryptConfig decryptConfig;
    /**
     * Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive.
     *
     * @throws IOException if Schema can't be read.
     */
    @BeforeAll
    public static void setupDataset() throws IOException {
        final TableSchema schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class);
        columnInsights = schema.getColumns().stream().map(ColumnInsight::new)
                .collect(Collectors.toList());
        session = SparkSessionTestUtility.initSparkSession();
        // Shared-secret key comes from the environment (see KeyUtil.KEY_ENV_VAR).
        encryptConfig = SparkEncryptConfig.builder()
                .source("../samples/csv/data_sample_without_quotes.csv")
                .targetDir(FileTestUtility.createTempDir().resolve("output").toString())
                .overwrite(true)
                .secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
                .salt(EXAMPLE_SALT.toString())
                .tableSchema(schema)
                .settings(ClientSettings.lowAssuranceMode())
                .build();
        decryptConfig = SparkDecryptConfig.builder()
                .source("../samples/csv/marshalled_data_sample.csv")
                .targetDir(FileTestUtility.createTempDir().resolve("output").toString())
                .overwrite(true)
                .secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
                .salt(EXAMPLE_SALT.toString())
                .build();
        dataset = readDataset(encryptConfig.getSourceFile(), schema.getPositionalColumnHeaders());
    }
    // Reads a CSV file into a dataset with the given positional headers, skipping header normalization.
    private static Dataset<Row> readDataset(final String sourceFile, final List<ColumnHeader> columnHeaders) {
        return SparkCsvReader.readInput(session,
                sourceFile,
                null,
                columnHeaders,
                /* skipHeaderNormalization */ true);
    }
    // Encrypt then unmarshal; non-fingerprint columns must round trip, fingerprint columns stay HMACed.
    @Test
    public void unmarshalDataTest() {
        final Dataset<Row> encryptedData = SparkMarshaller.encrypt(dataset, encryptConfig);
        final List<Row> decryptedData = SparkUnmarshaller.unmarshalData(encryptedData, decryptConfig).collectAsList();
        // Build the expected view: source columns renamed to target headers, with positions recorded.
        final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
        SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
        final List<Row> mappedDataList = mappedDataset.collectAsList();
        compareValues(mappedDataList, decryptedData, columnInsights);
    }
    // Same round trip, but with a Parquet source containing non-String column types.
    @Test
    public void unmarshalDataParquetUnencryptedMixedTypesTest() {
        final Dataset<Row> mixedDataset = SparkParquetReader
                .readInput(session, "../samples/parquet/data_sample_with_non_string_types.parquet", /* skipHeaderNormalization */ true,
                        ParquetConfig.DEFAULT);
        // assert there is indeed a non-String type
        assertTrue(mixedDataset.schema().toList().filter(struct -> struct.dataType() != DataTypes.StringType).size() > 0);
        final Dataset<Row> encryptedData = SparkMarshaller.encrypt(mixedDataset, encryptConfig);
        final List<Row> decryptedData = SparkUnmarshaller.unmarshalData(encryptedData, decryptConfig).collectAsList();
        final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(mixedDataset, columnInsights);
        SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
        final List<Row> mappedDataList = mappedDataset.collectAsList();
        compareValues(mappedDataList, decryptedData, columnInsights);
    }
    // Same comparison via the higher-level decrypt(..) entry point.
    @Test
    public void decryptTest() {
        final Dataset<Row> encryptedData = SparkMarshaller.encrypt(dataset, encryptConfig);
        final List<Row> decryptedData = SparkUnmarshaller.decrypt(encryptedData, decryptConfig).collectAsList();
        final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
        SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
        final List<Row> mappedDataList = mappedDataset.collectAsList();
        compareValues(mappedDataList, decryptedData, columnInsights);
    }
    /**
     * Compares expected vs. actual rows column by column: fingerprint columns must differ
     * (still HMACed) unless NULL, while all other columns must match exactly.
     */
    private void compareValues(final List<Row> expected, final List<Row> actual, final List<ColumnInsight> columnInsights) {
        assertEquals(expected.size(), actual.size());
        // Encryption shuffled the rows
        expected.sort((d1, d2) -> {
            return d2.getString(0).compareTo(d1.getString(0)); //compare on first names
        });
        actual.sort((d1, d2) -> {
            return d2.getString(0).compareTo(d1.getString(0)); //compare on first names
        });
        final List<Integer> fingerprintCols = columnInsights.stream()
                .filter(columnInsight -> columnInsight.getType() == ColumnType.FINGERPRINT)
                .map(ColumnInsight::getSourceColumnPosition)
                .collect(Collectors.toList());
        final List<Integer> decryptableCols = columnInsights.stream()
                .filter(columnInsight -> columnInsight.getType() != ColumnType.FINGERPRINT)
                .map(ColumnInsight::getSourceColumnPosition)
                .collect(Collectors.toList());
        for (int i = 0; i < actual.size(); i++) {
            for (Integer fingerprintPos : fingerprintCols) {
                if (expected.get(i).get(fingerprintPos) == null) {
                    // NULLs are preserved as NULL rather than HMACed.
                    assertNull(actual.get(i).get(fingerprintPos));
                } else {
                    assertNotEquals(expected.get(i).get(fingerprintPos),
                            actual.get(i).get(fingerprintPos));
                }
            }
            for (Integer decryptedPos : decryptableCols) {
                assertEquals(expected.get(i).get(decryptedPos),
                        actual.get(i).get(decryptedPos));
            }
        }
    }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.action;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetReader;
import com.amazonaws.c3r.spark.utils.FileTestUtility;
import com.amazonaws.c3r.spark.utils.SparkSessionTestUtility;
import com.amazonaws.c3r.utils.FileUtil;
import org.apache.spark.SparkException;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static com.amazonaws.c3r.spark.utils.GeneralTestUtility.EXAMPLE_SALT;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
public class SparkMarshallerTest {
private static Dataset<Row> dataset;
private static List<ColumnInsight> columnInsights;
private static SparkSession session;
private static TableSchema schema;
private static SparkEncryptConfig config;
/**
 * Initial setup done only once because the data is immutable and starting Spark sessions each time is expensive.
 *
 * <p>Populates the shared static fixtures: the sample table schema, its column insights,
 * the Spark session, the encrypt config (keyed from the {@code KeyUtil.KEY_ENV_VAR}
 * environment variable), and the source dataset.
 *
 * @throws IOException if Schema can't be read.
 */
@BeforeAll
public static void setupDataset() throws IOException {
    schema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/config_sample.json"), TableSchema.class);
    columnInsights = schema.getColumns().stream().map(ColumnInsight::new)
            .collect(Collectors.toList());
    session = SparkSessionTestUtility.initSparkSession();
    config = SparkEncryptConfig.builder()
            .source("../samples/csv/data_sample_without_quotes.csv")
            .targetDir(FileTestUtility.createTempDir().resolve("output").toString())
            .overwrite(true)
            .secretKey(KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR)))
            .salt(EXAMPLE_SALT.toString())
            .tableSchema(schema)
            .settings(ClientSettings.lowAssuranceMode())
            .build();
    dataset = readDataset(config.getSourceFile(), schema.getPositionalColumnHeaders());
}
/**
 * Reads the given CSV file into a dataset using the provided positional column headers.
 *
 * <p>NOTE(review): the third argument is passed as {@code null} — presumably an optional
 * override (e.g. external headers or NULL token) left at its default; confirm against
 * {@code SparkCsvReader.readInput}'s signature.
 */
private static Dataset<Row> readDataset(final String sourceFile, final List<ColumnHeader> columnHeaders) {
    return SparkCsvReader.readInput(session,
            sourceFile,
            null,
            columnHeaders);
}
// Filtering against an empty set of column insights should drop every source column.
@Test
public void filterSourceColumnsBySchemaNoMatchesTest() {
    final List<ColumnInsight> noInsights = new ArrayList<>();
    final Dataset<Row> result = SparkMarshaller.filterSourceColumnsBySchema(dataset, noInsights);
    assertEquals(0, result.columns().length);
}
// When every schema column maps to a source column, each distinct source header survives filtering.
@Test
public void filterSourceColumnsBySchemaAllMatchesTest() {
    final Set<String> distinctSourceHeaders = columnInsights.stream()
            .map(insight -> insight.getSourceHeader().toString())
            .collect(Collectors.toSet());
    final Dataset<Row> filtered = SparkMarshaller.filterSourceColumnsBySchema(dataset, columnInsights);
    assertEquals(distinctSourceHeaders.size(), filtered.columns().length);
}
// Using only the cleartext subset of the schema keeps exactly that many columns after filtering.
@Test
public void filterSourceColumnsBySchemaSomeMatchesTest() {
    final List<ColumnInsight> cleartextInsights = schema.getColumns().stream()
            .map(ColumnInsight::new)
            .filter(insight -> ColumnType.CLEARTEXT == insight.getType())
            .collect(Collectors.toList());
    final Dataset<Row> filtered = SparkMarshaller.filterSourceColumnsBySchema(dataset, cleartextInsights);
    // Sanity check: the subset really is a proper subset of the full schema.
    assertNotEquals(cleartextInsights.size(), columnInsights.size());
    assertEquals(cleartextInsights.size(), filtered.columns().length);
}
// Verifies max value byte-lengths are computed per target column.
// NOTE(review): mutates the shared static columnInsights in place.
@Test
public void updateMaxValuesPerColumnTest() {
    SparkMarshaller.updateMaxValuesPerColumn(dataset, columnInsights);
    final Map<String, ColumnInsight> targetToColumnInsight = columnInsights.stream()
            .collect(Collectors.toMap(insight -> insight.getTargetHeader().toString(), insight -> insight));
    // Assert all have a value set since there are no empty columns
    assertFalse(columnInsights.stream().anyMatch(insight -> insight.getMaxValueLength() <= 0));
    // Expected UTF-8 byte lengths of the longest values in the sample data.
    final int longestFirstnameByteLength = 5;
    final int longestPhonenumberByteLength = 12;
    final int longestNoteValueByteLength = 60;
    if (!FileUtil.isWindows()) {
        // Spot check our lengths ONLY on *nix system CI. On Windows the Java string literals appearing in the
        // source code can end up getting encoded non-UTF8 initially, which then can muck with the length in
        // annoying ways that just make the tests harder to write in a cross-platform way.
        // NOTE: This concern is only relevant for the spot checks where we check string literal lengths
        // (which feature Java string literals), because the actual application and tests run on external
        // file data are operating only on bytes parsed in as UTF8 from a file REGARDLESS of the OS.
        assertEquals(longestFirstnameByteLength, "Shana".getBytes(StandardCharsets.UTF_8).length);
        assertEquals(longestPhonenumberByteLength, "407-555-8888".getBytes(StandardCharsets.UTF_8).length);
        // Importantly, the longest `Notes` string has a unicode character `é` (U+00E9) that takes two bytes
        // in UTF8 (0xC3 0xA9), and so relying on non-UTF8-byte-length notions of a string value's "length"
        // can lead to errors on UTF8 data containing such values.
        assertEquals(
                longestNoteValueByteLength,
                "This is a really long noté that could really be a paragraph"
                        .getBytes(StandardCharsets.UTF_8).length);
    }
    // Spot-check computed max lengths against the known sample values.
    assertEquals(longestFirstnameByteLength, targetToColumnInsight.get("firstname").getMaxValueLength());
    assertEquals(longestPhonenumberByteLength, targetToColumnInsight.get("phonenumber_cleartext").getMaxValueLength());
    assertEquals(longestPhonenumberByteLength, targetToColumnInsight.get("phonenumber_sealed").getMaxValueLength());
    assertEquals(longestNoteValueByteLength, targetToColumnInsight.get("notes").getMaxValueLength());
}
// A dataset of all-NULL values should leave every column's max value length at zero.
@Test
public void updateMaxValuesPerColumnEmptyDatasetTest() {
    final TableSchema nullSchema = GsonUtil.fromJson(FileUtil.readBytes("../samples/schema/6column.json"), TableSchema.class);
    final List<ColumnInsight> nullInsights = nullSchema.getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    final Dataset<Row> nullDataset = readDataset("../samples/csv/null5by6.csv", nullSchema.getPositionalColumnHeaders());
    SparkMarshaller.updateMaxValuesPerColumn(nullDataset, nullInsights);
    // Every insight must report a max length of exactly 0.
    assertTrue(nullInsights.stream().allMatch(insight -> insight.getMaxValueLength() == 0));
}
// A column header containing SQL-injection-style text must be rejected.
@Test
public void updateMaxValuesPerColumnMaliciousDatasetTest() {
    final StructField injectionField = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
    final StructType schemaWithInjection = dataset.schema().add(injectionField);
    final Dataset<Row> datasetWithInjection = session.createDataFrame(dataset.collectAsList(), schemaWithInjection);
    assertThrows(C3rIllegalArgumentException.class,
            () -> SparkMarshaller.updateMaxValuesPerColumn(datasetWithInjection, columnInsights));
}
// Low assurance mode (duplicates allowed): duplicate fingerprint values pass validation.
@Test
public void validateDuplicatesAllowDuplicatesTrueTest() {
    final ColumnSchema fingerprintLastName = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("lastname")) // Column has duplicate last names
            .type(ColumnType.FINGERPRINT)
            .build();
    final List<ColumnInsight> insights = new MappedTableSchema(List.of(fingerprintLastName))
            .getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    assertDoesNotThrow(() -> SparkMarshaller.validateDuplicates(ClientSettings.lowAssuranceMode(), dataset, insights));
}
// High assurance mode only checks FINGERPRINT columns; duplicates in a CLEARTEXT column are fine.
@Test
public void validateDuplicatesAllowDuplicatesFalseNonFingerprintColumnTest() {
    final ColumnSchema cleartextLastName = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("lastname")) // Column has duplicate last names
            .type(ColumnType.CLEARTEXT)
            .build();
    final List<ColumnInsight> insights = new MappedTableSchema(List.of(cleartextLastName))
            .getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    assertDoesNotThrow(() -> SparkMarshaller.validateDuplicates(ClientSettings.highAssuranceMode(), dataset, insights));
}
// High assurance mode: duplicate values in a FINGERPRINT column must fail validation.
@Test
public void validateDuplicatesAllowDuplicatesFalseDuplicateValuesTest() {
    final ColumnSchema fingerprintLastName = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("lastname")) // Column has duplicate last names
            .type(ColumnType.FINGERPRINT)
            .build();
    final List<ColumnInsight> insights = new MappedTableSchema(List.of(fingerprintLastName))
            .getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    assertThrows(C3rRuntimeException.class,
            () -> SparkMarshaller.validateDuplicates(ClientSettings.highAssuranceMode(), dataset, insights));
}
// High assurance mode: repeated NULLs in a FINGERPRINT column also count as duplicates.
@Test
public void validateDuplicatesAllowDuplicatesFalseDuplicateNullsTest() {
    final ColumnSchema fingerprintNotes = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("notes")) // Column has duplicate nulls
            .type(ColumnType.FINGERPRINT)
            .build();
    final List<ColumnInsight> insights = new MappedTableSchema(List.of(fingerprintNotes))
            .getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    assertThrows(C3rRuntimeException.class,
            () -> SparkMarshaller.validateDuplicates(ClientSettings.highAssuranceMode(), dataset, insights));
}
// Verifies source column headers are renamed to the schema's target headers.
@Test
public void mapSourceToTargetColumnsTest() {
    final Set<String> sourceColumns = columnInsights.stream()
            .map(columnInsight -> columnInsight.getSourceHeader().toString())
            .collect(Collectors.toSet());
    final Set<String> datasetColumns = Arrays.stream(dataset.columns()).map(String::toLowerCase).collect(Collectors.toSet());
    // assert initial state
    assertEquals(sourceColumns, datasetColumns);
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
    final Set<String> targetColumns = columnInsights.stream()
            .map(columnInsight -> columnInsight.getTargetHeader().toString())
            .collect(Collectors.toSet());
    // assert there are differences
    assertNotEquals(sourceColumns, targetColumns);
    final Set<String> mappedDatasetColumns =
            Arrays.stream(mappedDataset.columns()).map(String::toLowerCase).collect(Collectors.toSet());
    //assert final state
    assertEquals(targetColumns, mappedDatasetColumns);
}
// A target header at the AWS Clean Rooms maximum length must survive the SparkSQL rename intact.
@Test
public void mapSourceToTargetColumnsSqlHeaderMaxLengthTest() {
    final ColumnHeader maxLengthSqlHeader = new ColumnHeader("a".repeat(Limits.AWS_CLEAN_ROOMS_HEADER_MAX_LENGTH));
    final ColumnSchema renamedColumn = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("lastname"))
            .targetHeader(maxLengthSqlHeader)
            .type(ColumnType.FINGERPRINT)
            .build();
    final List<ColumnInsight> insights = new MappedTableSchema(List.of(renamedColumn))
            .getColumns().stream()
            .map(ColumnInsight::new)
            .collect(Collectors.toList());
    final Dataset<Row> mapped = SparkMarshaller.mapSourceToTargetColumns(dataset, insights);
    // Ensure that SparkSQL handles the longest headers that are permitted and doesn't introduce shorter limits.
    assertEquals(maxLengthSqlHeader.toString(), mapped.columns()[0]);
}
// A column not present in the schema (here, with an injection-style name) must be dropped
// by the mapping, while schema-mapped columns are renamed normally.
@Test
public void mapSourceToTargetColumnsMaliciousDatasetTest() {
    final StructField maliciousColumn = DataTypes.createStructField("; DROP ALL TABLES;", DataTypes.StringType, true);
    final StructType maliciousSchema = dataset.schema().add(maliciousColumn);
    final Dataset<Row> maliciousDataset = session.createDataFrame(dataset.collectAsList(), maliciousSchema);
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(maliciousDataset, columnInsights);
    // Assert final state does not contain malicious column
    assertFalse(mappedDataset.schema().toList().contains(maliciousColumn));
    final Set<String> mappedDatasetColumns =
            Arrays.stream(mappedDataset.columns()).map(String::toLowerCase).collect(Collectors.toSet());
    final Set<String> targetColumns = columnInsights.stream()
            .map(columnInsight -> columnInsight.getTargetHeader().toString())
            .collect(Collectors.toSet());
    // Assert remaining columns were unaffected
    assertEquals(targetColumns, mappedDatasetColumns);
}
@Test
public void populateColumnPositionsTest() {
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
    SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
    // Index the insights by target header so the spot checks below can look up positions by name.
    final Map<String, ColumnInsight> targetToColumnInsight = columnInsights.stream()
            .collect(Collectors.toMap(insight -> insight.getTargetHeader().toString(), insight -> insight));
    // Assert all have a value set since there are no empty columns
    assertFalse(columnInsights.stream().anyMatch(insight -> insight.getSourceColumnPosition() < 0));
    // Spot checks against known positions in the sample data
    assertEquals(0, targetToColumnInsight.get("firstname").getSourceColumnPosition());
    assertEquals(5, targetToColumnInsight.get("phonenumber_cleartext").getSourceColumnPosition());
    assertEquals(6, targetToColumnInsight.get("phonenumber_sealed").getSourceColumnPosition());
    assertEquals(10, targetToColumnInsight.get("notes").getSourceColumnPosition());
}
@Test
public void shuffleDataTest() {
    final List<Row> shuffledData = SparkMarshaller.shuffleData(dataset).collectAsList();
    final List<Row> originalData = dataset.collectAsList();
    // Shuffling must neither add nor drop rows: both sides contain the same rows.
    assertTrue(shuffledData.containsAll(originalData));
    assertTrue(originalData.containsAll(shuffledData));
    // BUG FIX: the previous assertion compared Row *references* with `!=`. Each
    // collectAsList() call materializes fresh Row instances, so reference inequality
    // held even for an unshuffled dataset and the check was vacuous. Compare row
    // *contents* instead. It's possible the shuffle leaves some rows in place in a
    // short test file, but very unlikely that all of the first seven keep their
    // original positions.
    boolean anyRowMoved = false;
    for (int i = 0; i < 7; i++) {
        if (!shuffledData.get(i).equals(originalData.get(i))) {
            anyRowMoved = true;
            break;
        }
    }
    assertTrue(anyRowMoved);
}
@Test
public void marshalDataTest() {
    // Map source columns to their targets and record positions before marshalling.
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
    SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
    final List<Row> marshalledData = SparkMarshaller.marshalData(mappedDataset, config, columnInsights).collectAsList();
    final List<Row> mappedDataList = mappedDataset.collectAsList();
    // Marshalling doesn't shuffle, so we can compare each row
    compareValues(mappedDataList, marshalledData, columnInsights);
}
@Test
public void marshalDataParquetUnencryptedMixedTypesTest() {
    // Parquet sample that contains columns with non-string Spark types.
    final Dataset<Row> mixedDataset = SparkParquetReader
            .readInput(session, "../samples/parquet/data_sample_with_non_string_types.parquet");
    // assert there is indeed a non-String type
    assertTrue(mixedDataset.schema().toList().filter(struct -> struct.dataType() != DataTypes.StringType).size() > 0);
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(mixedDataset, columnInsights);
    SparkMarshaller.populateColumnPositions(mappedDataset, columnInsights);
    final List<Row> marshalledData = SparkMarshaller.marshalData(mappedDataset, config, columnInsights).collectAsList();
    final List<Row> mappedDataList = mappedDataset.collectAsList();
    // Marshalling doesn't shuffle, so we can compare each row
    compareValues(mappedDataList, marshalledData, columnInsights);
}
@Test
public void marshalDataParquetEncryptedMixedTypesTest() {
    final Dataset<Row> mixedDataset = SparkParquetReader
            .readInput(session, "../samples/parquet/data_sample_with_non_string_types.parquet", /* skipHeaderNormalization */ false,
                    ParquetConfig.DEFAULT);
    // assert there is indeed a non-String type
    assertTrue(mixedDataset.schema().toList().filter(struct -> struct.dataType() != DataTypes.StringType).size() > 0);
    // Schedule a non-string (int) column for a cryptographic transform; marshalling must reject it.
    final ColumnSchema levelSchema = ColumnSchema.builder()
            .sourceHeader(new ColumnHeader("level")) // Column is classified as an int
            .type(ColumnType.FINGERPRINT).build();
    final TableSchema schema = new MappedTableSchema(List.of(levelSchema));
    final List<ColumnInsight> levelInsights = schema.getColumns().stream().map(ColumnInsight::new)
            .collect(Collectors.toList());
    final Dataset<Row> mappedDataset = SparkMarshaller.mapSourceToTargetColumns(mixedDataset, levelInsights);
    SparkMarshaller.populateColumnPositions(mappedDataset, levelInsights);
    // collectAsList() forces execution; the failure surfaces as a SparkException at that point.
    assertThrows(SparkException.class, () -> SparkMarshaller.marshalData(mappedDataset, config, levelInsights).collectAsList());
}
@Test
public void encryptTest() {
    final List<Row> encryptedRows = SparkMarshaller.encrypt(dataset, config).collectAsList();
    final Dataset<Row> mapped = SparkMarshaller.mapSourceToTargetColumns(dataset, columnInsights);
    SparkMarshaller.populateColumnPositions(mapped, columnInsights);
    final List<Row> mappedRows = mapped.collectAsList();
    // encrypt() shuffles its output, so put both lists into the same (descending
    // first-name) order before comparing them element by element.
    final java.util.Comparator<Row> byFirstNameDescending =
            (left, right) -> right.getString(0).compareTo(left.getString(0));
    encryptedRows.sort(byFirstNameDescending);
    mappedRows.sort(byFirstNameDescending);
    compareValues(mappedRows, encryptedRows, columnInsights);
}
/**
 * Asserts that {@code actual} is a correctly transformed version of {@code expected}:
 * both lists have the same size, values in non-cleartext columns differ from the source
 * (or stay {@code null} when the source is {@code null}), and cleartext columns pass
 * through unchanged. Both lists must already be in the same row order.
 */
private void compareValues(final List<Row> expected, final List<Row> actual, final List<ColumnInsight> columnInsights) {
    assertEquals(expected.size(), actual.size());
    // Partition the column positions by whether the column is transformed or cleartext.
    final List<Integer> ciphertextCols =
            columnInsights.stream().filter(columnInsight -> columnInsight.getType() != ColumnType.CLEARTEXT)
                    .map(ColumnInsight::getSourceColumnPosition)
                    .collect(Collectors.toList());
    final List<Integer> cleartextCols = columnInsights.stream().filter(columnInsight -> columnInsight.getType() == ColumnType.CLEARTEXT)
            .map(ColumnInsight::getSourceColumnPosition)
            .collect(Collectors.toList());
    for (int i = 0; i < actual.size(); i++) {
        for (Integer ciphertextPos : ciphertextCols) {
            if (expected.get(i).get(ciphertextPos) == null) {
                // A null source value is expected to remain null in the output.
                assertNull(actual.get(i).get(ciphertextPos));
            } else {
                // A transformed value must not equal its plaintext source.
                assertNotEquals(expected.get(i).get(ciphertextPos),
                        actual.get(i).get(ciphertextPos));
            }
        }
        for (Integer cleartextPos : cleartextCols) {
            // Cleartext columns must be carried over unmodified.
            assertEquals(expected.get(i).get(cleartextPos),
                    actual.get(i).get(cleartextPos));
        }
    }
}
}
| 2,427 |
0 | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/test/java/com/amazonaws/c3r/spark/cleanrooms/CleanRoomsDaoTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cleanrooms;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import org.mockito.stubbing.Answer;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;
public final class CleanRoomsDaoTestUtility {
    /**
     * Hidden utility class constructor.
     */
    private CleanRoomsDaoTestUtility() {
    }

    /**
     * Builds a Mockito mock of {@link CleanRoomsDao} whose fluent {@code withProfile}
     * and {@code withRegion} setters record their argument so that the matching getter
     * returns it, mimicking the real builder-style behavior.
     *
     * @return the configured mock DAO
     */
    public static CleanRoomsDao generateMockDao() {
        final CleanRoomsDao dao = org.mockito.Mockito.mock(CleanRoomsDao.class);
        final Answer<CleanRoomsDao> recordProfile = invocation -> {
            when(dao.getProfile()).thenReturn(invocation.getArgument(0));
            return dao;
        };
        when(dao.withProfile(any())).thenAnswer(recordProfile);
        final Answer<CleanRoomsDao> recordRegion = invocation -> {
            when(dao.getRegion()).thenReturn(invocation.getArgument(0));
            return dao;
        };
        when(dao.withRegion(any())).thenAnswer(recordRegion);
        // Until withRegion records a value, defer to the real getter implementation.
        when(dao.getRegion()).thenCallRealMethod();
        return dao;
    }
}
| 2,428 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/config/SparkEncryptConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.config;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import javax.crypto.SecretKey;
/**
 * Information needed when encrypting a data file.
 */
@Getter
public final class SparkEncryptConfig extends SparkConfig {
    /**
     * Clean room cryptographic settings.
     */
    private final ClientSettings settings;

    /**
     * How the data in the input file maps to data in the output file.
     */
    private final TableSchema tableSchema;

    /**
     * Set up configuration that will be used for encrypting data.
     *
     * @param secretKey Clean room key used to generate sub-keys for HMAC and encryption
     * @param source Location of input data
     * @param fileFormat Format of input data
     * @param targetDir Where output should be saved
     * @param overwrite Whether to overwrite the target file if it exists already
     * @param csvInputNullValue What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue What value should be saved in output to represent {@code null} values for CSV
     * @param salt Salt that can be publicly known but adds to randomness of cryptographic operations
     * @param settings Clean room cryptographic settings
     * @param tableSchema How data in the input file maps to data in the output file
     * @throws C3rIllegalArgumentException If the schema violates the clean room's client settings
     */
    @Builder
    private SparkEncryptConfig(@NonNull final SecretKey secretKey,
                               @NonNull final String source,
                               final FileFormat fileFormat,
                               final String targetDir,
                               final boolean overwrite,
                               final String csvInputNullValue,
                               final String csvOutputNullValue,
                               @NonNull final String salt,
                               @NonNull final ClientSettings settings,
                               @NonNull final TableSchema tableSchema) {
        super(secretKey, source, fileFormat, targetDir, overwrite, csvInputNullValue,
                csvOutputNullValue, salt);
        this.settings = settings;
        this.tableSchema = tableSchema;
        validate();
    }

    /**
     * Verifies that settings are consistent: the table schema must be permitted under the
     * collaboration's client settings (e.g., no cleartext columns when the clean room
     * doesn't allow them).
     *
     * @throws C3rIllegalArgumentException If any of the rules are violated
     */
    private void validate() {
        TableSchema.validateSchemaAgainstClientSettings(tableSchema, settings);
    }
}
| 2,429 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/config/SparkDecryptConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.config;
import com.amazonaws.c3r.io.FileFormat;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import javax.crypto.SecretKey;
/**
 * Information needed when decrypting a data file.
 */
@Getter
public final class SparkDecryptConfig extends SparkConfig {
    /**
     * Whether to throw an error if a Fingerprint column is seen in the data.
     */
    private final boolean failOnFingerprintColumns;

    /**
     * Set up configuration that will be used for decrypting data.
     *
     * <p>All common validation (readable source, writable target, file format, CSV options)
     * is performed by the {@link SparkConfig} superclass constructor.
     *
     * @param secretKey Clean room key used to generate sub-keys for HMAC and encryption
     * @param source Location of input data
     * @param fileFormat Format of input data
     * @param targetDir Where output should be saved
     * @param overwrite Whether to overwrite the target file if it exists already
     * @param csvInputNullValue What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue What value should be saved in output to represent {@code null} values for CSV
     * @param salt Salt that can be publicly known but adds to randomness of cryptographic operations
     * @param failOnFingerprintColumns Whether to throw an error if a Fingerprint column is seen in the data
     */
    @Builder
    private SparkDecryptConfig(@NonNull final SecretKey secretKey,
                               @NonNull final String source,
                               final FileFormat fileFormat,
                               final String targetDir,
                               final boolean overwrite,
                               final String csvInputNullValue,
                               final String csvOutputNullValue,
                               @NonNull final String salt,
                               final boolean failOnFingerprintColumns) {
        super(secretKey, source, fileFormat, targetDir, overwrite, csvInputNullValue,
                csvOutputNullValue, salt);
        this.failOnFingerprintColumns = failOnFingerprintColumns;
    }
}
| 2,430 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/config/SparkConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.config;
import com.amazonaws.c3r.config.Config;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.NonNull;
import javax.crypto.SecretKey;
import java.nio.file.Path;
/**
 * Basic information needed whether encrypting or decrypting data for Spark types.
 */
public class SparkConfig extends Config {
    /**
     * {@link org.apache.spark.sql.util.CaseInsensitiveStringMap} key for whether header normalization
     * is skipped.
     */
    public static final String PROPERTY_KEY_SKIP_HEADER_NORMALIZATION = "skipHeaderNormalization";

    /**
     * Basic configuration information needed for encrypting or decrypting data.
     *
     * <p>When {@code targetDir} is {@code null}, output defaults to a directory named {@code "output"}.
     *
     * @param secretKey Clean room key used to generate sub-keys for HMAC and encryption
     * @param source Location of input data
     * @param fileFormat Format of input data
     * @param targetDir Where output should be saved ({@code null} defaults to {@code "output"})
     * @param overwrite Whether to overwrite the target file if it exists already
     * @param csvInputNullValue What value should be interpreted as {@code null} for CSV files
     * @param csvOutputNullValue What value should be saved in output to represent {@code null} values for CSV
     * @param salt Salt that can be publicly known but adds to randomness of cryptographic operations
     */
    protected SparkConfig(@NonNull final SecretKey secretKey, @NonNull final String source, final FileFormat fileFormat,
                          final String targetDir, final boolean overwrite, final String csvInputNullValue,
                          final String csvOutputNullValue, @NonNull final String salt) {
        super(secretKey, source, fileFormat, (targetDir == null ? "output" : targetDir),
                overwrite, csvInputNullValue, csvOutputNullValue, salt);
        validate();
    }

    /**
     * Verifies that settings are consistent.
     * - Make sure the program can write to the target directory
     * - Make sure the source can be read from (as a file or a directory)
     * - Make sure a file format is known for the source
     * - If the source file format is not CSV, ensure no CSV configuration parameters are set
     *
     * @throws C3rIllegalArgumentException If any of the rules are violated
     */
    private void validate() {
        FileUtil.verifyWritableDirectory(getTargetFile(), isOverwrite());
        final Path source = Path.of(getSourceFile());
        if (source.toFile().isFile()) {
            FileUtil.verifyReadableFile(getSourceFile());
            if (getFileFormat() == null) {
                // For a single file the format is normally derived from the extension upstream;
                // a null here means the extension was unrecognized.
                throw new C3rIllegalArgumentException("Unknown file extension: please specify the file format for file "
                        + getSourceFile() + ".");
            }
        } else {
            FileUtil.verifyReadableDirectory(getSourceFile());
            if (getFileFormat() == null) {
                throw new C3rIllegalArgumentException("An input file format must be selected if providing a source directory.");
            }
        }
        if (getFileFormat() != FileFormat.CSV) {
            if (getCsvInputNullValue() != null || getCsvOutputNullValue() != null) {
                throw new C3rIllegalArgumentException("CSV options specified for " + getFileFormat() + " file.");
            }
        }
    }
}
| 2,431 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/config/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Classes that contain all the information needed to perform cryptographic computations on input data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.spark.config; | 2,432 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/SchemaGeneratorUtils.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ColumnHeader;
/**
 * Common utility functions used by schema generators.
 */
public final class SchemaGeneratorUtils {
    /** Hidden utility constructor. */
    private SchemaGeneratorUtils() {
    }

    /**
     * Returns a string for user-facing messages which references the specified column:
     * the quoted header name when one exists, otherwise the default header derived from
     * the column's index via {@link ColumnHeader#of}.
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A reference string for user facing I/O
     */
    public static String columnReference(final ColumnHeader columnHeader, final int columnIndex) {
        if (columnHeader != null) {
            return "column `" + columnHeader + "`";
        } else {
            return ColumnHeader.of(columnIndex).toString();
        }
    }

    /**
     * Returns a user-facing warning message stating the specified column cannot be encrypted in any way.
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A warning string user facing I/O
     */
    public static String unsupportedTypeWarning(final ColumnHeader columnHeader, final int columnIndex) {
        final String columnName = columnReference(columnHeader, columnIndex);
        return "WARNING: " + columnName + " contains non-string data and cannot be\n" +
                " used for cryptographic computing. Any target column(s) generated\n" +
                " from this column will be cleartext.";
    }

    /**
     * Returns a user-facing message stating the specified column cannot be encrypted in any way AND is being skipped.
     *
     * @param columnHeader The column header (if one exists)
     * @param columnIndex The column's 0-based index
     * @return A warning string user facing I/O
     */
    public static String unsupportedTypeSkippingColumnWarning(final ColumnHeader columnHeader, final int columnIndex) {
        final String columnName = columnReference(columnHeader, columnIndex);
        // Plain concatenation, consistent with unsupportedTypeWarning above. The previous
        // version appended '+'-concatenated strings to a StringBuilder, which defeated the
        // builder's purpose while producing the exact same result.
        return "WARNING: " + columnName + " contains non-string data and cannot be\n"
                + " used for cryptographic computing. This column is being skipped\n"
                + " because the collaboration does not permit cleartext columns.";
    }
}
| 2,433 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/ParquetSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetReader;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.NonNull;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import java.util.Arrays;
import java.util.stream.Collectors;
/**
 * Used to generate a schema file for a specific Parquet file. User can ask for either a simple, autogenerated schema or be walked through
 * the entire schema creation process.
 */
public final class ParquetSchemaGenerator extends SchemaGenerator {
    /**
     * Set up for schema generation and validate settings.
     *
     * <p>Reads the Parquet file once to capture its column names and to classify each
     * column as {@code STRING} (usable for cryptographic computing) or {@code UNKNOWN}.
     *
     * @param inputParquetFile Parquet file to read header information from
     * @param targetJsonFile Where to save the schema
     * @param overwrite Whether the {@code targetJsonFile} should be overwritten (if it exists)
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     * @param sparkSession SparkSession to use for sampling the input schema
     */
    @Builder
    private ParquetSchemaGenerator(@NonNull final String inputParquetFile,
                                   @NonNull final String targetJsonFile,
                                   @NonNull final Boolean overwrite,
                                   final ClientSettings clientSettings,
                                   @NonNull final SparkSession sparkSession) {
        super(inputParquetFile, targetJsonFile, overwrite, clientSettings);
        FileUtil.verifyReadableFile(inputParquetFile);
        final Dataset<Row> sample = SparkParquetReader.readInput(sparkSession, inputParquetFile);
        sourceHeaders = Arrays.stream(sample.columns())
                .map(ColumnHeader::new)
                .collect(Collectors.toList());
        // Only string columns support cryptographic transforms; all other Spark types map to UNKNOWN.
        sourceColumnTypes = Arrays.stream(sample.schema().fields())
                .map(field -> field.dataType() == DataTypes.StringType
                        ? ClientDataType.STRING
                        : ClientDataType.UNKNOWN)
                .collect(Collectors.toList());
    }
}
| 2,434 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/TemplateSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.json.GsonUtil;
import com.google.gson.JsonArray;
import com.google.gson.JsonObject;
import lombok.Builder;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Used to create a simple schema without user input. Creates a one-to-one mapping in the output JSON file which the user can then edit to
 * select the transform and padding types they would like.
 */
@Slf4j
public final class TemplateSchemaGenerator {
    /**
     * String for user-facing messaging showing all column type options.
     */
    private static final String ALL_COLUMN_TYPES = "[" +
            Arrays.stream(ColumnType.values())
                    .map(ColumnType::toString)
                    .collect(Collectors.joining("|")) +
            "]";

    /**
     * String for user-facing messaging showing column type options when cleartext columns are not permitted.
     */
    private static final String ALL_COLUMN_TYPES_SANS_CLEARTEXT = "[" +
            Arrays.stream(ColumnType.values())
                    .filter(c -> c != ColumnType.CLEARTEXT)
                    .map(ColumnType::toString)
                    .collect(Collectors.joining("|")) +
            "]";

    /**
     * The contents to be printed for each pad in the output, along with instructions on how to use it.
     */
    private static final JsonObject EXAMPLE_PAD;

    static {
        // Built once and shared by reference across all generated column entries;
        // it is only ever serialized, never mutated.
        EXAMPLE_PAD = new JsonObject();
        EXAMPLE_PAD.addProperty("COMMENT", "omit this pad entry unless column type is sealed");
        EXAMPLE_PAD.addProperty("type", "[none|fixed|max]");
        EXAMPLE_PAD.addProperty("length", "omit length property for type none, otherwise specify value in [0, 10000]");
    }

    /**
     * Console output stream.
     */
    private final PrintStream consoleOutput;

    /**
     * Names of the columns in the input data ({@code null} when the source has no header row).
     */
    private final List<ColumnHeader> headers;

    /**
     * Number of source columns.
     */
    private final int sourceColumnCount;

    /**
     * Source column types (in the order they appear in the input file).
     */
    private final List<ClientDataType> sourceColumnTypes;

    /**
     * Where to write the schema file.
     */
    private final String targetJsonFile;

    /**
     * Options for column types based on ClientSettings (if provided).
     */
    private final String columnTypeOptions;

    /**
     * Whether this schema can have cleartext columns.
     */
    private final boolean allowCleartextColumns;

    /**
     * Initializes the automated schema generator.
     *
     * @param sourceHeaders List of column names in the input file ({@code null} for a headerless source)
     * @param sourceColumnTypes Source column types (in the order they appear in the input file)
     * @param targetJsonFile Where to write the schema
     * @param consoleOutput Connection to output stream (i.e., output for user); defaults to stdout when {@code null}
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     * @throws C3rIllegalArgumentException If input sizes are inconsistent
     */
    @Builder
    private TemplateSchemaGenerator(final List<ColumnHeader> sourceHeaders,
                                    @NonNull final List<ClientDataType> sourceColumnTypes,
                                    @NonNull final String targetJsonFile,
                                    final PrintStream consoleOutput,
                                    final ClientSettings clientSettings) {
        if (sourceHeaders != null && sourceHeaders.size() != sourceColumnTypes.size()) {
            throw new C3rIllegalArgumentException("Template schema generator given "
                    + sourceHeaders.size() + " headers and " + sourceColumnTypes.size() + " column data types.");
        }
        this.headers = sourceHeaders == null ? null : List.copyOf(sourceHeaders);
        this.sourceColumnTypes = sourceColumnTypes;
        this.sourceColumnCount = sourceColumnTypes.size();
        this.targetJsonFile = targetJsonFile;
        this.consoleOutput = (consoleOutput == null) ? new PrintStream(System.out, true, StandardCharsets.UTF_8)
                : consoleOutput;
        // No client settings means no restriction, so cleartext is allowed by default.
        allowCleartextColumns = clientSettings == null || clientSettings.isAllowCleartext();
        if (allowCleartextColumns) {
            columnTypeOptions = ALL_COLUMN_TYPES;
        } else {
            columnTypeOptions = ALL_COLUMN_TYPES_SANS_CLEARTEXT;
        }
    }

    /**
     * Creates template column schemas from the provided (non-{@code null}) source {@code headers}.
     * Columns whose type cannot be cryptographically processed are emitted as cleartext when
     * allowed, otherwise skipped with a console warning.
     *
     * @return The generated template column schemas
     */
    private JsonArray generateTemplateColumnSchemasFromSourceHeaders() {
        final var columnSchemaArray = new JsonArray(headers.size());
        for (int i = 0; i < sourceColumnCount; i++) {
            final var header = headers.get(i);
            final var entry = new JsonObject();
            entry.addProperty("sourceHeader", header.toString());
            entry.addProperty("targetHeader", header.toString());
            if (sourceColumnTypes.get(i) != ClientDataType.UNKNOWN) {
                entry.addProperty("type", columnTypeOptions);
                entry.add("pad", EXAMPLE_PAD);
            } else if (allowCleartextColumns) {
                consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeWarning(header, i));
                entry.addProperty("type", ColumnType.CLEARTEXT.toString());
            } else {
                // Non-string data and no cleartext permitted: drop the column entirely.
                consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(header, i));
                continue;
            }
            columnSchemaArray.add(entry);
        }
        return columnSchemaArray;
    }

    /**
     * Creates template column schemas for headerless source. Each source column is wrapped
     * in a one-element array (the headerless schema format) with a positional target header.
     *
     * @return The generated template column schemas
     */
    private JsonArray generateTemplateColumnSchemasFromColumnCount() {
        final var columnSchemaArray = new JsonArray(sourceColumnCount);
        for (int i = 0; i < sourceColumnCount; i++) {
            // Array template entry will go in
            final var entryArray = new JsonArray(1);
            // template entry
            final var templateEntry = new JsonObject();
            templateEntry.addProperty("targetHeader", ColumnHeader.of(i).toString());
            if (sourceColumnTypes.get(i) != ClientDataType.UNKNOWN) {
                templateEntry.addProperty("type", columnTypeOptions);
                templateEntry.add("pad", EXAMPLE_PAD);
                entryArray.add(templateEntry);
            } else if (allowCleartextColumns) {
                templateEntry.addProperty("type", ColumnType.CLEARTEXT.toString());
                entryArray.add(templateEntry);
            } else {
                // If the column type does not support cryptographic computing and cleartext columns are not allowed,
                // then we do not add a template entry to the array, and we warn the user this column has been skipped.
                consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(null, i));
            }
            columnSchemaArray.add(entryArray);
        }
        return columnSchemaArray;
    }

    /**
     * Generate a template schema. I.e., the type (see {@link ColumnType}) and padding
     * (see {@link PadType}) are left with all possible options and must be manually edited.
     *
     * @throws C3rRuntimeException If unable to write to the target file
     */
    public void run() {
        final var schemaContent = new JsonObject();
        if (headers != null) {
            schemaContent.addProperty("headerRow", true);
            schemaContent.add("columns", generateTemplateColumnSchemasFromSourceHeaders());
        } else {
            schemaContent.addProperty("headerRow", false);
            schemaContent.add("columns", generateTemplateColumnSchemasFromColumnCount());
        }
        try (BufferedWriter writer = Files.newBufferedWriter(Path.of(targetJsonFile), StandardCharsets.UTF_8)) {
            writer.write(GsonUtil.toJson(schemaContent));
        } catch (IOException e) {
            throw new C3rRuntimeException("Could not write to target schema file.", e);
        }
        log.info("Template schema written to {}.", targetJsonFile);
        log.info("Schema requires manual modification before use:");
        log.info("  * Types for each column must be selected.");
        log.info("  * Pad entry must be modified for each sealed column and removed for other column types.");
        log.info("Resulting schema must be valid JSON (e.g., final entries in objects have no trailing comma, etc).");
    }
}
| 2,435 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/CsvSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import java.util.Collections;
/**
 * Used to generate a schema file for a specific CSV file. User can ask for either a simple, autogenerated schema or be walked through
 * the entire schema creation process.
 */
@Slf4j
public final class CsvSchemaGenerator extends SchemaGenerator {
    /**
     * How many columns are in the source file.
     */
    @Getter
    private final int sourceColumnCount;

    /**
     * Set up for schema generation and validate settings.
     *
     * <p>Note: file permission checks and target file creation are handled by the
     * {@link SchemaGenerator} superclass constructor; the previous duplicate
     * {@code FileUtil.initFileIfNotExists} call and the redundant private copies of the
     * input/target paths have been removed.
     *
     * @param inputCsvFile CSV file to read header information from
     * @param targetJsonFile Where to save the schema
     * @param overwrite If the {@code targetJsonFile} should be overwritten if it exists
     * @param hasHeaders Does the first source row contain column headers?
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     */
    @Builder
    private CsvSchemaGenerator(@NonNull final String inputCsvFile,
                               @NonNull final String targetJsonFile,
                               @NonNull final Boolean overwrite,
                               @NonNull final Boolean hasHeaders,
                               final ClientSettings clientSettings) {
        super(inputCsvFile, targetJsonFile, overwrite, clientSettings);
        if (hasHeaders) {
            final CsvRowReader reader = CsvRowReader.builder()
                    .sourceName(inputCsvFile)
                    .build();
            try {
                sourceHeaders = reader.getHeaders();
            } finally {
                // Close even when reading the header row throws, so the file handle is not leaked.
                reader.close();
            }
            sourceColumnCount = sourceHeaders.size();
        } else {
            sourceColumnCount = CsvRowReader.getCsvColumnCount(inputCsvFile, null);
            sourceHeaders = null;
        }
        // CSV data is always treated as a single client data type (strings).
        this.sourceColumnTypes = Collections.nCopies(sourceColumnCount, CsvValue.CLIENT_DATA_TYPE);
    }
}
| 2,436 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/SchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.spark.cli.SchemaMode;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Getter;
import lombok.NonNull;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.List;
/**
* Helps generate a schema for a file with a supported data format.
*/
public abstract class SchemaGenerator {
/**
* The headers for the source file, or {@code null} if the file has none.
*/
@Getter
protected List<ColumnHeader> sourceHeaders;
/**
* Column types for the source file.
*/
@Getter
protected List<ClientDataType> sourceColumnTypes;
/**
* The file a schema will be generated for.
*/
private final String inputFile;
/**
* The location the generated schema will be stored.
*/
private final String targetJsonFile;
/**
* Clean room cryptographic settings.
*/
private final ClientSettings clientSettings;
/**
* Setup common schema generator component.
*
* @param inputFile Input data file for processing
* @param targetJsonFile Schema file mapping input to output file data
* @param overwrite Whether to overwrite the output file if it already exists
* @param clientSettings Collaboration settings if available, else {@code null}
*/
protected SchemaGenerator(@NonNull final String inputFile,
@NonNull final String targetJsonFile,
@NonNull final Boolean overwrite,
final ClientSettings clientSettings) {
this.inputFile = inputFile;
this.targetJsonFile = targetJsonFile;
validate(overwrite);
FileUtil.initFileIfNotExists(targetJsonFile);
this.clientSettings = clientSettings;
}
/**
* Verifies that input and target files have appropriate permissions.
*
* @param overwrite If the target JSON file can overwrite an existing file
*/
private void validate(final boolean overwrite) {
FileUtil.verifyReadableFile(inputFile);
FileUtil.verifyWritableFile(targetJsonFile, overwrite);
}
/**
* Generate a schema file.
*
* @param subMode How the schema file should be generated
* @throws C3rIllegalArgumentException If the schema generation mode is invalid
*/
public void generateSchema(final SchemaMode.SubMode subMode) {
// CHECKSTYLE:OFF
System.out.println();
System.out.println("A schema file will be generated for file " + inputFile + ".");
// CHECKSTYLE:ON
if (subMode.isInteractiveMode()) {
InteractiveSchemaGenerator.builder()
.sourceHeaders(getSourceHeaders())
.sourceColumnTypes(getSourceColumnTypes())
.targetJsonFile(targetJsonFile)
.consoleInput(new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8)))
.consoleOutput(System.out)
.clientSettings(clientSettings)
.build()
.run();
} else if (subMode.isTemplateMode()) {
TemplateSchemaGenerator.builder()
.sourceHeaders(getSourceHeaders())
.sourceColumnTypes(getSourceColumnTypes())
.targetJsonFile(targetJsonFile)
.clientSettings(clientSettings)
.build()
.run();
} else {
throw new C3rIllegalArgumentException("Schema generation mode must be interactive or template.");
}
}
} | 2,437 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/InteractiveSchemaGenerator.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.schema;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import com.amazonaws.c3r.internal.PadUtil;
import com.amazonaws.c3r.json.GsonUtil;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import lombok.Builder;
import lombok.NonNull;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.Collectors;
/**
* Walks the user through creating a customized schema for their data.
*/
public final class InteractiveSchemaGenerator {
    /**
     * Column schemas generated by user input are stored here in a list of lists
     * like a {@link PositionalTableSchema} during processing. When the user is done providing input,
     * this list of lists is either used to create a {@link PositionalTableSchema} or is flattened
     * and used to create a {@link MappedTableSchema}, depending on the type of schema being generated.
     *
     * <p>
     * NOTE: The length of this list during user-interaction directly corresponds to the 0-based
     * index of the source column schemas being generated at that moment.
     */
    private final List<List<ColumnSchema>> generatedColumnSchemas;

    /**
     * Column header names already used for target column headers to prevent duplicates.
     */
    private final Set<ColumnHeader> usedColumnHeaders;

    /**
     * Source headers from the input file or {@code null} if the file has no headers.
     */
    private final List<ColumnHeader> headers;

    /**
     * Number of source columns.
     */
    private final int sourceColumnCount;

    /**
     * Source column types (in the order they appear in the input file).
     */
    private final List<ClientDataType> sourceColumnTypes;

    /**
     * Number of columns that cannot be supported (e.g., if the column cannot be
     * encrypted and cleartext columns are disallowed).
     */
    private int unsupportedTypeColumnCount;

    /**
     * JSON file schema will be written to.
     */
    private final String targetJsonFile;

    /**
     * Console input from user.
     */
    private final BufferedReader consoleInput;

    /**
     * Console output stream.
     */
    private final PrintStream consoleOutput;

    /**
     * Whether cleartext columns possible for this schema.
     */
    private final boolean allowCleartextColumns;

    /**
     * Sets up the schema generator to run in interactive mode. Makes I/O connections to console, processes header information and
     * initializes preprocessing state.
     *
     * @param sourceHeaders Column names in data file if they exist, otherwise {@code null}
     * @param sourceColumnTypes The column types in the file in the order they appear
     * @param targetJsonFile Where schema should be written
     * @param consoleInput Connection to input stream (i.e., input from user)
     * @param consoleOutput Connection to output stream (i.e., output for user)
     * @param clientSettings Collaboration's client settings if provided, else {@code null}
     * @throws C3rIllegalArgumentException If input sizes are inconsistent
     */
    @Builder
    @SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"})
    private InteractiveSchemaGenerator(final List<ColumnHeader> sourceHeaders,
                                       @NonNull final List<ClientDataType> sourceColumnTypes,
                                       @NonNull final String targetJsonFile,
                                       final BufferedReader consoleInput,
                                       final PrintStream consoleOutput,
                                       final ClientSettings clientSettings) {
        if (sourceHeaders != null && sourceHeaders.size() != sourceColumnTypes.size()) {
            throw new C3rIllegalArgumentException("Interactive schema generator given " + sourceHeaders.size() + " headers and " +
                    sourceColumnTypes.size() + " column data types.");
        }
        this.headers = sourceHeaders == null ? null : List.copyOf(sourceHeaders);
        this.sourceColumnTypes = sourceColumnTypes;
        this.sourceColumnCount = sourceColumnTypes.size();
        this.unsupportedTypeColumnCount = 0;
        this.targetJsonFile = targetJsonFile;
        this.consoleInput = (consoleInput == null)
                ? new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8))
                : consoleInput;
        this.consoleOutput = (consoleOutput == null) ? new PrintStream(System.out, true, StandardCharsets.UTF_8)
                : consoleOutput;
        this.allowCleartextColumns = clientSettings == null || clientSettings.isAllowCleartext();
        generatedColumnSchemas = new ArrayList<>();
        usedColumnHeaders = new HashSet<>();
    }

    /**
     * Whether the source file has headers.
     *
     * @return {@code true} if the source file has headers, else {@code false}.
     */
    private boolean hasHeaders() {
        return headers != null;
    }

    /**
     * Has the user create the schema and writes it to a file. Also does some validation on the created schema such as at least one output
     * column was specified.
     *
     * @throws C3rRuntimeException If an I/O error occurs opening or creating the file
     */
    public void run() {
        if (!allowCleartextColumns) {
            consoleOutput.println();
            consoleOutput.println("NOTE: Cleartext columns are not permitted for this collaboration");
            consoleOutput.println("      and will not be provided as an option in prompts.");
        }
        generateColumns();
        final List<ColumnSchema> flattenedColumnSchemas = generatedColumnSchemas.stream()
                .flatMap(List::stream)
                .collect(Collectors.toList());
        if (flattenedColumnSchemas.isEmpty()) {
            if (unsupportedTypeColumnCount >= sourceColumnCount) {
                consoleOutput.println("No source columns could be considered for output:");
                consoleOutput.println("  all columns were of an unsupported type and the");
                consoleOutput.println("  specified collaboration does not allow cleartext.");
            } else {
                consoleOutput.println("No target columns were specified.");
            }
            return;
        }
        final TableSchema schema;
        if (hasHeaders()) {
            schema = new MappedTableSchema(flattenedColumnSchemas);
        } else {
            schema = new PositionalTableSchema(generatedColumnSchemas);
        }
        try (BufferedWriter writer = Files.newBufferedWriter(Path.of(targetJsonFile), StandardCharsets.UTF_8)) {
            writer.write(GsonUtil.toJson(schema));
        } catch (IOException e) {
            throw new C3rRuntimeException("Could not write to target schema file.", e);
        }
        consoleOutput.println("Schema written to " + targetJsonFile + ".");
    }

    /**
     * The current source column index target columns are being generated from.
     *
     * @return The current positional zero-based source index.
     */
    private int getCurrentSourceColumnPosition() {
        return generatedColumnSchemas.size();
    }

    /**
     * The current source column's client data type (how the data is represented).
     *
     * @return The client data type for the current source column.
     */
    private ClientDataType getCurrentSourceColumnDataType() {
        return sourceColumnTypes.get(getCurrentSourceColumnPosition());
    }

    /**
     * Gets the next line of text from the user and converts it to lowercase.
     *
     * @return Normalized user input
     * @throws C3rRuntimeException If there's an unexpected end of user input
     */
    private String readNextLineLowercase() {
        try {
            final String nextLine = consoleInput.readLine();
            if (nextLine == null) {
                throw new C3rRuntimeException("Unexpected end of user input.");
            }
            // Locale.ROOT keeps the prefix matching below (e.g., "fingerprint".startsWith(...))
            // working under locales with non-ASCII case rules (e.g., Turkish dotless i).
            return nextLine.toLowerCase(Locale.ROOT);
        } catch (IOException e) {
            throw new C3rRuntimeException("Unexpected end of user input.", e);
        }
    }

    /**
     * Prompt the user for a non-negative integer value.
     *
     * @param baseUserPrompt User prompt, sans any default value or ending question mark
     * @param defaultValue What is the default user response they can leverage by simply
     *                     pressing `return` with no entered text. {@code defaultValue == null}
     *                     implies there is no default value
     * @param maxValue The maximum allowed value (inclusive)
     * @return The user chosen value via the interaction, or {@code null} if no acceptable user input was found
     */
    Integer promptNonNegativeInt(final String baseUserPrompt,
                                 final Integer defaultValue,
                                 final int maxValue) {
        final var promptSB = new StringBuilder(baseUserPrompt);
        if (defaultValue != null) {
            promptSB.append(" (default `").append(defaultValue).append("`)");
        }
        promptSB.append("? ");
        consoleOutput.print(promptSB);
        final int num;
        final String userInput = readNextLineLowercase();
        try {
            num = Integer.parseInt(userInput);
        } catch (NumberFormatException e) {
            if (userInput.isBlank()) {
                if (defaultValue == null) {
                    consoleOutput.println("Expected an integer >= 0, but found no input.");
                }
                return defaultValue;
            } else {
                consoleOutput.println("Expected an integer >= 0, but found `" + userInput + "`.");
                return null;
            }
        }
        if (num < 0) {
            consoleOutput.println("Expected an integer >= 0, but found " + num + ".");
            return null;
        } else if (num > maxValue) {
            // Bug fix: maxValue itself is accepted (check is `>`), so the message must say `<=`,
            // not `<` as it previously did.
            consoleOutput.println("Expected an integer >= 0 and <= " + maxValue + ".");
            return null;
        }
        return num;
    }

    /**
     * Ask a user the {@code questionPrompt}, followed by a comma and [y]es or [n]o, and parse their response.
     *
     * @param questionPrompt What to print before `, [y]es or [n]o?`
     * @param defaultAnswer A default answer for this prompt, or {@code null} if there is none.
     * @return {@code true} if `yes`, {@code false} if `no`, {@code null} otherwise.
     */
    Boolean promptYesOrNo(final String questionPrompt, final Boolean defaultAnswer) {
        final var promptSB = new StringBuilder(questionPrompt).append(", [y]es or [n]o");
        if (defaultAnswer != null) {
            if (defaultAnswer) {
                promptSB.append(" (default `yes`)");
            } else {
                promptSB.append(" (default `no`)");
            }
        }
        promptSB.append("? ");
        consoleOutput.print(promptSB);
        final String userInput = readNextLineLowercase();
        final Boolean answer;
        if (userInput.isBlank()) {
            if (defaultAnswer != null) {
                answer = defaultAnswer;
            } else {
                consoleOutput.println("Expected [y]es or [n]o, but found no input.");
                answer = null;
            }
        } else if ("yes".startsWith(userInput)) {
            answer = true;
        } else if ("no".startsWith(userInput)) {
            answer = false;
        } else {
            consoleOutput.println("Expected [y]es or [n]o, but got `" + userInput + "`.");
            answer = null;
        }
        return answer;
    }

    /**
     * Attempt to read a ColumnType.
     *
     * @return The ColumnType if successful, or {@code null} if the input was invalid
     */
    ColumnType promptColumnType() {
        final ColumnType type;
        if (allowCleartextColumns) {
            consoleOutput.print("Target column type: [c]leartext, [f]ingerprint, or [s]ealed? ");
        } else {
            consoleOutput.print("Target column type: [f]ingerprint, or [s]ealed? ");
        }
        final String userInput = readNextLineLowercase();
        if (userInput.isBlank()) {
            consoleOutput.println("Expected a column type, but found no input.");
            type = null;
        } else if (allowCleartextColumns && "cleartext".startsWith(userInput)) {
            type = ColumnType.CLEARTEXT;
        } else if ("fingerprint".startsWith(userInput)) {
            type = ColumnType.FINGERPRINT;
        } else if ("sealed".startsWith(userInput)) {
            type = ColumnType.SEALED;
        } else {
            consoleOutput.println("Expected a valid column type, but got `" + userInput + "`.");
            type = null;
        }
        return type;
    }

    /**
     * Repeat an action until it is non-{@code null}, e.g. for repeating requests for valid input.
     *
     * @param supplier Function that supplies the (eventually) non-null value.
     * @param <T> The type of value to be returned by the supplier.
     * @return The non-{@code null} value eventually returned by the supplier.
     */
    static <T> T repeatUntilNotNull(final Supplier<T> supplier) {
        T result = null;
        while (result == null) {
            result = supplier.get();
        }
        return result;
    }

    /**
     * Suggest a suffix for the output column name based on the transform between input and output data selected.
     *
     * @param columnType The data transform type that will be used (see {@link ColumnType})
     * @return The selected suffix for the column name
     */
    String promptTargetHeaderSuffix(@NonNull final ColumnType columnType) {
        final String suggestedSuffix;
        switch (columnType) {
            case SEALED:
                suggestedSuffix = ColumnHeader.DEFAULT_SEALED_SUFFIX;
                break;
            case FINGERPRINT:
                suggestedSuffix = ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX;
                break;
            default:
                // no suffix for cleartext columns
                suggestedSuffix = null;
                break;
        }
        final String suffix;
        if (suggestedSuffix != null) {
            final String prompt = "Add suffix `"
                    + suggestedSuffix + "` to header to indicate how it was encrypted";
            final boolean addSuffix = repeatUntilNotNull(() ->
                    promptYesOrNo(prompt, true));
            suffix = addSuffix ? suggestedSuffix : null;
        } else {
            suffix = null;
        }
        return suffix;
    }

    /**
     * Ask the user what they would like the column name in the output file to be. The default is the same as the input name. This is not
     * yet suggesting a suffix be added based off of encryption type.
     *
     * @param sourceHeader Input column name
     * @return Output column name
     * @throws C3rRuntimeException If there's an unexpected end of user input
     */
    private ColumnHeader promptTargetHeaderPreSuffix(final ColumnHeader sourceHeader) {
        final String input;
        final ColumnHeader targetHeader;
        if (sourceHeader != null) {
            consoleOutput.print("Target column header name (default `" + sourceHeader + "`)? ");
        } else {
            consoleOutput.print("Target column header name? ");
        }
        try {
            // We intentionally do not use readNextLineLowercase() here so that we can check if the
            // string was normalized and report it to the user for their awareness (see below).
            input = consoleInput.readLine();
            if (input == null) {
                // Bug fix: EOF previously fed null to the ColumnHeader constructor, whose rejection
                // was caught below and retried by repeatUntilNotNull callers, looping forever.
                // Fail the same way readNextLineLowercase() does.
                throw new C3rRuntimeException("Unexpected end of user input.");
            }
            if (input.isBlank() && sourceHeader != null) {
                consoleOutput.println("Using default name `" + sourceHeader + "`.");
                targetHeader = sourceHeader;
            } else {
                targetHeader = new ColumnHeader(input);
            }
        } catch (C3rIllegalArgumentException e) {
            consoleOutput.println("Expected a valid header name, but found a problem: " + e.getMessage());
            return null;
        } catch (IOException e) {
            throw new C3rRuntimeException("Unexpected end of user input.", e);
        }
        if (!targetHeader.toString().equals(input) && targetHeader != sourceHeader) {
            consoleOutput.println("Target header was normalized to `" + targetHeader + "`.");
        }
        return targetHeader;
    }

    /**
     * Walks the user through the entire process of choosing an output column name, from the base name in
     * {@link #promptTargetHeaderPreSuffix} to the suffix in {@link #promptTargetHeaderSuffix}.
     *
     * @param sourceHeader Name of the input column
     * @param type Type of cryptographic transform being done
     * @return Complete name for target column
     */
    private ColumnHeader promptTargetHeaderAndSuffix(
            final ColumnHeader sourceHeader,
            @NonNull final ColumnType type) {
        // Ask the user for a header name
        final ColumnHeader targetHeader = promptTargetHeaderPreSuffix(sourceHeader);
        if (targetHeader == null) {
            return null;
        }
        // Check if the user wants a type-based suffix, if applicable.
        final String suffix = promptTargetHeaderSuffix(type);
        if (suffix != null) {
            try {
                return new ColumnHeader(targetHeader + suffix);
            } catch (C3rIllegalArgumentException e) {
                consoleOutput.println("Unable to add header suffix: " + e.getMessage());
                return null;
            }
        } else {
            return targetHeader;
        }
    }

    /**
     * Gets the desired output header and verifies it does not match a name already specified.
     *
     * @param sourceHeader Name of input column
     * @param type Encryption transform selected
     * @return Name of the output column
     */
    ColumnHeader promptTargetHeader(final ColumnHeader sourceHeader,
                                    @NonNull final ColumnType type) {
        final ColumnHeader targetHeader = promptTargetHeaderAndSuffix(sourceHeader, type);
        if (targetHeader == null) {
            // Bug fix: previously a failed prompt fell through and added null to
            // usedColumnHeaders, polluting the uniqueness set. Just let the caller retry.
            return null;
        }
        if (usedColumnHeaders.contains(targetHeader)) {
            consoleOutput.println("Expected a unique target header, but `" + targetHeader + "` has already been used in this schema.");
            return null;
        }
        usedColumnHeaders.add(targetHeader);
        return targetHeader;
    }

    /**
     * If the user chose {@link ColumnType#SEALED} as the transform type, ask what kind of data padding should be used, if any.
     *
     * @param targetHeader Output column name
     * @param defaultType Default type of padding to use if the user doesn't specify an option
     * @return Type of padding to use for output column
     */
    PadType promptPadType(@NonNull final ColumnHeader targetHeader, final PadType defaultType) {
        final PadType type;
        consoleOutput.print("`" + targetHeader + "` padding type: [n]one, [f]ixed, or [m]ax");
        if (defaultType != null) {
            consoleOutput.print(" (default `" + defaultType.toString().toLowerCase() + "`)");
        }
        consoleOutput.print("? ");
        final String userInput = readNextLineLowercase();
        if (userInput.isBlank()) {
            if (defaultType == null) {
                consoleOutput.println("Expected a padding type, but found no input.");
            }
            type = defaultType;
        } else if ("none".startsWith(userInput)) {
            type = PadType.NONE;
        } else if ("fixed".startsWith(userInput)) {
            type = PadType.FIXED;
        } else if ("max".startsWith(userInput)) {
            type = PadType.MAX;
        } else {
            consoleOutput.println("Expected a valid padding type, but got `" + userInput + "`.");
            type = null;
        }
        return type;
    }

    /**
     * Get the type of padding to be used (see {@link PadType}) and length if the user chose {@link ColumnType#SEALED}.
     *
     * @param targetHeader Name of the output column
     * @return Pad type and length
     * @see PadType
     * @see Pad
     */
    Pad promptPad(@NonNull final ColumnHeader targetHeader) {
        final PadType padType = repeatUntilNotNull(() ->
                promptPadType(targetHeader, PadType.MAX)
        );
        if (padType == PadType.NONE) {
            return Pad.DEFAULT;
        }
        final String basePrompt;
        final Integer defaultLength;
        if (padType == PadType.FIXED) {
            defaultLength = null;
            basePrompt = "Byte-length to pad cleartext to in `" + targetHeader + "`";
        } else {
            // padType == PadType.MAX
            defaultLength = 0;
            consoleOutput.println("All values in `" + targetHeader + "` will be padded to the byte-length of the");
            consoleOutput.println("longest value plus a specified number of additional padding bytes.");
            basePrompt = "How many additional padding bytes should be used";
        }
        final int length = repeatUntilNotNull(() ->
                promptNonNegativeInt(basePrompt, defaultLength, PadUtil.MAX_PAD_BYTES)
        );
        return Pad.builder().type(padType).length(length).build();
    }

    /**
     * Prompt for all column info to generate a target column.
     *
     * @param sourceHeader Source column target is derived from
     * @param currentTargetColumnCount This is column `N` of {@code totalTargetColumnCount}
     *                                 being generated from {@code sourceHeader}
     * @param totalTargetColumnCount Total number of columns being generated from {@code sourceHeader}.
     * @return The user-provided column specification.
     */
    ColumnSchema promptColumnInfo(final ColumnHeader sourceHeader,
                                  final int currentTargetColumnCount,
                                  final int totalTargetColumnCount) {
        consoleOutput.println();
        consoleOutput.print("Gathering information for target column ");
        if (totalTargetColumnCount > 1) {
            consoleOutput.print(currentTargetColumnCount + " of " + totalTargetColumnCount + " ");
        }
        final String columnRef = SchemaGeneratorUtils.columnReference(sourceHeader, getCurrentSourceColumnPosition());
        consoleOutput.println("from source " + columnRef + ".");
        final ClientDataType dataType = getCurrentSourceColumnDataType();
        final ColumnType columnType;
        if (dataType == ClientDataType.UNKNOWN) {
            consoleOutput.println("Cryptographic computing is not supported for this column's data type.");
            consoleOutput.println("This column's data will be cleartext.");
            columnType = ColumnType.CLEARTEXT;
        } else {
            columnType = repeatUntilNotNull(this::promptColumnType);
        }
        final ColumnHeader targetHeader = repeatUntilNotNull(() -> promptTargetHeader(sourceHeader, columnType));
        ColumnSchema.ColumnSchemaBuilder columnBuilder = ColumnSchema.builder()
                .sourceHeader(sourceHeader)
                .targetHeader(targetHeader)
                .type(columnType);
        if (columnType == ColumnType.SEALED) {
            final Pad pad = repeatUntilNotNull(() -> promptPad(targetHeader));
            columnBuilder = columnBuilder.pad(pad);
        }
        return columnBuilder.build();
    }

    /**
     * Asks how many times this column will be mapped to output data. A one-to-one mapping is not assumed because multiple transform types
     * may be used.
     *
     * @param sourceHeader Name of the input column
     */
    void generateTargetColumns(final ColumnHeader sourceHeader) {
        final String columnReference = SchemaGeneratorUtils.columnReference(sourceHeader, getCurrentSourceColumnPosition());
        final int defaultTargetColumnCount = 1;
        consoleOutput.println("\nExamining source " + columnReference + ".");
        final boolean isSupportedType = getCurrentSourceColumnDataType() != ClientDataType.UNKNOWN;
        final int targetColumnCount;
        if (isSupportedType || allowCleartextColumns) {
            if (!isSupportedType) {
                // Warn that this column can only appear as cleartext
                consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeWarning(sourceHeader, getCurrentSourceColumnPosition()));
            }
            targetColumnCount = repeatUntilNotNull(() ->
                    promptNonNegativeInt(
                            "Number of target columns from source " + columnReference,
                            defaultTargetColumnCount,
                            Limits.ENCRYPTED_OUTPUT_COLUMN_COUNT_MAX));
        } else {
            // This column cannot even appear as cleartext because of collaboration settings,
            // so warn that it will be skipped
            consoleOutput.println(SchemaGeneratorUtils.unsupportedTypeSkippingColumnWarning(
                    sourceHeader,
                    getCurrentSourceColumnPosition()));
            unsupportedTypeColumnCount++;
            targetColumnCount = 0;
        }
        // schemas derived from the current source column are stored in this array
        final var targetSchemasFromSourceColumn = new ArrayList<ColumnSchema>(targetColumnCount);
        // 1-based indices since `i` is only used really to count and print user messages if `targetColumnCount > 1`
        // and `1 of N` looks better than `0 of N-1` in printed messages.
        for (int i = 1; i <= targetColumnCount; i++) {
            targetSchemasFromSourceColumn.add(promptColumnInfo(sourceHeader, i, targetColumnCount));
        }
        generatedColumnSchemas.add(targetSchemasFromSourceColumn);
    }

    /**
     * Ask the user how to map each input column to output data until all columns have been processed.
     */
    private void generateColumns() {
        if (headers != null) {
            for (var header : headers) {
                generateTargetColumns(header);
            }
        } else {
            for (int i = 0; i < sourceColumnCount; i++) {
                generateTargetColumns(null);
            }
        }
    }
}
| 2,438 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/schema/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
 * Utilities for generating schemas that describe how input data will be mapped to output data:
 * a template generator that produces a simple skeleton schema, and an interactive helper program
 * for users who want to be walked through creating a complete schema for their data.
 *
 * <p>
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 * SPDX-License-Identifier: Apache-2.0
 */
package com.amazonaws.c3r.spark.io.schema; | 2,439 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/SparkCsvReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvRowReader;
import com.amazonaws.c3r.spark.config.SparkConfig;
import lombok.NonNull;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import java.io.File;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Utility class for Spark to read CSV files from disk.
*/
public abstract class SparkCsvReader {

    /**
     * Reads the input file for processing, normalizing headers.
     *
     * @param sparkSession The spark session to read with
     * @param source Location of input data
     * @param inputNullValue What should be interpreted as {@code null} in the input
     * @param externalHeaders Strings to use as column header names if the file itself does not contain a header row
     * @return The source data to be processed
     */
    public static Dataset<Row> readInput(@NonNull final SparkSession sparkSession,
                                         @NonNull final String source,
                                         final String inputNullValue,
                                         final List<ColumnHeader> externalHeaders) {
        return readInput(sparkSession, source, inputNullValue, externalHeaders, false);
    }

    /**
     * Reads the input file for processing.
     *
     * @param sparkSession The spark session to read with
     * @param source Location of input data
     * @param inputNullValue What should be interpreted as {@code null} in the input
     * @param externalHeaders Strings to use as column header names if the file itself does not contain a header row
     * @param skipHeaderNormalization Whether to skip the normalization of read in headers
     * @return The source data to be processed
     */
    public static Dataset<Row> readInput(@NonNull final SparkSession sparkSession,
                                         @NonNull final String source,
                                         final String inputNullValue,
                                         final List<ColumnHeader> externalHeaders,
                                         final boolean skipHeaderNormalization) {
        // These options are handed to the custom Csv data source (see initReader for the read side).
        final Map<String, String> options = new HashMap<>();
        options.put("inputNullValue", inputNullValue);
        options.put(SparkConfig.PROPERTY_KEY_SKIP_HEADER_NORMALIZATION, Boolean.toString(skipHeaderNormalization));
        if (externalHeaders != null && !externalHeaders.isEmpty()) {
            // Headers are serialized as a comma-joined string since Spark options are flat string pairs.
            options.put("headers", externalHeaders.stream().map(ColumnHeader::toString).collect(Collectors.joining(",")));
        }
        return readFiles(sparkSession, source, options);
    }

    /**
     * Reads the input for processing. If it is a directory, each regular file directly inside it is
     * read and unioned (subdirectories are skipped; recursion is not currently supported).
     *
     * @param sparkSession The spark session to read with
     * @param source Location of input data
     * @param options Configuration options
     * @return The source data to be processed
     * @throws C3rRuntimeException If the source cannot be read, no readable files are found,
     *                             or the files' columns do not match
     */
    private static Dataset<Row> readFiles(@NonNull final SparkSession sparkSession,
                                          @NonNull final String source,
                                          final Map<String, String> options) {
        final File sourceFile = Path.of(source).toFile();
        if (sourceFile.isFile()) {
            return readFile(sparkSession, source, options);
        }
        final File[] files = sourceFile.listFiles();
        if (files == null) {
            throw new C3rRuntimeException("Source could not be read at path " + sourceFile + ".");
        }
        Dataset<Row> dataset = null;
        Set<String> columns = null;
        // Track the file the baseline schema came from so mismatch errors name the right file.
        // (Bug fix: the message previously always blamed files[0], which may have been a
        // skipped directory rather than the first file actually read.)
        File firstReadFile = null;
        for (File file : files) {
            if (file.isDirectory()) {
                continue; // Skip directories. Recursion not currently supported.
            }
            if (dataset == null) {
                dataset = readFile(sparkSession, file.getAbsolutePath(), options);
                columns = Set.of(dataset.columns());
                firstReadFile = file;
            } else {
                final Dataset<Row> nextDataset = readFile(sparkSession, file.getAbsolutePath(), options);
                final Set<String> nextDatasetColumns = Set.of(nextDataset.columns());
                if (columns.size() != nextDatasetColumns.size() || !columns.containsAll(nextDatasetColumns)) {
                    // unionAll will merge data based on column position without further enforcement of schemas.
                    throw new C3rRuntimeException("Found mismatched columns between "
                            + firstReadFile.getAbsolutePath() + " and " + file.getAbsolutePath() + ".");
                }
                // We must use unionAll and not union because union filters on distinct rows.
                dataset = dataset.unionAll(nextDataset);
            }
        }
        if (dataset == null) {
            // Robustness fix: previously a directory containing no regular files returned null,
            // which surfaced later as an opaque NullPointerException in the caller.
            throw new C3rRuntimeException("Source could not be read at path " + sourceFile + ".");
        }
        return dataset;
    }

    /**
     * Reads the input file for processing.
     *
     * @param sparkSession The spark session to read with
     * @param sourceFile Location of input data
     * @param options Configuration options
     * @return The source data to be processed
     */
    private static Dataset<Row> readFile(@NonNull final SparkSession sparkSession,
                                         @NonNull final String sourceFile,
                                         final Map<String, String> options) {
        return sparkSession.read()
                .options(options)
                .format(Csv.class.getCanonicalName())
                .load(sourceFile);
    }

    /**
     * Constructs a CsvRowReader for parsing CSV files.
     *
     * @param properties A map of configuration settings
     * @return a CsvRowReader for parsing CSV files
     * @throws C3rRuntimeException if a path is not contained in the configuration settings
     */
    public static CsvRowReader initReader(final Map<String, String> properties) {
        if (!properties.containsKey("path")) {
            throw new C3rRuntimeException("A `path` must be provided when reading.");
        }
        final boolean skipHeaderNormalization =
                properties.getOrDefault(SparkConfig.PROPERTY_KEY_SKIP_HEADER_NORMALIZATION, "false")
                        .equalsIgnoreCase("true");
        final String source = properties.get("path");
        final String inputNullValue = properties.get("inputNullValue");
        // Headers, if present, were serialized as a comma-joined string by readInput above.
        final List<ColumnHeader> externalHeaders = properties.get("headers") == null ?
                null : Arrays.stream(properties.get("headers").split(","))
                .map(ColumnHeader::new)
                .collect(Collectors.toList());
        // Default to UTF-8 when no explicit charset is configured.
        final Charset fileCharset = properties.get("fileCharset") == null ?
                StandardCharsets.UTF_8 : Charset.forName(properties.get("fileCharset"));
        return CsvRowReader.builder().sourceName(source)
                .inputNullValue(inputNullValue)
                .externalHeaders(externalHeaders)
                .fileCharset(fileCharset)
                .skipHeaderNormalization(skipHeaderNormalization)
                .build();
    }
}
| 2,440 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/SparkCsvWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvRowWriter;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.NonNull;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Utility class for Spark to write CSV files to disk.
*/
public abstract class SparkCsvWriter {
    /**
     * Writes the Dataset to the root path.
     *
     * <p>
     * Delegates to the custom {@link Csv} data source; the dataset's column names and the
     * session UUID are passed through the options map so each executor-side writer
     * (see {@link #initWriter(int, Map)}) can name its part file.
     *
     * @param dataset The data to write to CSV
     * @param targetName The target path to write to
     * @param outputNullValue What should represent {@code null} in the output (may be {@code null})
     */
    public static void writeOutput(@NonNull final Dataset<Row> dataset,
                                   @NonNull final String targetName,
                                   final String outputNullValue) {
        final Map<String, String> options = new HashMap<>();
        options.put("outputNullValue", outputNullValue);
        final String headers = String.join(",", dataset.columns());
        options.put("headers", headers);
        options.put("sessionUuid", dataset.sparkSession().sessionUUID());
        dataset.write().mode(SaveMode.Append).options(options).format(Csv.class.getCanonicalName())
                .save(targetName);
    }

    /**
     * Constructs a CsvRowWriter for writing CSV files.
     *
     * @param partitionId The partition being processed
     * @param properties A map of configuration settings
     * @return a CsvRowWriter for writing CSV files
     * @throws C3rRuntimeException if a path is not contained in the configuration settings
     */
    public static CsvRowWriter initWriter(final int partitionId, final Map<String, String> properties) {
        if (!properties.containsKey("path")) {
            // Fixed previously garbled message ("provided with the provided"); wording now
            // mirrors the reader-side error in SparkCsvReader.initReader.
            throw new C3rRuntimeException("A `path` must be provided when writing.");
        }
        final Path path = Path.of(properties.get("path"));
        FileUtil.initDirectoryIfNotExists(path.toString());
        // Generate a file name matching Spark's usual part-file pattern: part-<id>-<uuid>.csv.
        final String sessionUuid = properties.get("sessionUuid");
        final String formattedPartitionId = String.format("%05d", partitionId);
        final String target = path.resolve("part-" + formattedPartitionId + "-" + sessionUuid + ".csv").toString();
        FileUtil.initFileIfNotExists(target);
        final String outputNullValue = properties.get("outputNullValue");
        // NOTE(review): assumes "headers" is always present (writeOutput sets it); a missing key
        // would NPE here — confirm no other code path constructs these properties.
        final List<ColumnHeader> headers = Arrays.stream(properties.get("headers").split(","))
                .map(ColumnHeader::new)
                .collect(Collectors.toList());
        final Charset fileCharset = properties.get("fileCharset") == null ?
                StandardCharsets.UTF_8 : Charset.forName(properties.get("fileCharset"));
        return CsvRowWriter.builder().targetName(target)
                .outputNullValue(outputNullValue)
                .headers(headers)
                .fileCharset(fileCharset)
                .build();
    }
}
| 2,441 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvWriteBuilder.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.connector.write.Write;
import org.apache.spark.sql.connector.write.WriteBuilder;
import java.util.Map;
/**
* An implementation of WriterBuilder for building the Write.
*/
public class CsvWriteBuilder implements WriteBuilder {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a write builder over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvWriteBuilder(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Builds the logical write over this builder's configuration.
     *
     * @return a new {@link CsvWrite}
     */
    @Override
    public Write build() {
        return new CsvWrite(properties);
    }
}
| 2,442 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvBatch.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.connector.read.Batch;
import org.apache.spark.sql.connector.read.InputPartition;
import org.apache.spark.sql.connector.read.PartitionReaderFactory;
import java.util.Map;
/**
* A physical representation of a data source scan for batch queries. This interface is used to provide physical information, like how
* many partitions the scanned data has, and how to read records from the partitions.
*/
public class CsvBatch implements Batch {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a batch scan over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvBatch(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Plans the input partitions: the entire CSV source is exposed as a single partition.
     *
     * @return a one-element array holding a new {@link CsvInputPartition}
     */
    @Override
    public InputPartition[] planInputPartitions() {
        return new InputPartition[]{new CsvInputPartition()};
    }

    /**
     * Creates the factory used to construct partition readers on executors.
     *
     * @return a {@link CsvPartitionReaderFactory} carrying this batch's configuration
     */
    @Override
    public PartitionReaderFactory createReaderFactory() {
        return new CsvPartitionReaderFactory(properties);
    }
}
| 2,443 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvInputPartition.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import org.apache.spark.sql.connector.read.InputPartition;
/**
* A serializable representation of an input partition returned by {@code Batch.planInputPartitions()} and the corresponding ones in
* streaming.
*
* <p>
* Note that InputPartition will be serialized and sent to executors, then PartitionReader will be created by {@code PartitionReaderFactory
* .createReader(InputPartition)} or {@code PartitionReaderFactory.createColumnarReader(InputPartition)} on executors to do the actual
* reading. So InputPartition must be serializable while PartitionReader doesn't need to be.
*/
public class CsvInputPartition implements InputPartition {
    // Intentionally empty marker class: CsvBatch.planInputPartitions() returns a single instance
    // of this type, so the whole CSV source is read as one partition and no per-partition state
    // (beyond serializable identity) is needed.
}
| 2,444 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvTable.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.Getter;
import lombok.experimental.Accessors;
import org.apache.spark.sql.connector.catalog.SupportsRead;
import org.apache.spark.sql.connector.catalog.SupportsWrite;
import org.apache.spark.sql.connector.catalog.TableCapability;
import org.apache.spark.sql.connector.read.ScanBuilder;
import org.apache.spark.sql.connector.write.LogicalWriteInfo;
import org.apache.spark.sql.connector.write.WriteBuilder;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.util.CaseInsensitiveStringMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
/**
* A mix-in implementation of Table, to indicate that it's readable and writeable.
*/
@Accessors(fluent = true)
@Getter
public class CsvTable implements SupportsRead, SupportsWrite {
    /**
     * A schema representation of the CSV file.
     */
    private final StructType schema;

    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * A set of capabilities this Table supports.
     */
    private final Set<TableCapability> capabilities;

    /**
     * Constructs a new CsvTable.
     *
     * @param schema the schema of the CSV file, or {@code null} to infer one from the properties
     * @param properties A map of configuration settings
     */
    public CsvTable(final StructType schema, final Map<String, String> properties) {
        // Defensive copy so later mutation of the caller's map cannot affect this table.
        this.properties = new HashMap<>(properties);
        final Set<TableCapability> supported = new HashSet<>();
        supported.add(TableCapability.BATCH_READ);
        supported.add(TableCapability.BATCH_WRITE);
        this.capabilities = supported;
        if (schema == null) {
            this.schema = SchemaUtil.inferSchema(this.properties);
        } else {
            this.schema = schema;
        }
    }

    /**
     * Creates a scan builder over this table's schema and configuration.
     *
     * @param options per-scan options from Spark; the constructor-supplied properties are used instead
     * @return a new {@link CsvScanBuilder}
     */
    @Override
    public ScanBuilder newScanBuilder(final CaseInsensitiveStringMap options) {
        return new CsvScanBuilder(schema, properties);
    }

    /**
     * Display name of the table.
     *
     * @return the string form of this class object (e.g. {@code class ...CsvTable})
     */
    @Override
    public String name() {
        return this.getClass().toString();
    }

    /**
     * Creates a write builder over this table's configuration.
     *
     * @param info logical write information supplied by Spark (unused)
     * @return a new {@link CsvWriteBuilder}
     */
    @Override
    public WriteBuilder newWriteBuilder(final LogicalWriteInfo info) {
        return new CsvWriteBuilder(properties);
    }
}
| 2,445 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvScan.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import lombok.Getter;
import lombok.experimental.Accessors;
import org.apache.spark.sql.connector.read.Batch;
import org.apache.spark.sql.connector.read.Scan;
import org.apache.spark.sql.types.StructType;
import java.util.Map;
/**
* A logical representation of a data source scan. This interface is used to provide logical information, like what the actual read
* schema is.
*/
public class CsvScan implements Scan {
    /**
     * A schema representation of the CSV file.
     */
    private final StructType readSchema;

    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a logical scan.
     *
     * @param readSchema the schema rows will be produced under
     * @param properties A map of configuration settings
     */
    public CsvScan(final StructType readSchema, final Map<String, String> properties) {
        this.readSchema = readSchema;
        this.properties = properties;
    }

    /**
     * The schema this scan produces.
     *
     * @return the read schema
     */
    @Override
    public StructType readSchema() {
        return readSchema;
    }

    /**
     * Converts this logical scan to its batch form.
     *
     * @return a {@link CsvBatch} over this scan's configuration
     */
    @Override
    public Batch toBatch() {
        return new CsvBatch(properties);
    }
}
| 2,446 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvDataWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.CsvRow;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.CsvRowWriter;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.connector.write.DataWriter;
import org.apache.spark.sql.connector.write.WriterCommitMessage;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* A data writer returned by {@code DataWriterFactory.createWriter(int, long)} and is responsible for writing data for an input RDD
* partition.
*/
public class CsvDataWriter implements DataWriter<InternalRow> {
    /**
     * Writer for writing CSV files.
     */
    private final CsvRowWriter writer;

    /**
     * Constructs a new CsvDataWriter for one output partition.
     *
     * @param partitionId The partition being processed
     * @param properties A map of configuration settings
     */
    public CsvDataWriter(final int partitionId, final Map<String, String> properties) {
        this.writer = SparkCsvWriter.initWriter(partitionId, properties);
    }

    /**
     * Converts one Spark row to a {@link CsvRow} and hands it to the underlying writer.
     *
     * @param record the Spark row to write; every field is treated as a (possibly null) string
     * @throws IOException if the underlying writer fails
     * @throws C3rRuntimeException if the row's field count does not match the configured headers
     */
    @Override
    public void write(final InternalRow record) throws IOException {
        final List<ColumnHeader> columns = writer.getHeaders();
        final int fieldCount = record.numFields();
        if (fieldCount != columns.size()) {
            throw new C3rRuntimeException("Column count mismatch when writing row. Expected "
                    + columns.size() + " but was " + fieldCount);
        }
        final Row<CsvValue> toWrite = new CsvRow();
        for (int col = 0; col < fieldCount; col++) {
            // A null UTF8String cannot be converted via getString, so map it to a null CSV value.
            final String text = record.getUTF8String(col) == null ? null : record.getString(col);
            toWrite.putValue(columns.get(col), new CsvValue(text));
        }
        writer.writeRow(toWrite);
    }

    /**
     * No commit message is produced; files are written directly by this writer.
     *
     * @return always {@code null}
     */
    @Override
    public WriterCommitMessage commit() {
        return null;
    }

    /**
     * No rollback is performed on abort.
     */
    @Override
    public void abort() {
        // no-op
    }

    /**
     * Flushes and closes the underlying CSV writer.
     */
    @Override
    public void close() {
        writer.close();
    }
}
| 2,447 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvScanBuilder.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.connector.read.Scan;
import org.apache.spark.sql.connector.read.ScanBuilder;
import org.apache.spark.sql.types.StructType;
import java.util.Map;
/**
* An implementation of ScanBuilder for building the Scan.
*/
public class CsvScanBuilder implements ScanBuilder {
    /**
     * A schema representation of the CSV file.
     */
    private final StructType schema;

    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a scan builder.
     *
     * @param schema the schema the scan will produce
     * @param properties A map of configuration settings
     */
    public CsvScanBuilder(final StructType schema, final Map<String, String> properties) {
        this.schema = schema;
        this.properties = properties;
    }

    /**
     * Builds the logical scan.
     *
     * @return a new {@link CsvScan} over this builder's schema and configuration
     */
    @Override
    public Scan build() {
        return new CsvScan(schema, properties);
    }
}
| 2,448 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvWrite.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.connector.write.BatchWrite;
import org.apache.spark.sql.connector.write.Write;
import java.util.Map;
/**
* A logical representation of a data source write.
*/
public class CsvWrite implements Write {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a logical write over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvWrite(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Converts this logical write to its batch form.
     *
     * @return a {@link CsvBatchWrite} over this write's configuration
     */
    @Override
    public BatchWrite toBatch() {
        return new CsvBatchWrite(properties);
    }
}
| 2,449 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/Csv.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import org.apache.spark.sql.connector.catalog.Table;
import org.apache.spark.sql.connector.catalog.TableProvider;
import org.apache.spark.sql.connector.expressions.Transform;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.util.CaseInsensitiveStringMap;
import java.util.Map;
/**
* Custom CSV DataSource for Spark. Using this custom DataSource in place of Spark's built-in functionality allows us to maintain tighter
* controls of edge cases like {@code null}, quoted empty space, and custom null values.
*/
public class Csv implements TableProvider {
    /**
     * Infers the schema of the CSV source described by the options.
     *
     * @param options case-insensitive data source options (e.g. {@code path}, {@code headers})
     * @return the inferred schema (all columns are strings; see {@link SchemaUtil})
     */
    @Override
    public StructType inferSchema(final CaseInsensitiveStringMap options) {
        return SchemaUtil.inferSchema(options);
    }

    /**
     * Creates the table backing this data source.
     *
     * @param schema the schema to use, or {@code null} to have {@link CsvTable} infer one
     * @param partitioning partition transforms (unused by this data source)
     * @param properties A map of configuration settings
     * @return a {@link CsvTable} over the given schema and configuration
     */
    @Override
    public Table getTable(final StructType schema, final Transform[] partitioning, final Map<String, String> properties) {
        return new CsvTable(schema, properties);
    }
}
| 2,450 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvPartitionReaderFactory.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.connector.read.InputPartition;
import org.apache.spark.sql.connector.read.PartitionReader;
import org.apache.spark.sql.connector.read.PartitionReaderFactory;
import java.util.Map;
/**
* A factory used to create PartitionReader instances.
*
* <p>
* If Spark fails to execute any methods in the implementations of this interface or in the returned PartitionReader (by throwing an
* exception), corresponding Spark task would fail and get retried until hitting the maximum retry times.
*/
public class CsvPartitionReaderFactory implements PartitionReaderFactory {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a reader factory over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvPartitionReaderFactory(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Creates the reader for a partition.
     *
     * @param partition the partition to read (unused; the whole file is one partition)
     * @return a new {@link CsvPartitionReader} over this factory's configuration
     */
    @Override
    public PartitionReader<InternalRow> createReader(final InputPartition partition) {
        return new CsvPartitionReader(properties);
    }
}
| 2,451 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvPartitionReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.CsvValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.io.CsvRowReader;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.connector.read.PartitionReader;
import org.apache.spark.unsafe.types.UTF8String;
import scala.collection.JavaConverters;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
/**
* A partition reader returned by {@code PartitionReaderFactory.createReader(InputPartition)} or {@code PartitionReaderFactory
* .createColumnarReader(InputPartition)}. It's responsible for outputting data for a RDD partition.
*/
public class CsvPartitionReader implements PartitionReader<InternalRow> {
    /**
     * Reader for processing CSV files.
     */
    private final CsvRowReader csvReader;

    /**
     * Constructs a new CsvPartitionReader.
     *
     * @param properties A map of configuration settings
     */
    public CsvPartitionReader(final Map<String, String> properties) {
        this.csvReader = SparkCsvReader.initReader(properties);
    }

    /**
     * Reports whether another row is available from the underlying CSV reader.
     *
     * @return {@code true} while rows remain
     */
    @Override
    public boolean next() {
        return csvReader.hasNext();
    }

    /**
     * Reads the next CSV row and converts it into a Spark internal row of UTF-8 strings.
     *
     * @return the converted row; null CSV values become null fields
     */
    @Override
    public InternalRow get() {
        final Row<CsvValue> row = csvReader.next();
        final List<ColumnHeader> headers = csvReader.getHeaders();
        final Object[] fields = new Object[row.size()];
        for (int i = 0; i < fields.length; i++) {
            final CsvValue value = row.getValue(headers.get(i));
            fields[i] = value.isNull() ? null : UTF8String.fromString(value.toString());
        }
        return InternalRow.apply(JavaConverters.asScalaIteratorConverter(Arrays.asList(fields).iterator()).asScala().toSeq());
    }

    /**
     * Closes the underlying CSV reader and its file handle.
     */
    @Override
    public void close() {
        csvReader.close();
    }
}
| 2,452 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvDataWriterFactory.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.connector.write.DataWriter;
import org.apache.spark.sql.connector.write.DataWriterFactory;
import java.util.Map;
/**
* A factory of DataWriter returned by {@code BatchWrite.createBatchWriterFactory(PhysicalWriteInfo)}, which is responsible for creating and
* initializing the actual data writer at executor side.
*
* <p>
* Note that, the writer factory will be serialized and sent to executors, then the data writer will be created on executors and do the
* actual writing. So this interface must be serializable and DataWriter doesn't need to be.
*/
public class CsvDataWriterFactory implements DataWriterFactory {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a writer factory over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvDataWriterFactory(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Creates the data writer for one partition/task on an executor.
     *
     * @param partitionId the partition being written
     * @param taskId the Spark task id (unused)
     * @return a new {@link CsvDataWriter}
     */
    @Override
    public DataWriter<InternalRow> createWriter(final int partitionId, final long taskId) {
        return new CsvDataWriter(partitionId, properties);
    }
}
| 2,453 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support for reading and writing rows of CSV data.
*
* <p>
* Only the SparkCsvReader and SparkCsvWriter should be used for development. All other classes are subject to change without notice.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.spark.io.csv; | 2,454 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/CsvBatchWrite.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import lombok.AllArgsConstructor;
import org.apache.spark.sql.connector.write.BatchWrite;
import org.apache.spark.sql.connector.write.DataWriterFactory;
import org.apache.spark.sql.connector.write.PhysicalWriteInfo;
import org.apache.spark.sql.connector.write.WriterCommitMessage;
import java.util.Map;
/**
* An implementation of BatchWrite that defines how to write the data to data source for batch processing.
*/
public class CsvBatchWrite implements BatchWrite {
    /**
     * A map of configuration settings.
     */
    private final Map<String, String> properties;

    /**
     * Creates a batch write over the given configuration settings.
     *
     * @param properties A map of configuration settings
     */
    public CsvBatchWrite(final Map<String, String> properties) {
        this.properties = properties;
    }

    /**
     * Creates the serializable factory used to build per-partition writers on executors.
     *
     * @param info physical write information supplied by Spark (unused)
     * @return a {@link CsvDataWriterFactory} carrying this write's configuration
     */
    @Override
    public DataWriterFactory createBatchWriterFactory(final PhysicalWriteInfo info) {
        return new CsvDataWriterFactory(properties);
    }

    /**
     * Commit is a no-op: part files are written directly by each {@link CsvDataWriter}.
     *
     * @param messages writer commit messages (unused)
     */
    @Override
    public void commit(final WriterCommitMessage[] messages) {
        // no-op
    }

    /**
     * Abort is a no-op: no cleanup of already-written part files is performed.
     *
     * @param messages writer commit messages (unused)
     */
    @Override
    public void abort(final WriterCommitMessage[] messages) {
        // no-op
    }
}
| 2,455 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/csv/SchemaUtil.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.csv;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.io.CsvRowReader;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
* Utility class for creating schemas from ColumnHeaders.
*/
public abstract class SchemaUtil {
    /**
     * Create a schema based on the headers provided.
     *
     * <p>
     * Every column is typed as a nullable string.
     *
     * @param headers The headers used to create a schema
     * @return a schema for the headers provided
     */
    public static StructType inferSchema(final List<ColumnHeader> headers) {
        final StructField[] fields = headers.stream().map(ColumnHeader::toString)
                .map(header -> DataTypes.createStructField(header, DataTypes.StringType, true))
                .toArray(StructField[]::new);
        return DataTypes.createStructType(fields);
    }

    /**
     * Create a schema based on the headers provided. If no headers were provided, attempt to read the headers.
     *
     * @param properties A map of configuration settings
     * @return a schema for the headers provided
     */
    public static StructType inferSchema(final Map<String, String> properties) {
        if (!properties.containsKey("headers")) {
            // Sniff the headers from the source file itself. The reader must be closed afterwards
            // so the file handle opened just for schema inference is not leaked (it previously
            // was never closed).
            final CsvRowReader reader = SparkCsvReader.initReader(properties);
            try {
                return SchemaUtil.inferSchema(reader.getHeaders());
            } finally {
                reader.close();
            }
        }
        final String[] headers = properties.get("headers").split(",");
        final List<StructField> fields = Arrays.stream(headers)
                .map(field -> DataTypes.createStructField(field, DataTypes.StringType, true))
                .collect(Collectors.toList());
        return DataTypes.createStructType(fields);
    }
}
| 2,456 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.parquet;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Limits;
import lombok.NonNull;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.DataTypes;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Utility class for Spark to read Parquet files from disk.
*/
public abstract class SparkParquetReader {
    /**
     * Maximum number of columns allowed.
     */
    static final int MAX_COLUMN_COUNT = 10000;

    /**
     * Reads the input file for processing, normalizing headers.
     *
     * @param sparkSession The spark session to read with
     * @param source Location of input data
     * @return The source data to be processed
     */
    public static Dataset<Row> readInput(@NonNull final SparkSession sparkSession,
                                         @NonNull final String source) {
        return readInput(sparkSession, source, false, ParquetConfig.DEFAULT);
    }

    /**
     * Reads the input file for processing, optionally normalizing headers.
     *
     * @param sparkSession The spark session to read with
     * @param source Location of input data
     * @param skipHeaderNormalization Whether to skip the normalization of read in headers
     * @param parquetConfig Parquet specific configuration information
     * @return The source data to be processed
     * @throws C3rRuntimeException if the data exceeds the column or row limits
     */
    public static Dataset<Row> readInput(@NonNull final SparkSession sparkSession,
                                         @NonNull final String source,
                                         final boolean skipHeaderNormalization,
                                         @NonNull final ParquetConfig parquetConfig) {
        // Note: a previously-created, always-empty options map passed to the reader was dead
        // code and has been removed.
        Dataset<Row> dataset = sparkSession.read().parquet(source);
        if (!skipHeaderNormalization) {
            // Rename every column to its ColumnHeader-normalized form.
            final Map<String, String> renameMap = Arrays.stream(dataset.columns())
                    .collect(Collectors.toMap(Function.identity(), c -> new ColumnHeader(c).toString()));
            dataset = dataset.withColumnsRenamed(renameMap);
        }
        dataset = reconstructTypes(dataset, parquetConfig.getBinaryAsString());
        validate(dataset);
        return dataset;
    }

    /**
     * Converts unannotated binary values to strings in the data set if needed.
     *
     * @param originalDataset The raw Parquet data
     * @param binaryAsString {@code true} if binary values should be changed to strings
     *
     * @return A copy of the data set matching specifications
     */
    private static Dataset<Row> reconstructTypes(final Dataset<Row> originalDataset, final Boolean binaryAsString) {
        if (binaryAsString == null || !binaryAsString) {
            return originalDataset;
        }
        Dataset<Row> reconstructedDataset = originalDataset;
        for (var field : reconstructedDataset.schema().fields()) {
            if (field.dataType() == DataTypes.BinaryType) {
                reconstructedDataset = reconstructedDataset.withColumn(field.name(),
                        reconstructedDataset.col(field.name()).cast(DataTypes.StringType));
            }
        }
        return reconstructedDataset;
    }

    /**
     * Validate that the dataset is within the required limits.
     *
     * <p>
     * Note: {@code dataset.count()} forces a full pass over the data.
     *
     * @param dataset The dataset to validate
     * @throws C3rRuntimeException If the dataset exceeds any limits.
     */
    static void validate(final Dataset<Row> dataset) {
        if (dataset.columns().length > MAX_COLUMN_COUNT) {
            throw new C3rRuntimeException("Couldn't parse input file. Please verify that column count does not exceed "
                    + MAX_COLUMN_COUNT + ".");
        }
        if (dataset.count() > Limits.ROW_COUNT_MAX) {
            throw new C3rRuntimeException("A table cannot contain more than " + Limits.ROW_COUNT_MAX + " rows.");
        }
    }
}
| 2,457 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/parquet/SparkParquetWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.io.parquet;
import lombok.NonNull;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
/**
* Utility class for Spark to write Parquet files to disk.
*/
public abstract class SparkParquetWriter {
    /**
     * Writes the Dataset to the root path.
     *
     * <p>
     * Uses append mode, so existing data at {@code targetName} is kept and new part files are added.
     *
     * @param dataset The data to write
     * @param targetName The target path to write to
     */
    public static void writeOutput(@NonNull final Dataset<Row> dataset,
                                   @NonNull final String targetName) {
        dataset.write().mode(SaveMode.Append).parquet(targetName);
    }
}
| 2,458 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/io/parquet/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support for reading and writing rows of Parquet data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.spark.io.parquet; | 2,459 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/utils/C3rCliSparkProperties.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import software.amazon.awssdk.core.ApiName;
/**
* C3R CLI for Apache Spark properties.
*/
public final class C3rCliSparkProperties {
    /**
     * Application name of C3R CLI client for Apache Spark.
     */
    public static final String APP_NAME = "c3r-cli-spark";

    /**
     * User agent for the C3R CLI.
     *
     * <p>
     * Built from {@link #APP_NAME} and the shared SDK version so AWS requests identify this client.
     */
    public static final ApiName API_NAME = ApiName.builder()
            .name(APP_NAME)
            .version(C3rSdkProperties.VERSION)
            .build();

    /**
     * Hidden utility class constructor.
     */
    private C3rCliSparkProperties() {
    }
}
| 2,460 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/utils/SparkSessionUtil.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.utils;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import org.apache.spark.sql.SparkSession;
/**
* Utility class for Spark Session functionality.
*/
public abstract class SparkSessionUtil {
    /**
     * Initializes a SparkSession, reusing an existing one if the driver already has it.
     *
     * @return A SparkSession connected to the Spark Driver
     * @throws C3rRuntimeException if the Spark Driver cannot be connected to
     */
    public static SparkSession initSparkSession() {
        try {
            return SparkSession.builder().appName("C3R").getOrCreate();
        } catch (Exception e) {
            throw new C3rRuntimeException("Could not connect to Spark server.", e);
        }
    }

    /**
     * Shut down the Spark session.
     *
     * @param spark the SparkSession to close
     */
    public static void closeSparkSession(final SparkSession spark) {
        spark.stop();
    }
}
| 2,461 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/utils/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Utility classes that contain commonly used functionality across components.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.spark.utils; | 2,462 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/cli/CliDescriptions.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
/**
 * CliDescriptions contains the help mode description for all CLI parameters, so they are consistently described across classes.
 */
public final class CliDescriptions {
    /**
     * Description of AWS profile.
     */
    // Fixed: the previous concatenation ("AWS " + " SDK") produced a double space in the help text.
    public static final String AWS_PROFILE_DESCRIPTION = "AWS CLI profile for credentials and config (uses AWS "
            + "SDK default if omitted)";

    /**
     * Description of AWS region.
     */
    public static final String AWS_REGION_DESCRIPTION = "AWS region for API requests (uses AWS SDK default if omitted)";

    /**
     * Description of how to allow for custom CSV values in the input file.
     */
    public static final String ENCRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION = "Value representing how NULL is encoded in the input CSV data " +
            "(unquoted blank values and empty quotes are all interpreted as NULL (e.g., `,,`, `, ,` and `,\"\",`) by default)";

    /**
     * Description of how to allow for custom CSV NULL values in the encrypted output file.
     */
    public static final String ENCRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION = "The encoding of cleartext NULL values in the output file " +
            "(encrypted NULLs are encoded unambiguously, cleartext values default to the empty value `,,`)";

    /**
     * Description of how to allow for custom CSV NULL value interpretation in the encrypted input file.
     */
    public static final String DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION = "Value representing how the cleartext NULL value is encoded in" +
            " the input CSV data (defaults to `,,` for cleartext fields is interpreted as NULL as encrypted NULLs are encoded " +
            "unambiguously)";

    /**
     * Description of how to allow for custom CSV Null values in the decrypted output file.
     */
    public static final String DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION = "How a cleartext NULL value is encoded in the output file " +
            "(defaults to the empty value `,,`)";

    /**
     * Description of how to process primitive Parquet Binary values as strings if they have no logical annotations.
     */
    public static final String PARQUET_BINARY_AS_STRING = "Treat primitive Parquet Binary types without logical annotations " +
            "as if they had the string annotation.";

    /**
     * Explanation of dry run mode.
     */
    public static final String DRY_RUN_DESCRIPTION = "Check settings and files to verify configuration is valid but skip processing " +
            "the input file";

    /**
     * Explanation and warnings about enabling stack traces.
     */
    public static final String ENABLE_STACKTRACE_DESCRIPTION = "Enable stack traces (WARNING: stack traces may contain sensitive info)";

    /**
     * List of acceptable file formats that can be specified.
     */
    public static final String FILE_FORMAT_DESCRIPTION = "File format of <input>: ${COMPLETION-CANDIDATES}";

    /**
     * Explanation of allowing Fingerprint columns to pass through for debugging.
     */
    public static final String FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION = "Fail when encountering a fingerprint column during decryption " +
            "(disabled by default)";

    /**
     * Setting that allows for overwriting a file.
     */
    public static final String OVERWRITE_DESCRIPTION = "If output file exists, overwrite the file";

    /**
     * Setting for collaboration ID.
     */
    public static final String ID_DESCRIPTION = "Unique identifier for the collaboration. " +
            "Follows the pattern [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}";

    /**
     * Description of data input source setting (note: CSV not specified in anticipation of future formats).
     */
    public static final String INPUT_DESCRIPTION_CRYPTO = "Data to be processed";

    /**
     * Description of schema data source.
     */
    public static final String INPUT_DESCRIPTION_SCHEMA = "Tabular file used for schema generation";

    /**
     * Description of output file naming when using CSV files.
     */
    public static final String OUTPUT_DESCRIPTION_CRYPTO = "Output directory (defaults to `output`)";

    /**
     * Description of output file naming for schema creation.
     */
    public static final String OUTPUT_DESCRIPTION_SCHEMA = "Output file name (defaults to `<input>`.json)";

    /**
     * Schema file location.
     */
    public static final String SCHEMA_DESCRIPTION = "JSON file specifying table transformations";

    /**
     * Hidden constructor since this is a utility class.
     */
    private CliDescriptions() {
    }
}
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/cli/DecryptMode.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.action.SparkUnmarshaller;
import com.amazonaws.c3r.spark.config.SparkDecryptConfig;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.csv.SparkCsvWriter;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetReader;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetWriter;
import com.amazonaws.c3r.spark.utils.SparkSessionUtil;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import picocli.CommandLine;
import javax.crypto.SecretKey;
import java.util.UUID;
import java.util.concurrent.Callable;
import static com.amazonaws.c3r.encryption.keys.KeyUtil.KEY_ENV_VAR;
import static com.amazonaws.c3r.spark.cli.Main.generateCommandLine;
/**
 * Supports decrypting query results from an AWS Clean Rooms collaboration for analysis.
 */
@Slf4j
@Getter
@CommandLine.Command(name = "decrypt",
        mixinStandardHelpOptions = true,
        version = C3rSdkProperties.VERSION,
        descriptionHeading = "%nDescription:%n",
        description = "Decrypt data content derived from an AWS Clean Rooms collaboration as an Apache Spark job.")
public class DecryptMode implements Callable<Integer> {
    /**
     * Required command line arguments.
     */
    @Getter
    static class RequiredArgs {
        /**
         * {@value CliDescriptions#INPUT_DESCRIPTION_CRYPTO}.
         */
        // Use the already-imported CommandLine type for consistency with the rest of the file.
        @CommandLine.Parameters(description = CliDescriptions.INPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<input>")
        private String input = null;

        /**
         * {@value CliDescriptions#ID_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--id"},
                description = CliDescriptions.ID_DESCRIPTION,
                paramLabel = "<value>",
                required = true)
        private UUID id = null;
    }

    /**
     * Required values as specified by the user.
     */
    @CommandLine.ArgGroup(multiplicity = "1", exclusive = false, heading = "%nRequired parameters:%n")
    private RequiredArgs requiredArgs = new RequiredArgs();

    /**
     * Optional command line arguments.
     */
    @Getter
    private static class OptionalArgs {
        /**
         * {@value CliDescriptions#FILE_FORMAT_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--fileFormat", "-e"},
                description = CliDescriptions.FILE_FORMAT_DESCRIPTION,
                paramLabel = "<format>")
        private FileFormat fileFormat = null;

        /**
         * {@value CliDescriptions#OUTPUT_DESCRIPTION_CRYPTO}.
         */
        // The description and EncryptMode both treat this as a directory, so label it `<dir>` (was `<file>`).
        @CommandLine.Option(names = {"--output", "-o"},
                description = CliDescriptions.OUTPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<dir>")
        private String output = null;

        /**
         * {@value CliDescriptions#OVERWRITE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--overwrite", "-f"},
                description = CliDescriptions.OVERWRITE_DESCRIPTION)
        private boolean overwrite = false;

        /**
         * {@value CliDescriptions#DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvInputNULLValue", "-r"},
                description = CliDescriptions.DECRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvInputNullValue = null;

        /**
         * {@value CliDescriptions#DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvOutputNULLValue", "-w"},
                description = CliDescriptions.DECRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvOutputNullValue = null;

        /**
         * {@value CliDescriptions#FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--failOnFingerprintColumns", "--fof"},
                description = CliDescriptions.FAIL_ON_FINGERPRINT_COLUMNS_DESCRIPTION)
        private boolean failOnFingerprintColumns = false;

        /**
         * {@value CliDescriptions#DRY_RUN_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--dryRun", "-n"},
                description = CliDescriptions.DRY_RUN_DESCRIPTION)
        private boolean dryRun = false;

        /**
         * {@value CliDescriptions#ENABLE_STACKTRACE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--enableStackTraces", "-v"},
                description = CliDescriptions.ENABLE_STACKTRACE_DESCRIPTION)
        private boolean enableStackTraces = false;
    }

    /**
     * Optional values as specified by the user.
     */
    @CommandLine.ArgGroup(exclusive = false, heading = "%nOptional parameters:%n")
    private OptionalArgs optionalArgs = new OptionalArgs();

    /** SparkSession for orchestration. */
    private final SparkSession sparkSession;

    /**
     * Return a CLI instance for decryption.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     */
    DecryptMode() {
        this.sparkSession = SparkSessionUtil.initSparkSession();
    }

    /**
     * Return a CLI instance for decryption with a custom SparkSession.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     *
     * @param sparkSession Custom SparkSession to use for orchestration
     */
    DecryptMode(final SparkSession sparkSession) {
        this.sparkSession = sparkSession;
    }

    /**
     * Get the decryption mode command line application with standard CLI settings.
     *
     * @param sparkSession Custom SparkSession to use for orchestration
     * @return CommandLine interface for `decrypt` mode
     */
    public static CommandLine getApp(final SparkSession sparkSession) {
        return generateCommandLine(new DecryptMode(sparkSession));
    }

    /**
     * Get all configuration settings for the current dataset.
     *
     * @return Information needed to decrypt dataset
     */
    public SparkDecryptConfig getConfig() {
        // The shared secret key is read from the environment, never from the command line.
        final SecretKey keyMaterial = KeyUtil.sharedSecretKeyFromString(System.getenv(KEY_ENV_VAR));
        return SparkDecryptConfig.builder()
                .source(requiredArgs.getInput())
                .fileFormat(optionalArgs.fileFormat)
                .targetDir(optionalArgs.output)
                .overwrite(optionalArgs.overwrite)
                .csvInputNullValue(optionalArgs.csvInputNullValue)
                .csvOutputNullValue(optionalArgs.csvOutputNullValue)
                .secretKey(keyMaterial)
                .salt(requiredArgs.getId().toString())
                .failOnFingerprintColumns(optionalArgs.failOnFingerprintColumns)
                .build();
    }

    /**
     * Ensure requirements are met to run.
     *
     * @throws C3rIllegalArgumentException If user input is invalid
     */
    private void validate() {
        if (requiredArgs.getId() == null || requiredArgs.getId().toString().isBlank()) {
            throw new C3rIllegalArgumentException("Specified collaboration identifier is blank.");
        }
    }

    /**
     * Execute the decryption as specified on the command line.
     *
     * @return {@value Main#SUCCESS} if no errors encountered else {@value Main#FAILURE}
     */
    @Override
    public Integer call() {
        try {
            validate();
            final SparkDecryptConfig cfg = getConfig();
            if (!optionalArgs.dryRun) {
                log.info("Decrypting data from {}.", cfg.getSourceFile());
                switch (cfg.getFileFormat()) {
                    case CSV:
                        final Dataset<Row> csvDataset = SparkCsvReader.readInput(sparkSession,
                                cfg.getSourceFile(),
                                cfg.getCsvInputNullValue(),
                                /* externalHeaders */ null,
                                /* skipHeaderNormalization */ true);
                        final Dataset<Row> unmarshalledCsvDataset = SparkUnmarshaller.decrypt(csvDataset, cfg);
                        SparkCsvWriter.writeOutput(unmarshalledCsvDataset, cfg.getTargetFile(), cfg.getCsvOutputNullValue());
                        break;
                    case PARQUET:
                        final Dataset<Row> parquetDataset = SparkParquetReader.readInput(
                                sparkSession,
                                cfg.getSourceFile(),
                                /* skipHeaderNormalization */ true,
                                ParquetConfig.DEFAULT);
                        final Dataset<Row> unmarshalledParquetDataset = SparkUnmarshaller.decrypt(parquetDataset, cfg);
                        SparkParquetWriter.writeOutput(unmarshalledParquetDataset, cfg.getTargetFile());
                        break;
                    default:
                        throw new C3rIllegalArgumentException("Unrecognized file format: " + cfg.getFileFormat());
                }
                // NOTE(review): the session is only closed on this successful, non-dry-run path;
                // error and dry-run paths leave it open — consider closing in a finally block. TODO confirm intent.
                SparkSessionUtil.closeSparkSession(sparkSession);
                log.info("Decrypted data saved in {}.", cfg.getTargetFile());
            } else {
                log.info("Dry run: No data will be decrypted from {}.", cfg.getSourceFile());
            }
        } catch (Exception e) {
            Main.handleException(e, optionalArgs.enableStackTraces);
            return Main.FAILURE;
        }
        return Main.SUCCESS;
    }
}
| 2,464 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/cli/SchemaMode.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.spark.io.schema.CsvSchemaGenerator;
import com.amazonaws.c3r.spark.io.schema.ParquetSchemaGenerator;
import com.amazonaws.c3r.spark.utils.C3rCliSparkProperties;
import com.amazonaws.c3r.spark.utils.SparkSessionUtil;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.sql.SparkSession;
import picocli.CommandLine;
import java.io.File;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import java.util.concurrent.Callable;
import static com.amazonaws.c3r.spark.cli.Main.generateCommandLine;
/**
 * Command line utility to help create a schema for a data file. Walks the user through each column in the input to see if/how it should be
 * transformed during encryption for upload to a collaboration.
 */
@Slf4j
@Getter
@CommandLine.Command(name = "schema",
        mixinStandardHelpOptions = true,
        version = C3rSdkProperties.VERSION,
        descriptionHeading = "%nDescription:%n",
        description = "Generate an encryption schema for a tabular file.")
public class SchemaMode implements Callable<Integer> {
    /**
     * Required command line arguments.
     */
    @Getter
    static class RequiredArgs {
        /**
         * {@value CliDescriptions#INPUT_DESCRIPTION_SCHEMA}.
         */
        @CommandLine.Parameters(
                description = CliDescriptions.INPUT_DESCRIPTION_SCHEMA,
                paramLabel = "<input>")
        private String input = null;
    }

    /**
     * Required values as specified by the user.
     */
    @CommandLine.ArgGroup(multiplicity = "1", heading = "%nRequired parameters:%n")
    private RequiredArgs requiredArgs = new RequiredArgs();

    /**
     * Class for the different modes of scheme generation.
     */
    @Getter
    public static class SubMode {
        /**
         * Create a simple schema automatically.
         */
        @CommandLine.Option(
                names = {"--template", "-t"},
                required = true,
                description = {"Create template schema file for <input>.",
                        "NOTE: user needs to edit schema file before use."})
        private boolean templateMode = false;

        /**
         * Walk user through entire schema creation process.
         */
        @CommandLine.Option(
                names = {"--interactive", "-i"},
                required = true,
                description = "Create a schema file interactively for <input>.")
        private boolean interactiveMode = false;
    }

    /**
     * Which generation mode to use for execution.
     */
    @CommandLine.ArgGroup(multiplicity = "1", heading = "%nGeneration mode (specify one of these):%n")
    private SubMode subMode = new SubMode();

    /**
     * Optional command line arguments.
     */
    @Getter
    static class OptionalArgs {
        /**
         * {@value CliDescriptions#AWS_PROFILE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--profile", "-l"},
                description = CliDescriptions.AWS_PROFILE_DESCRIPTION)
        private String profile = null;

        /**
         * {@value CliDescriptions#AWS_REGION_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--region", "-g"},
                description = CliDescriptions.AWS_REGION_DESCRIPTION)
        private String region = null;

        /**
         * For description see {@link CliDescriptions#ID_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--id"},
                description = CliDescriptions.ID_DESCRIPTION,
                paramLabel = "<value>")
        private UUID id = null;

        /**
         * If this input file has headers.
         *
         * <p>
         * Note: Using a default value of {@code true} means when the flag {@code --noHeaders}
         * is passed, @{code hasHeaders} is set to {@code false}.
         */
        @CommandLine.Option(names = {"--noHeaders", "-p"},
                description = "Indicates <input> has no column headers (CSV only).")
        private boolean hasHeaders = true;

        /**
         * {@value CliDescriptions#FILE_FORMAT_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--fileFormat", "-e"},
                description = CliDescriptions.FILE_FORMAT_DESCRIPTION,
                paramLabel = "<format>")
        private FileFormat fileFormat = null;

        /**
         * {@value CliDescriptions#OUTPUT_DESCRIPTION_SCHEMA}.
         */
        @CommandLine.Option(names = {"--output", "-o"},
                description = CliDescriptions.OUTPUT_DESCRIPTION_SCHEMA,
                paramLabel = "<file>")
        private String output = null;

        /**
         * {@value CliDescriptions#OVERWRITE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--overwrite", "-f"},
                description = CliDescriptions.OVERWRITE_DESCRIPTION)
        private boolean overwrite = false;

        /**
         * {@value CliDescriptions#ENABLE_STACKTRACE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--enableStackTraces", "-v"},
                description = CliDescriptions.ENABLE_STACKTRACE_DESCRIPTION)
        private boolean enableStackTraces = false;
    }

    /**
     * Optional values as specified by the user.
     */
    @CommandLine.ArgGroup(exclusive = false, heading = "%nOptional parameters:%n")
    private OptionalArgs optionalArgs = new OptionalArgs();

    /** DAO for interacting with AWS Clean Rooms. */
    private final CleanRoomsDao cleanRoomsDao;

    /** SparkSession for orchestration. */
    private final SparkSession sparkSession;

    /**
     * Return a default CLI instance for schema generation.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     */
    SchemaMode() {
        this.cleanRoomsDao = CleanRoomsDao.builder().apiName(C3rCliSparkProperties.API_NAME).build();
        this.sparkSession = SparkSessionUtil.initSparkSession();
    }

    /**
     * Return a CLI instance for schema generation with a custom {@link CleanRoomsDao}.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     * @param sparkSession  Custom SparkSession to use for orchestration
     */
    SchemaMode(final CleanRoomsDao cleanRoomsDao, final SparkSession sparkSession) {
        this.cleanRoomsDao = cleanRoomsDao;
        this.sparkSession = sparkSession;
    }

    /**
     * Get the schema mode command line application with a custom {@link CleanRoomsDao}.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     * @param sparkSession  Custom SparkSession to use for orchestration
     * @return CommandLine interface for `schema` with customized AWS Clean Rooms access
     */
    static CommandLine getApp(final CleanRoomsDao cleanRoomsDao, final SparkSession sparkSession) {
        return generateCommandLine(new SchemaMode(cleanRoomsDao, sparkSession));
    }

    /**
     * Get the settings from AWS Clean Rooms for this collaboration.
     *
     * @return Cryptographic computing rules for collaboration, or {@code null} if not applicable.
     */
    public ClientSettings getClientSettings() {
        // No collaboration id means schema generation proceeds without collaboration-specific settings.
        if (optionalArgs.id == null) {
            return null;
        }
        final var dao = cleanRoomsDao != null
                ? cleanRoomsDao
                : CleanRoomsDao.builder().apiName(C3rCliSparkProperties.API_NAME).build();
        return dao.withProfile(optionalArgs.profile).withRegion(optionalArgs.region)
                .getCollaborationDataEncryptionMetadata(optionalArgs.id.toString());
    }

    /**
     * Validates that required information is specified.
     *
     * @throws C3rIllegalArgumentException If user input is invalid
     */
    private void validate() {
        if (requiredArgs.getInput().isBlank()) {
            throw new C3rIllegalArgumentException("Specified input file name is blank.");
        }
        if (optionalArgs.output != null && optionalArgs.output.isBlank()) {
            throw new C3rIllegalArgumentException("Specified output file name is blank.");
        }
    }

    /**
     * Execute schema generation help utility.
     *
     * @return {@value Main#SUCCESS} if no errors encountered else {@value Main#FAILURE}
     */
    @Override
    public Integer call() {
        try {
            validate();
            final File file = new File(requiredArgs.getInput());
            final String fileNameNoPath = file.getName();
            final String outFile = Objects.requireNonNullElse(optionalArgs.output, fileNameNoPath + ".json");
            final FileFormat fileFormat = Optional.ofNullable(optionalArgs.fileFormat).orElseGet(() ->
                    FileFormat.fromFileName(requiredArgs.getInput()));
            if (fileFormat == null) {
                // Fixed: the message previously referenced a nonexistent `--format` flag; the option is `--fileFormat`.
                throw new C3rIllegalArgumentException("Unknown file format (consider using the --fileFormat flag): "
                        + requiredArgs.getInput());
            }
            switch (fileFormat) {
                case CSV:
                    final var csvSchemaGenerator = CsvSchemaGenerator.builder()
                            .inputCsvFile(requiredArgs.getInput())
                            .hasHeaders(optionalArgs.hasHeaders)
                            .targetJsonFile(outFile)
                            .overwrite(optionalArgs.overwrite)
                            .clientSettings(getClientSettings())
                            .build();
                    csvSchemaGenerator.generateSchema(subMode);
                    break;
                case PARQUET:
                    // Parquet files carry their own column metadata, so --noHeaders makes no sense here.
                    if (!optionalArgs.hasHeaders) {
                        throw new C3rIllegalArgumentException("--noHeaders is not applicable for Parquet files.");
                    }
                    final var parquetSchemaGenerator = ParquetSchemaGenerator.builder()
                            .inputParquetFile(requiredArgs.getInput())
                            .targetJsonFile(outFile)
                            .overwrite(optionalArgs.overwrite)
                            .clientSettings(getClientSettings())
                            .sparkSession(sparkSession)
                            .build();
                    parquetSchemaGenerator.generateSchema(subMode);
                    break;
                default:
                    throw new C3rIllegalArgumentException("Unsupported file format for schema generation: " + fileFormat);
            }
        } catch (Exception e) {
            Main.handleException(e, optionalArgs.enableStackTraces);
            return Main.FAILURE;
        }
        return Main.SUCCESS;
    }
}
| 2,465 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/cli/Main.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.spark.utils.C3rCliSparkProperties;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import lombok.extern.slf4j.Slf4j;
import picocli.CommandLine;
/**
* Top level class for the CLI. Global options such as how to handle command line parsing are configured here and then
* subcommand specific options are configured in each subcommand.
*/
@Slf4j
@CommandLine.Command(
name = C3rCliSparkProperties.APP_NAME,
mixinStandardHelpOptions = true,
version = C3rSdkProperties.VERSION,
description = "Cryptographic computing tool for use with AWS Clean Rooms and Apache Spark.",
subcommands = {SchemaMode.class, EncryptMode.class, DecryptMode.class})
public final class Main {
/**
* Return value to indicate a child subcommand ran successfully.
*/
public static final int SUCCESS = 0;
/**
* Return value to indicate a child subcommand did not finish successfully.
* Further information about the failure will be in the logs/CLI.
*/
public static final int FAILURE = 1;
/**
* Create instance of the command line interface for all child subcommands.
*/
private Main() {
}
/**
* Get a copy of the application without passing in arguments yet.
* NOTE: The object keeps state between calls so if you include a boolean flag on one run and not on the next,
* the flag will still evaluate to true
*
* @return CommandLine interface to utility that you can use to add additional logging or information to
*/
static CommandLine getApp() {
return generateCommandLine(new Main());
}
/**
* Constructs a new CommandLine interpreter with the specified object with picocli annotations.
*
* @param command The object with appropriate picocli annotations.
* @return The constructed command line interpreter.
*/
static CommandLine generateCommandLine(final Object command) {
return new CommandLine(command).setTrimQuotes(true).setCaseInsensitiveEnumValuesAllowed(true);
}
/**
* Handle top level logging of errors during execution.
*
* @param e Error encountered
* @param enableStackTraces Whether the full stacktrace should be printed
*/
static void handleException(final Exception e, final boolean enableStackTraces) {
if (enableStackTraces) {
log.error("An error occurred: {}", e.getMessage(), e);
} else if (e instanceof C3rRuntimeException) {
log.error("An error occurred: {}", e.getMessage());
} else {
log.error("An unexpected error occurred: {}", e.getClass());
log.error("Note: the --enableStackTraces flag can provide additional context for errors.");
}
log.warn("Output files may have been left on disk.");
}
/**
* Execute the application with a particular set of arguments.
*
* @param args Set of strings containing the options to use on this execution pass
*/
public static void main(final String[] args) {
final int exitCode = getApp().execute(args);
System.exit(exitCode);
}
}
| 2,466 |
0 | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark | Create_ds/c3r/c3r-cli-spark/src/main/java/com/amazonaws/c3r/spark/cli/EncryptMode.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.cli;
import com.amazonaws.c3r.cleanrooms.CleanRoomsDao;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.json.GsonUtil;
import com.amazonaws.c3r.spark.action.SparkMarshaller;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import com.amazonaws.c3r.spark.io.csv.SparkCsvReader;
import com.amazonaws.c3r.spark.io.csv.SparkCsvWriter;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetReader;
import com.amazonaws.c3r.spark.io.parquet.SparkParquetWriter;
import com.amazonaws.c3r.spark.utils.C3rCliSparkProperties;
import com.amazonaws.c3r.spark.utils.SparkSessionUtil;
import com.amazonaws.c3r.utils.C3rSdkProperties;
import com.amazonaws.c3r.utils.FileUtil;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import picocli.CommandLine;
import javax.crypto.SecretKey;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
import static com.amazonaws.c3r.spark.cli.Main.generateCommandLine;
/**
* Supports cryptographic computations on data in preparation for upload to an AWS Clean Rooms collaboration.
*/
@Slf4j
@Getter
@CommandLine.Command(name = "encrypt",
mixinStandardHelpOptions = true,
version = C3rSdkProperties.VERSION,
descriptionHeading = "%nDescription:%n",
description = "Encrypt data content as an Apache Spark job for use in an AWS Clean Rooms collaboration.")
public class EncryptMode implements Callable<Integer> {
    /**
     * Required command line arguments.
     *
     * <p>
     * Fields are populated by picocli during argument parsing and remain {@code null} until then.
     */
    @Getter
    static class RequiredArgs {
        /**
         * {@value CliDescriptions#INPUT_DESCRIPTION_CRYPTO}.
         */
        @CommandLine.Parameters(description = CliDescriptions.INPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<input>")
        private String input = null;

        /**
         * {@value CliDescriptions#SCHEMA_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--schema", "-s"},
                description = CliDescriptions.SCHEMA_DESCRIPTION,
                required = true,
                paramLabel = "<file>")
        private String schema = null;

        /**
         * {@value CliDescriptions#ID_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--id"},
                description = CliDescriptions.ID_DESCRIPTION,
                paramLabel = "<value>",
                required = true)
        private UUID id = null;
    }

    /**
     * Required values as specified by the user.
     */
    @CommandLine.ArgGroup(multiplicity = "1", exclusive = false, heading = "%nRequired parameters:%n")
    private RequiredArgs requiredArgs = new RequiredArgs();
    /**
     * Optional command line arguments.
     *
     * <p>
     * Fields are populated by picocli during argument parsing; defaults apply when a flag is omitted.
     */
    @Getter
    static class OptionalArgs {
        /**
         * {@value CliDescriptions#AWS_PROFILE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--profile", "-l"},
                description = CliDescriptions.AWS_PROFILE_DESCRIPTION)
        private String profile = null;

        /**
         * {@value CliDescriptions#AWS_REGION_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--region", "-g"},
                description = CliDescriptions.AWS_REGION_DESCRIPTION)
        private String region = null;

        /**
         * {@value CliDescriptions#FILE_FORMAT_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--fileFormat", "-e"},
                description = CliDescriptions.FILE_FORMAT_DESCRIPTION,
                paramLabel = "<format>")
        private FileFormat fileFormat = null;

        /**
         * {@value CliDescriptions#OUTPUT_DESCRIPTION_CRYPTO}.
         */
        @CommandLine.Option(names = {"--output", "-o"},
                description = CliDescriptions.OUTPUT_DESCRIPTION_CRYPTO,
                paramLabel = "<dir>")
        private String output = null;

        /**
         * {@value CliDescriptions#OVERWRITE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--overwrite", "-f"},
                description = CliDescriptions.OVERWRITE_DESCRIPTION)
        private boolean overwrite = false;

        /**
         * {@value CliDescriptions#DRY_RUN_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--dryRun", "-n"},
                description = CliDescriptions.DRY_RUN_DESCRIPTION)
        private boolean dryRun = false;

        /**
         * {@value CliDescriptions#ENCRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvInputNULLValue", "-r"},
                description = CliDescriptions.ENCRYPT_CSV_INPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvInputNullValue = null;

        /**
         * {@value CliDescriptions#ENCRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--csvOutputNULLValue", "-w"},
                description = CliDescriptions.ENCRYPT_CSV_OUTPUT_NULL_VALUE_DESCRIPTION,
                paramLabel = "<value>")
        private String csvOutputNullValue = null;

        /**
         * {@value CliDescriptions#PARQUET_BINARY_AS_STRING}.
         */
        // Boxed Boolean: null means "flag not given", distinct from an explicit false.
        @CommandLine.Option(names = {"--parquetBinaryAsString"},
                description = CliDescriptions.PARQUET_BINARY_AS_STRING)
        private Boolean parquetBinaryAsString = null;

        /**
         * {@value CliDescriptions#ENABLE_STACKTRACE_DESCRIPTION}.
         */
        @CommandLine.Option(names = {"--enableStackTraces", "-v"},
                description = CliDescriptions.ENABLE_STACKTRACE_DESCRIPTION)
        private boolean enableStackTraces = false;
    }

    /**
     * Optional values as specified by the user.
     */
    @CommandLine.ArgGroup(exclusive = false, heading = "%nOptional parameters:%n")
    private OptionalArgs optionalArgs = new OptionalArgs();

    /** DAO for interacting with AWS Clean Rooms. */
    private final CleanRoomsDao cleanRoomsDao;

    /** SparkSession for orchestration. */
    private final SparkSession sparkSession;
    /**
     * Return a default CLI instance for encryption.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     */
    EncryptMode() {
        // Default collaborators: a Clean Rooms DAO tagged with this CLI's API name and a newly initialized Spark session.
        this.cleanRoomsDao = CleanRoomsDao.builder().apiName(C3rCliSparkProperties.API_NAME).build();
        this.sparkSession = SparkSessionUtil.initSparkSession();
    }
    /**
     * Return a CLI instance for an encryption pass with a custom {@link CleanRoomsDao} and SparkSession.
     *
     * <p>
     * Note: {@link #getApp} is the intended method for manually creating this class
     * with the appropriate CLI settings.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     * @param sparkSession Custom SparkSession to use for orchestration
     */
    EncryptMode(final CleanRoomsDao cleanRoomsDao, final SparkSession sparkSession) {
        // Dependencies are injected as-is; a null DAO falls back to a default in getClientSettings().
        this.cleanRoomsDao = cleanRoomsDao;
        this.sparkSession = sparkSession;
    }
    /**
     * Get the encrypt mode command line application with a custom {@link CleanRoomsDao} and SparkSession.
     *
     * @param cleanRoomsDao Custom {@link CleanRoomsDao} to use for Clean Rooms API calls
     * @param sparkSession Custom SparkSession to use for orchestration
     * @return CommandLine interface for `encrypt` with customized AWS Clean Rooms access and standard CLI settings
     */
    public static CommandLine getApp(final CleanRoomsDao cleanRoomsDao, final SparkSession sparkSession) {
        // Main.generateCommandLine applies the shared CLI settings (trimmed quotes, case-insensitive enums).
        return generateCommandLine(new EncryptMode(cleanRoomsDao, sparkSession));
    }
/**
* Get the settings from AWS Clean Rooms for this collaboration.
*
* @return Cryptographic computing rules for collaboration
*/
public ClientSettings getClientSettings() {
final var dao = cleanRoomsDao != null
? cleanRoomsDao
: CleanRoomsDao.builder().apiName(C3rCliSparkProperties.API_NAME).build();
return dao.withProfile(optionalArgs.profile).withRegion(optionalArgs.region)
.getCollaborationDataEncryptionMetadata(requiredArgs.id.toString());
}
/**
 * All the configuration information needed for encrypting data.
 *
 * @return All cryptographic settings and information on data processing
 * @throws C3rRuntimeException If the schema file can't be parsed
 * @throws C3rIllegalArgumentException If the schema file is empty
 */
public SparkEncryptConfig getConfig() {
    // Shared secret key material is supplied via the environment, never on the command line.
    final SecretKey key = KeyUtil.sharedSecretKeyFromString(System.getenv(KeyUtil.KEY_ENV_VAR));
    final TableSchema schema;
    try {
        schema = GsonUtil.fromJson(FileUtil.readBytes(requiredArgs.getSchema()), TableSchema.class);
    } catch (Exception e) {
        throw new C3rRuntimeException("Failed to parse the table schema file: " + requiredArgs.getSchema()
                + ". Please see the stack trace for where the parsing failed.", e);
    }
    if (schema == null) {
        // Gson returns null for an empty input rather than raising.
        throw new C3rIllegalArgumentException("The table schema file was empty: " + requiredArgs.getSchema());
    }
    return SparkEncryptConfig.builder()
            .source(requiredArgs.getInput())
            .fileFormat(optionalArgs.fileFormat)
            .targetDir(optionalArgs.output)
            .overwrite(optionalArgs.overwrite)
            .csvInputNullValue(optionalArgs.csvInputNullValue)
            .csvOutputNullValue(optionalArgs.csvOutputNullValue)
            .secretKey(key)
            .salt(requiredArgs.getId().toString())
            .settings(getClientSettings())
            .tableSchema(schema)
            .build();
}
/**
 * All the configuration information needed specifically for Parquet files.
 *
 * @return All the settings on processing Parquet data
 */
public ParquetConfig getParquetConfig() {
    // Only the binary-as-string option is surfaced on the CLI today.
    return ParquetConfig.builder()
            .binaryAsString(optionalArgs.parquetBinaryAsString)
            .build();
}
/**
 * Ensure required settings exist.
 *
 * @throws C3rIllegalArgumentException If user input is invalid
 */
private void validate() {
    // The schema file must exist and be readable before anything else is attempted.
    FileUtil.verifyReadableFile(requiredArgs.getSchema());
    final var collaborationId = requiredArgs.getId();
    if (collaborationId == null || collaborationId.toString().isBlank()) {
        throw new C3rIllegalArgumentException("Specified collaboration identifier is blank.");
    }
}
/**
 * Log information about how the data is being encrypted.
 *
 * <p>
 * Silently returns when there are no columns of the category to report.
 *
 * @param columnSchemas Description of how input data should be transformed during the encryption process
 */
void printColumCategoryInfo(final List<ColumnSchema> columnSchemas) {
    if (columnSchemas.isEmpty()) {
        return;
    }
    log.info("{} {} column{} being generated:",
            columnSchemas.size(),
            columnSchemas.get(0).getType(),
            columnSchemas.size() > 1 ? "s" : "");
    for (var c : columnSchemas) {
        // Parameterized logging instead of string concatenation (SLF4J idiom).
        log.info(" * {}", c.getTargetHeader());
    }
}
/**
 * Print summary information about what will be in the encrypted output.
 *
 * @param tableSchema How data will be transformed during encryption
 */
private void printColumnTransformInfo(final TableSchema tableSchema) {
    // Bucket the schema's columns by transformation category, then report each bucket.
    final var sealed = new ArrayList<ColumnSchema>();
    final var fingerprints = new ArrayList<ColumnSchema>();
    final var cleartext = new ArrayList<ColumnSchema>();
    for (final var column : tableSchema.getColumns()) {
        switch (column.getType()) {
            case SEALED:
                sealed.add(column);
                break;
            case FINGERPRINT:
                fingerprints.add(column);
                break;
            default:
                // Anything else passes through as cleartext.
                cleartext.add(column);
                break;
        }
    }
    printColumCategoryInfo(sealed);
    printColumCategoryInfo(fingerprints);
    printColumCategoryInfo(cleartext);
}
/**
 * Encrypt data for upload to an AWS Clean Rooms.
 *
 * <p>
 * Validates CLI input, builds the encryption configuration (fetching collaboration
 * settings from Clean Rooms), then reads, encrypts, and writes the data with Spark.
 * In dry-run mode only validation and configuration are performed.
 *
 * @return {@link Main#SUCCESS} if no errors, else {@link Main#FAILURE}
 */
@Override
public Integer call() {
    try {
        // Fail fast on unreadable schema / blank collaboration id before any Spark work.
        validate();
        final SparkEncryptConfig cfg = getConfig();
        final ParquetConfig pCfg = getParquetConfig();
        printColumnTransformInfo(cfg.getTableSchema());
        if (!optionalArgs.dryRun) {
            log.info("Encrypting data from {}.", cfg.getSourceFile());
            switch (cfg.getFileFormat()) {
                case CSV:
                    // Parquet-only flags are rejected for CSV input rather than silently ignored.
                    if (pCfg.isSet()) {
                        throw new C3rIllegalArgumentException("Parquet options specified for CSV file.");
                    }
                    final Dataset<Row> csvDataset = SparkCsvReader.readInput(sparkSession,
                            cfg.getSourceFile(),
                            cfg.getCsvInputNullValue(),
                            cfg.getTableSchema().getPositionalColumnHeaders());
                    final Dataset<Row> marshalledCsvDataset = SparkMarshaller.encrypt(csvDataset, cfg);
                    SparkCsvWriter.writeOutput(marshalledCsvDataset, cfg.getTargetFile(), cfg.getCsvOutputNullValue());
                    break;
                case PARQUET:
                    final Dataset<Row> parquetDataset = SparkParquetReader.readInput(
                            sparkSession,
                            cfg.getSourceFile(),
                            /* skipHeaderNormalization */ false,
                            pCfg);
                    final Dataset<Row> marshalledParquetDataset = SparkMarshaller.encrypt(parquetDataset, cfg);
                    SparkParquetWriter.writeOutput(marshalledParquetDataset, cfg.getTargetFile());
                    break;
                default:
                    throw new C3rIllegalArgumentException("Unrecognized file format: " + cfg.getFileFormat());
            }
            // NOTE(review): the Spark session is only closed on the successful non-dry-run
            // path; dry runs and failures leave it open — confirm this is intentional.
            SparkSessionUtil.closeSparkSession(sparkSession);
            log.info("Encrypted data was saved to {}.", cfg.getTargetFile());
        } else {
            log.info("Dry run: No data will be encrypted from {}.", cfg.getSourceFile());
        }
    } catch (Exception e) {
        // All failures are funneled through the shared handler; stack traces are opt-in.
        Main.handleException(e, optionalArgs.enableStackTraces);
        return Main.FAILURE;
    }
    return Main.SUCCESS;
}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Command line interface for using Cryptographic Computing for Clean Rooms with AWS Clean Rooms.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
@DefaultAnnotation(NonNull.class)
package com.amazonaws.c3r.spark.cli;
import edu.umd.cs.findbugs.annotations.DefaultAnnotation;
import lombok.NonNull;
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.action.RowMarshaller;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.encryption.EncryptionContext;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.internal.Nonce;
import com.amazonaws.c3r.spark.config.SparkEncryptConfig;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.functions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import scala.jdk.CollectionConverters;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
* Spark orchestration of the C3R SDK for encryption.
*
* <p>
* Note that there are a few differences between C3R's standard offerings and orchestrating with Spark.
*
* <p>
* The most important difference is the change in trust boundaries. When using the C3R normally, files exist on the same machine running
* C3R. C3R never writes any data to disk unencrypted unless it is meant to be unencrypted in the output. With Spark, as an input file is
* read, Spark is partitioning that data in memory and/or on disk before C3R ever gets an opportunity to encrypt it. This means that
* unless your Spark instance is configured to encrypt these files, cleartext forms of data that will eventually be encrypted may be
* written to disk and/or distributed to Spark Workers before it is encrypted. Further, Spark Workers may exist on other machines or
* networks. If a Spark job fails, there could be cleartext copies of the input file leftover across your Spark infrastructure. It is up
* to you to understand if this is permissible for your threat model and to configure your Spark server according to your needs.
*
* <p>
* Second, this Spark orchestration is not managing file permissions for the output file. C3R normally sets this file to be RW by the Owner
* only. Files written by Spark will inherit the permissions of where they are written.
*
* <p>
* Third, Spark partitions and distributes the partitioned data before C3R drops columns that will not be included in the output. When
* using the C3R SDK or CLI, these columns are dropped during the data load step before they're ever written to disk. If these columns
* should never leave the initial location, they should be removed from the data before it is handed to this Spark orchestration.
*
* <p>
* Fourth, Spark may partition the data and thus the output files. You may need to take additional steps to merge the data if downstream
* steps require it be one file. Note that when using S3 and Glue with AWS Clean Rooms, this should not be necessary.
*
* <p>
* Finally, certain functionality like shuffling rows, dropping columns, finding max length of values in a column, and finding duplicate
* values in a column are all revised in this orchestration to take advantage of Spark. These are normally handled by C3R's
* {@link RowMarshaller}. All of these functions will behave the same as they do with C3R except shuffling rows.
* Instead of sorting on Nonces created using Java's {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle.
*/
public abstract class SparkMarshaller {
/**
* Spark orchestration of C3R encryption.
*
* <p>
* Please note that only {@code String} data types are currently supported.
*
* @param inputData input Dataset to process
* @param encryptConfig Encryption config to use for processing
* @return The encrypted Dataset
*/
public static Dataset<Row> encrypt(final Dataset<Row> inputData, final SparkEncryptConfig encryptConfig) {
final List<ColumnInsight> columnInsights = encryptConfig.getTableSchema().getColumns().stream().map(ColumnInsight::new)
.collect(Collectors.toList());
Dataset<Row> dataset = filterSourceColumnsBySchema(inputData, columnInsights);
updateMaxValuesPerColumn(dataset, columnInsights);
validateDuplicates(encryptConfig.getSettings(), dataset, columnInsights);
dataset = shuffleData(dataset);
dataset = mapSourceToTargetColumns(dataset, columnInsights);
populateColumnPositions(dataset, columnInsights);
dataset = marshalData(dataset, encryptConfig, columnInsights);
return dataset;
}
/**
* Filter source columns not in the schema.
*
* <p>
* This is normally handled by C3R's {@link RowMarshaller} by dropping columns that won't be in the output
* during the data load.
*
* @param rawInputData the Dataset to filter
* @param columnInsights Insights for all the columns to be processed
* @return A Dataset containing only source columns defined in the schema
*/
static Dataset<Row> filterSourceColumnsBySchema(final Dataset<Row> rawInputData, final List<ColumnInsight> columnInsights) {
final Set<ColumnHeader> schemaSourceColumns = columnInsights.stream()
.map(ColumnSchema::getSourceHeader)
.collect(Collectors.toSet());
final Set<ColumnHeader> inputColumns = Arrays.stream(rawInputData.columns())
.map(ColumnHeader::new)
.collect(Collectors.toSet());
inputColumns.removeAll(schemaSourceColumns);
Dataset<Row> toReturn = rawInputData;
for (ColumnHeader columnHeader : inputColumns) {
toReturn = toReturn.drop(columnHeader.toString());
}
return toReturn;
}
/**
* Updates the passed ColumnInsights with the max value length of their columns. These values are used during encryption whenever
* {@link PadType#MAX} is configured for a sealed column.
*
* <p>
* This is normally handled by C3R's {@link RowMarshaller} tracking the size of each value being read in
* during the data load.
*
* @param columnInsights Insights for all the columns to be processed
* @param rawInputData The Dataset to run the queries against
*/
static void updateMaxValuesPerColumn(final Dataset<Row> rawInputData,
final List<ColumnInsight> columnInsights) {
final Map<ColumnHeader, List<ColumnInsight>> sourceMappedColumnInsights = columnInsights.stream()
.collect(Collectors.groupingBy(ColumnInsight::getSourceHeader));
Arrays.stream(rawInputData.columns()).forEach(col -> {
final Dataset<Row> columnData = rawInputData.select(functions.col(col)
.as("column"));
columnData.createOrReplaceTempView("singleColumnData");
final ColumnHeader columnHeader = new ColumnHeader(col);
final Row longestValueRow = rawInputData.sparkSession()
.sql("SELECT max(bit_length(column))\n" +
"FROM singleColumnData\n" +
"ORDER BY bit_length('column') DESC\n" +
"LIMIT 1")
.first();
final int maxBitLength = (longestValueRow.get(0) == null) ? 0 : longestValueRow.getInt(0);
final int maxByteLength = maxBitLength / Byte.SIZE;
for (ColumnInsight insight : sourceMappedColumnInsights.get(columnHeader)) {
insight.setMaxValueLength(maxByteLength);
}
});
}
/**
* Validates whether the input data meets the encryption settings for `allowDuplicates`.
*
* <p>
* This is normally handled by C3R's {@link RowMarshaller} querying the temporary SQL table data is loaded
* to.
*
* @param clientSettings The encryption settings to validate with
* @param rawInputData The Dataset to be validated
* @param columnInsights Insights for all the columns to be processed
* @throws C3rRuntimeException If input data is invalid
*/
static void validateDuplicates(final ClientSettings clientSettings, final Dataset<Row> rawInputData,
final List<ColumnInsight> columnInsights) {
if (clientSettings.isAllowDuplicates()) {
return;
}
// Check for duplicates when `allowDuplicates` is false
final String[] fingerprintColumns = columnInsights.stream()
.filter(columnSchema -> columnSchema.getType() == ColumnType.FINGERPRINT) // enforced on fingerprint columns only
.map(ColumnSchema::getSourceHeader)
.map(ColumnHeader::toString)
.distinct()
.toArray(String[]::new);
// Check for duplicate non-null values
for (String col : fingerprintColumns) {
final Dataset<Row> filteredData = rawInputData.groupBy(col).count().filter("count > 1");
if (!filteredData.isEmpty()) {
throw new C3rRuntimeException("Duplicates were found in column `" + col + "`, but `allowDuplicates` is false.");
}
}
// Check for duplicate null values when `preserveNulls` is false
if (!clientSettings.isPreserveNulls()) {
for (String col : fingerprintColumns) {
final Column column = new Column(col);
final Dataset<Row> filteredData = rawInputData.select(column)
.groupBy(column)
.count()
.filter(column.isNull())
.filter("count > 1");
if (!filteredData.isEmpty()) {
throw new C3rRuntimeException("Duplicates NULLs were found in column `" + col + "`, but `allowDuplicates` and " +
"`preserveNulls` are false.");
}
}
}
}
/**
* Map the source columns to their respective target columns.
*
* <p>
* This is normally handled by C3R's {@link RowMarshaller} by writing input columns of data to the intended
* target columns during the data load.
*
* @param rawInputData the Dataset to map
* @param columnInsights Insights for all the columns to be processed
* @return A Dataset containing each target column
*/
static Dataset<Row> mapSourceToTargetColumns(final Dataset<Row> rawInputData, final List<ColumnInsight> columnInsights) {
final List<Column> targetColumns = new ArrayList<>();
columnInsights.forEach(target -> targetColumns.add(functions.col(target.getSourceHeader().toString())
.as(target.getTargetHeader().toString())));
return rawInputData.select(CollectionConverters.IteratorHasAsScala(targetColumns.iterator()).asScala().toSeq());
}
/**
* Encrypt source data. Requires that column positions have been populated in ColumnInsights.
*
* @param rawInputData The source data to be encrypted
* @param encryptConfig The encryption configuration
* @param columnInsights Insights for all the columns to be processed
* @return The encrypted data
* @throws C3rRuntimeException If the input data cannot be marshalled.
*/
static Dataset<Row> marshalData(final Dataset<Row> rawInputData, final SparkEncryptConfig encryptConfig,
final List<ColumnInsight> columnInsights) {
// Copy out values that need to be serialized
final ClientSettings settings = encryptConfig.getSettings();
final String salt = encryptConfig.getSalt();
final String base64EncodedKey = Base64.getEncoder().encodeToString(encryptConfig.getSecretKey().getEncoded());
final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema());
final StructField[] fields = rawInputData.schema().fields();
try {
return rawInputData.map((MapFunction<Row, Row>) row -> {
// Grab a nonce for the row
final Nonce nonce = Nonce.nextNonce();
// Build a list of transformers for each row, limiting state to keys/salts/settings POJOs
final Map<ColumnType, Transformer> transformers = Transformer.initTransformers(
KeyUtil.sharedSecretKeyFromString(base64EncodedKey),
salt,
settings,
false); // Not relevant to encryption
// For each column in the row, transform the data
return Row.fromSeq(
CollectionConverters.IteratorHasAsScala(columnInsights.stream().map(column -> {
if (column.getType() == ColumnType.CLEARTEXT) {
return row.get(column.getSourceColumnPosition());
}
if (fields[column.getSourceColumnPosition()].dataType() != DataTypes.StringType) {
throw new C3rRuntimeException("Encrypting non-String values is not supported. Non-String column marked" +
" for encryption: `" + column.getTargetHeader() + "`");
}
final Transformer transformer = transformers.get(column.getType());
final String data = row.getString(column.getSourceColumnPosition());
final byte[] dataBytes = data == null ? null : data.getBytes(StandardCharsets.UTF_8);
final EncryptionContext encryptionContext = new EncryptionContext(column, nonce, ClientDataType.STRING);
final byte[] marshalledBytes = transformer.marshal(dataBytes, encryptionContext);
return (marshalledBytes == null ? null : new String(marshalledBytes, StandardCharsets.UTF_8));
}).iterator()).asScala().toSeq());
}, rowEncoder);
} catch (C3rRuntimeException e) {
throw e;
} catch (Exception e) {
throw new C3rRuntimeException("Unknown exception when encrypting data.", e);
}
}
/**
* Find the positions for each column. Requires that source columns have already been mapped to targets.
*
* <p>
* Note that this method repurposes the {@link ColumnInsight#getSourceColumnPosition()} method to track the position of a target
* column in the source data after the source columns have been mapped to target column names.
*
* @param rawInputData The source data to map the columns with
* @param columnInsights Insights for all the columns to be processed
*/
static void populateColumnPositions(final Dataset<Row> rawInputData, final List<ColumnInsight> columnInsights) {
// Gather the positions of all the columns
final String[] columns = rawInputData.columns();
final Map<ColumnHeader, Integer> columnPositions = new HashMap<>();
for (int i = 0; i < columns.length; i++) {
columnPositions.put(new ColumnHeader(columns[i]), i);
}
for (ColumnInsight column : columnInsights) {
final int position = columnPositions.get(column.getTargetHeader());
column.setSourceColumnPosition(position);
}
}
/**
* Shuffles the input data to hide ordering.
*
* <p>
* This is normally handled by C3R's {@link RowMarshaller} by appending the Nonces used for each row to the
* data on load and then sorting on those nonces before writing out the data. Instead of sorting on Nonces created using Java's
* {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle.
*
* @param rawInputData The Dataset to shuffle
* @return The shuffled Dataset
*/
static Dataset<Row> shuffleData(final Dataset<Row> rawInputData) {
return rawInputData.orderBy(functions.rand());
}
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.spark.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.action.RowMarshaller;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.encryption.keys.KeyUtil;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.spark.config.SparkDecryptConfig;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import scala.jdk.CollectionConverters;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;
import java.util.Map;
/**
* Spark orchestration of the C3R SDK for decryption.
*
* <p>
* Note that there are a few differences between C3R's standard offerings and orchestrating with Spark.
*
* <p>
* The most important difference is the change in trust boundaries. When using the C3R normally, files exist on the same machine running
* C3R. C3R never writes any data to disk unencrypted unless it is meant to be unencrypted in the output. With Spark, as an input file is
* read, Spark is partitioning that data in memory and/or on disk before C3R ever gets an opportunity to encrypt it. This means that
* unless your Spark instance is configured to encrypt these files, cleartext forms of data that will eventually be encrypted may be
* written to disk and/or distributed to Spark Workers before it is encrypted. Further, Spark Workers may exist on other machines or
* networks. If a Spark job fails, there could be cleartext copies of the input file leftover across your Spark infrastructure. It is up
* to you to understand if this is permissible for your threat model and to configure your Spark server according to your needs.
*
* <p>
* Second, this Spark orchestration is not managing file permissions for the output file. C3R normally sets this file to be RW by the Owner
* only. Files written by Spark will inherit the permissions of where they are written.
*
* <p>
* Third, Spark partitions and distributes the partitioned data before C3R drops columns that will not be included in the output. When
* using the C3R SDK or CLI, these columns are dropped during the data load step before they're ever written to disk. If these columns
* should never leave the initial location, they should be removed from the data before it is handed to this Spark orchestration.
*
* <p>
* Fourth, Spark may partition the data and thus the output files. You may need to take additional steps to merge the data if downstream
* steps require it be one file. Note that when using S3 and Glue with AWS Clean Rooms, this should not be necessary.
*
* <p>
* Finally, certain functionality like shuffling rows, dropping columns, finding max length of values in a column, and finding duplicate
* values in a column are all revised in this orchestration to take advantage of Spark. These are normally handled by C3R's
* {@link RowMarshaller}. All of these functions will behave the same as they do with C3R except shuffling rows.
* Instead of sorting on Nonces created using Java's {@code SecureRandom}, Spark is using its own {@code rand()} function for the shuffle.
*/
public abstract class SparkUnmarshaller {
    /**
     * Sample of Spark orchestrating the C3R SDK for decryption.
     *
     * <p>
     * This function is currently setup to only process CSV files. It can be modified to instead take a {@code Dataset<Row>}. There is no
     * functionality specific to a CSV after the initial data load.
     *
     * <p>
     * Please note that only {@code String} data types are currently supported.
     *
     * @param inputData input Dataset to process
     * @param decryptConfig Decryption config to use for processing
     * @return The decrypted Dataset
     */
    public static Dataset<Row> decrypt(final Dataset<Row> inputData, final SparkDecryptConfig decryptConfig) {
        return unmarshalData(inputData, decryptConfig);
    }
    /**
     * Decrypt source data.
     *
     * @param rawInputData The source data to be decrypted
     * @param decryptConfig Decryption config to use for processing
     * @return The cleartext data
     * @throws C3rRuntimeException If the input data cannot be unmarshalled.
     */
    static Dataset<Row> unmarshalData(final Dataset<Row> rawInputData, final SparkDecryptConfig decryptConfig) {
        // Copy out values that need to be serialized (the lambda below runs on Spark workers,
        // so it may only capture serializable state).
        final String salt = decryptConfig.getSalt();
        final String base64EncodedKey = Base64.getEncoder().encodeToString(decryptConfig.getSecretKey().getEncoded());
        final boolean failOnFingerprintColumns = decryptConfig.isFailOnFingerprintColumns();
        final ExpressionEncoder<Row> rowEncoder = RowEncoder.apply(rawInputData.schema());
        final StructField[] fields = rawInputData.schema().fields();
        try {
            return rawInputData.map((MapFunction<Row, Row>) row -> {
                // Build a list of transformers for each row, limiting state to keys/salts/settings POJOs
                final Map<ColumnType, Transformer> transformers = Transformer.initTransformers(
                        KeyUtil.sharedSecretKeyFromString(base64EncodedKey),
                        salt,
                        null, // Not relevant to decryption.
                        failOnFingerprintColumns);
                // For each column in the row, transform the data
                final List<Object> unmarshalledValues = new ArrayList<>();
                for (int i = 0; i < row.size(); i++) {
                    // Pass through non-String data types
                    if (fields[i].dataType() != DataTypes.StringType) {
                        unmarshalledValues.add(row.get(i));
                        continue;
                    }
                    final String data = row.getString(i);
                    final byte[] dataBytes = data == null ? null : data.getBytes(StandardCharsets.UTF_8);
                    // Pick the transformer by sniffing each value's C3R descriptor prefix;
                    // unrecognized values pass through as cleartext.
                    Transformer transformer = transformers.get(ColumnType.CLEARTEXT); // Default to pass through
                    if (Transformer.hasDescriptor(transformers.get(ColumnType.SEALED), dataBytes)) {
                        transformer = transformers.get(ColumnType.SEALED);
                    } else if (Transformer.hasDescriptor(transformers.get(ColumnType.FINGERPRINT), dataBytes)) {
                        transformer = transformers.get(ColumnType.FINGERPRINT);
                    }
                    final byte[] unmarshalledBytes = transformer.unmarshal(dataBytes);
                    unmarshalledValues.add(unmarshalledBytes == null ? null : new String(unmarshalledBytes, StandardCharsets.UTF_8));
                }
                return Row.fromSeq(
                        CollectionConverters.IteratorHasAsScala(unmarshalledValues.iterator()).asScala().toSeq());
            }, rowEncoder);
        } catch (C3rRuntimeException e) {
            // Consistent with SparkMarshaller.marshalData: C3R errors (e.g. from
            // Transformer.unmarshal) are rethrown as-is instead of being re-wrapped
            // under a misleading "Unknown exception" message.
            throw e;
        } catch (Exception e) {
            throw new C3rRuntimeException("Unknown exception when decrypting data.", e);
        }
    }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* This package classes used to marshall (encrypt) and unmarshall (decrypt) data to and from the clean room for the various supported
* data types. {@link com.amazonaws.c3r.spark.action.SparkMarshaller} handles the logic of marshalling data outside of anything having to
* do with the actual data format and {@link com.amazonaws.c3r.spark.action.SparkUnmarshaller} does the same for unmarshalling. Each format
* specific class handles file I/O and value creation only for that particular data type.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.spark.action;
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.ParquetTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import static com.amazonaws.c3r.io.ParquetRowReaderTest.validateRowsGetValueContent;
import static com.amazonaws.c3r.io.ParquetRowReaderTest.validateRowsGetValueNullContent;
import static org.junit.jupiter.api.Assertions.assertEquals;
public class ParquetRowWriterTest {
    // Path for the writer's output; regenerated per test by setup().
    private String output;
    @BeforeEach
    public void setup() throws IOException {
        output = FileTestUtility.resolve("output.parquet").toString();
    }
    @Test
    public void getTargetNameTest() {
        final var inReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        final ParquetRowWriter writer =
                ParquetRowWriter.builder().targetName(output).parquetSchema(inReader.getParquetSchema()).build();
        assertEquals(output, writer.getTargetName());
    }
    @Test
    public void getHeadersTest() {
        final var inReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        final ParquetRowWriter writer =
                ParquetRowWriter.builder().targetName(output).parquetSchema(inReader.getParquetSchema()).build();
        assertEquals(ParquetTestUtility.PARQUET_TEST_DATA_HEADERS, writer.getHeaders());
    }
    /**
     * Read all rows from {@code inPath}, write them back out, and verify the
     * re-read rows match the originals exactly.
     *
     * @param inPath Parquet file to round-trip
     * @param rowCount Expected number of rows in the file
     * @param nonNullEntries Whether the file's entries are non-null ({@code true}) or all null ({@code false})
     */
    private void roundTripAssertEquals(final String inPath, final int rowCount, final boolean nonNullEntries) {
        final var inReader = ParquetRowReader.builder().sourceName(inPath).build();
        final var inRows = ParquetTestUtility.readAllRows(inReader);
        assertEquals(rowCount, inRows.size());
        if (nonNullEntries) {
            validateRowsGetValueContent(inRows);
        } else {
            validateRowsGetValueNullContent(inRows);
        }
        final ParquetRowWriter writer =
                ParquetRowWriter.builder().targetName(output).parquetSchema(inReader.getParquetSchema()).build();
        for (var row : inRows) {
            writer.writeRow(row);
        }
        // Fix: flush buffered rows BEFORE closing the writer (the original called
        // close() first, making the subsequent flush() a no-op at best).
        writer.flush();
        writer.close();
        final var outReader = ParquetRowReader.builder().sourceName(output).build();
        final var outRows = ParquetTestUtility.readAllRows(outReader);
        assertEquals(rowCount, outRows.size());
        for (int i = 0; i < rowCount; i++) {
            assertEquals(inRows.get(i), outRows.get(i), "row " + i);
        }
        outReader.close();
        inReader.close();
    }
    @Test
    public void roundTrip1RowTest() throws IOException {
        roundTripAssertEquals(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH, 1, true);
    }
    @Test
    public void roundTrip100Rows1GroupTest() throws IOException {
        roundTripAssertEquals(ParquetTestUtility.PARQUET_100_ROWS_PRIM_DATA_PATH, 100, true);
    }
    @Test
    public void roundTrip100Rows10GroupsTest() throws IOException {
        roundTripAssertEquals(ParquetTestUtility.PARQUET_100_ROWS_10_GROUPS_PRIM_DATA_PATH, 100, true);
    }
    @Test
    public void roundTrip1NullRowTest() throws IOException {
        roundTripAssertEquals(ParquetTestUtility.PARQUET_NULL_1_ROW_PRIM_DATA_PATH, 1, false);
    }
    @Test
    public void roundTrip100NullRows1GroupTest() throws IOException {
        roundTripAssertEquals(ParquetTestUtility.PARQUET_NULL_100_ROWS_PRIM_DATA_PATH, 100, false);
    }
}
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetDataType;
import com.amazonaws.c3r.data.ParquetRow;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.ParquetTestUtility;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Tests for {@code ParquetRowReader}: header discovery (with and without normalization),
 * row counting, schema type mapping, per-row value content, null handling, and the
 * maximum-column-count guard.
 *
 * <p>Fix: several {@code assertEquals} calls passed arguments as {@code (actual, expected)};
 * JUnit's contract is {@code assertEquals(expected, actual)}, and the reversed order produces
 * misleading failure messages. Argument order is corrected below; assertion semantics are
 * otherwise unchanged.
 */
public class ParquetRowReaderTest {
    /**
     * Reads the {@code boolean} column's value from a row.
     *
     * @param row Row of Parquet data
     * @return Value stored in the boolean column
     */
    public static Boolean getBoolean(final Row<ParquetValue> row) {
        return ((ParquetValue.Boolean) row.getValue(new ColumnHeader("boolean"))).getValue();
    }

    /**
     * Reads the {@code string} column's value from a row, decoding the binary payload as UTF-8.
     *
     * @param row Row of Parquet data
     * @return Value stored in the string column
     */
    public static String getString(final Row<ParquetValue> row) {
        return new String(((ParquetValue.Binary) row.getValue(new ColumnHeader("string"))).getValue().getBytes(),
                StandardCharsets.UTF_8);
    }

    /**
     * Reads the {@code int8} column's value from a row (physically stored as an int32).
     *
     * @param row Row of Parquet data
     * @return Value stored in the int8 column
     */
    public static Integer getInt8(final Row<ParquetValue> row) {
        return ((ParquetValue.Int32) row.getValue(new ColumnHeader("int8"))).getValue();
    }

    /**
     * Reads the {@code int16} column's value from a row (physically stored as an int32).
     *
     * @param row Row of Parquet data
     * @return Value stored in the int16 column
     */
    public static Integer getInt16(final Row<ParquetValue> row) {
        return ((ParquetValue.Int32) row.getValue(new ColumnHeader("int16"))).getValue();
    }

    /**
     * Reads the {@code int32} column's value from a row.
     *
     * @param row Row of Parquet data
     * @return Value stored in the int32 column
     */
    public static Integer getInt32(final Row<ParquetValue> row) {
        return ((ParquetValue.Int32) row.getValue(new ColumnHeader("int32"))).getValue();
    }

    /**
     * Reads the {@code int64} column's value from a row.
     *
     * @param row Row of Parquet data
     * @return Value stored in the int64 column
     */
    public static Long getInt64(final Row<ParquetValue> row) {
        return ((ParquetValue.Int64) row.getValue(new ColumnHeader("int64"))).getValue();
    }

    /**
     * Reads the {@code float} column's value from a row.
     *
     * @param row Row of Parquet data
     * @return Value stored in the float column
     */
    public static Float getFloat(final Row<ParquetValue> row) {
        return ((ParquetValue.Float) row.getValue(new ColumnHeader("float"))).getValue();
    }

    /**
     * Reads the {@code double} column's value from a row.
     *
     * @param row Row of Parquet data
     * @return Value stored in the double column
     */
    public static Double getDouble(final Row<ParquetValue> row) {
        return ((ParquetValue.Double) row.getValue(new ColumnHeader("double"))).getValue();
    }

    /**
     * Reads the {@code timestamp} column's raw value from a row (physically stored as an int64).
     *
     * @param row Row of Parquet data
     * @return Value stored in the timestamp column
     */
    public static Long getTimestamp(final Row<ParquetValue> row) {
        return ((ParquetValue.Int64) row.getValue(new ColumnHeader("timestamp"))).getValue();
    }

    /**
     * Validates that each row carries the generated sample values: every column's value is a
     * simple function of the row index (see the per-column expressions below), and the timestamp
     * is a fixed constant shared by all rows.
     *
     * @param rows Rows read from one of the primitive-data sample files
     */
    public static void validateRowsGetValueContent(final List<Row<ParquetValue>> rows) {
        for (int i = 0; i < rows.size(); i++) {
            final var row = rows.get(i);
            assertEquals(i % 2 == 0, getBoolean(row), "row " + i);
            assertEquals(String.valueOf(i), getString(row), "row " + i);
            assertEquals((8 + i) % 127, getInt8(row), "row " + i);
            assertEquals((16 + i) % 32767, getInt16(row), "row " + i);
            assertEquals((32 + i), getInt32(row), "row " + i);
            assertEquals((64 + i), getInt64(row), "row " + i);
            assertEquals((float) (1.0 + i), getFloat(row), 0.000001, "row " + i);
            assertEquals(-1.0 - i, getDouble(row), 0.000001, "row " + i);
            assertEquals(-446774400000000L, getTimestamp(row), "row " + i);
        }
    }

    /**
     * Validates that every column of every row holds a null value (null flag set, no bytes).
     *
     * @param rows Rows read from one of the null-data sample files
     */
    public static void validateRowsGetValueNullContent(final List<Row<ParquetValue>> rows) {
        for (var row : rows) {
            for (var header : ParquetTestUtility.PARQUET_TEST_DATA_HEADERS) {
                assertTrue(row.getValue(header).isNull());
                assertNull(row.getValue(header).getBytes());
            }
        }
    }

    /** The reader reports the source file it was built with. */
    @Test
    public void getSourceNameTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        assertEquals(
                ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH,
                pReader.getSourceName());
    }

    /** The reader exposes the file's column headers. */
    @Test
    public void getHeadersTest() {
        final var pReader = ParquetRowReader.builder()
                .sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH)
                .build();
        assertEquals(
                ParquetTestUtility.PARQUET_TEST_DATA_HEADERS,
                pReader.getHeaders());
    }

    /** With normalization enabled, headers are normalized both on the reader and its schema. */
    @Test
    public void getHeadersWithNormalizationTest() {
        final var pReader = ParquetRowReader.builder()
                .sourceName(ParquetTestUtility.PARQUET_SAMPLE_DATA_PATH)
                .skipHeaderNormalization(false)
                .build();
        assertEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS,
                pReader.getHeaders());
        assertEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS,
                pReader.getParquetSchema().getHeaders());
    }

    /** With normalization skipped, raw headers are preserved on the reader and its schema. */
    @Test
    public void getHeadersWithoutNormalizationTest() {
        final var pReader = ParquetRowReader.builder()
                .sourceName(ParquetTestUtility.PARQUET_SAMPLE_DATA_PATH)
                .skipHeaderNormalization(true)
                .build();
        assertEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS_NO_NORMALIZATION,
                pReader.getHeaders());
        assertEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS_NO_NORMALIZATION,
                pReader.getParquetSchema().getHeaders());
    }

    /** The read-row counter starts at zero and tracks the number of rows consumed. */
    @Test
    public void getReadRowCountTest() {
        var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        assertEquals(0, pReader.getReadRowCount());
        ParquetTestUtility.readAllRows(pReader);
        assertEquals(1, pReader.getReadRowCount());
        pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_100_ROWS_PRIM_DATA_PATH).build();
        ParquetTestUtility.readAllRows(pReader);
        assertEquals(100, pReader.getReadRowCount());
    }

    /** Each Parquet column maps to the expected C3R client data type. */
    @Test
    public void getParquetSchemaSupportsCryptoComputingTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        final var schema = pReader.getParquetSchema();
        ParquetTestUtility.PARQUET_TEST_DATA_TYPES.forEach((header, type) ->
                assertEquals(type, schema.getColumnType(header).getClientDataType(),
                        "column `" + header + "` has the wrong type"));
    }

    /** {@code next()} yields a row containing every expected column. */
    @Test
    public void nextTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        final var row = pReader.next();
        assertEquals(ParquetTestUtility.PARQUET_TEST_ROW_TYPE_ENTRIES.size(), row.size());
        for (var header : ParquetTestUtility.PARQUET_TEST_DATA_HEADERS) {
            assertTrue(row.hasColumn(header));
        }
    }

    /** Single-row file: one row read and the generated content is intact. */
    @Test
    public void validate1RowGetValueTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH).build();
        final var rows = ParquetTestUtility.readAllRows(pReader);
        // Fixed: expected value comes first in assertEquals.
        assertEquals(1, rows.size());
        validateRowsGetValueContent(rows);
    }

    /** 100-row file: all rows read and the generated content is intact. */
    @Test
    public void validate100RowsGetValueTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_100_ROWS_PRIM_DATA_PATH).build();
        final var rows = ParquetTestUtility.readAllRows(pReader);
        // Fixed: expected value comes first in assertEquals.
        assertEquals(100, rows.size());
        validateRowsGetValueContent(rows);
    }

    /** 100-row/10-row-group file: row-group boundaries do not affect reading. */
    @Test
    public void validate100RowsIn10GroupsGetValueTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_100_ROWS_10_GROUPS_PRIM_DATA_PATH).build();
        final var rows = ParquetTestUtility.readAllRows(pReader);
        // Fixed: expected value comes first in assertEquals.
        assertEquals(100, rows.size());
        validateRowsGetValueContent(rows);
    }

    /** Single all-null row is read back as null in every column. */
    @Test
    public void validate1NullRowGetValueTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_NULL_1_ROW_PRIM_DATA_PATH).build();
        final var rows = ParquetTestUtility.readAllRows(pReader);
        // Fixed: expected value comes first in assertEquals.
        assertEquals(1, rows.size());
        validateRowsGetValueNullContent(rows);
    }

    /** 100 all-null rows are read back as null in every column. */
    @Test
    public void validate100NullRowsGetValueTest() {
        final var pReader = ParquetRowReader.builder().sourceName(ParquetTestUtility.PARQUET_NULL_100_ROWS_PRIM_DATA_PATH).build();
        final var rows = ParquetTestUtility.readAllRows(pReader);
        // Fixed: expected value comes first in assertEquals.
        assertEquals(100, rows.size());
        validateRowsGetValueNullContent(rows);
    }

    /**
     * Writes a file with one more column than {@code ParquetRowReader.MAX_COLUMN_COUNT} allows
     * and verifies the reader refuses to open it.
     *
     * @throws IOException If the temporary output file cannot be created
     */
    @Test
    public void maxColumnCountTest() throws IOException {
        final List<Type> types = new ArrayList<>();
        final Map<ColumnHeader, ParquetDataType> columnTypes = new HashMap<>();
        for (int i = 0; i < ParquetRowReader.MAX_COLUMN_COUNT + 1; i++) {
            final Type type = Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                    .as(LogicalTypeAnnotation.stringType())
                    .named("string" + i);
            columnTypes.put(new ColumnHeader("string" + i), ParquetDataType.fromType(type));
            types.add(type);
        }
        final var messageType = new MessageType("Oversized", types);
        final ParquetSchema parquetSchema = new ParquetSchema(messageType);
        final String output = FileTestUtility.createTempFile("output", ".parquet").toString();
        final ParquetRowWriter writer = ParquetRowWriter.builder()
                .targetName(output)
                .parquetSchema(parquetSchema)
                .build();
        final Row<ParquetValue> row = new ParquetRow(columnTypes);
        writer.writeRow(row);
        writer.flush();
        writer.close();
        assertThrows(C3rRuntimeException.class, () -> ParquetRowReader.builder().sourceName(output).build());
    }
}
| 2,473 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/io/parquet/ParquetPrimitiveConverterTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.parquet;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetDataType;
import com.amazonaws.c3r.data.ParquetRow;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;
import org.junit.jupiter.api.Test;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for {@code ParquetPrimitiveConverter}: each primitive add-method routes its value into
 * the correct column of the attached row, a converter without a row rejects writes, and a
 * column rejects a second write.
 */
public class ParquetPrimitiveConverterTest {
    private static final Type INT32_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.INT32).named("int32");
    private static final Type INT64_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.INT64).named("int64");
    private static final Type STRING_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.BINARY).as(LogicalTypeAnnotation.stringType())
            .named("binary");
    private static final Type BOOLEAN_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.BOOLEAN).named("boolean");
    private static final Type FLOAT_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.FLOAT).named("float");
    private static final Type DOUBLE_TYPE = Types.required(PrimitiveType.PrimitiveTypeName.DOUBLE).named("double");
    private static final ColumnHeader FOO_HEADER = new ColumnHeader("foo");
    private static final ColumnHeader BAR_HEADER = new ColumnHeader("bar");

    // Two-column row shared by both converters; rebuilt by setup(Type).
    private ParquetRow sharedRow;

    // Converter targeting the "foo" column.
    private ParquetPrimitiveConverter converterForFoo;

    // Converter targeting the "bar" column.
    private ParquetPrimitiveConverter converterForBar;

    /** Builds a converter that writes the given Parquet type into the given column. */
    private static ParquetPrimitiveConverter newConverter(final ColumnHeader header, final Type type) {
        return new ParquetPrimitiveConverter(header, ParquetDataType.fromType(type));
    }

    /**
     * Creates a fresh two-column ("foo", "bar") row of the given type and attaches a converter
     * to each column.
     *
     * @param type Parquet type held by both columns
     */
    public void setup(final Type type) {
        sharedRow = new ParquetRow(Map.of(
                FOO_HEADER, ParquetDataType.fromType(type),
                BAR_HEADER, ParquetDataType.fromType(type)));
        converterForFoo = newConverter(FOO_HEADER, type);
        converterForBar = newConverter(BAR_HEADER, type);
        converterForFoo.setRow(sharedRow);
        converterForBar.setRow(sharedRow);
    }

    /** Asserts the shared row holds exactly {@code expected} in the given column. */
    private void assertColumnHolds(final ParquetValue expected, final ColumnHeader header) {
        assertEquals(expected, sharedRow.getValue(header));
    }

    /** A converter with no row attached rejects incoming values. */
    @Test
    public void addMissingRowTest() {
        converterForFoo = newConverter(FOO_HEADER, STRING_TYPE);
        assertThrows(C3rRuntimeException.class, () ->
                converterForFoo.addBinary(Binary.fromReusedByteArray(new byte[]{0})));
    }

    /** A column accepts exactly one value; a second write into the same column fails. */
    @Test
    public void addRepeatValueTest() {
        sharedRow = new ParquetRow(Map.of(
                FOO_HEADER, ParquetDataType.fromType(STRING_TYPE)));
        converterForFoo = newConverter(FOO_HEADER, STRING_TYPE);
        converterForFoo.setRow(sharedRow);
        assertDoesNotThrow(() ->
                converterForFoo.addBinary(Binary.fromReusedByteArray(new byte[]{0})));
        assertThrows(C3rRuntimeException.class, () ->
                converterForFoo.addBinary(Binary.fromReusedByteArray(new byte[]{0})));
    }

    /** Binary values land in their respective columns. */
    @Test
    public void addBinaryTest() {
        setup(STRING_TYPE);
        converterForFoo.addBinary(Binary.fromReusedByteArray(new byte[]{0}));
        assertArrayEquals(new byte[]{0}, sharedRow.getValue(FOO_HEADER).getBytes());
        converterForBar.addBinary(Binary.fromReusedByteArray(new byte[]{1}));
        assertArrayEquals(new byte[]{1}, sharedRow.getValue(BAR_HEADER).getBytes());
    }

    /** Boolean values land in their respective columns. */
    @Test
    public void addBooleanTest() {
        setup(BOOLEAN_TYPE);
        converterForFoo.addBoolean(true);
        assertColumnHolds(new ParquetValue.Boolean(ParquetDataType.fromType(BOOLEAN_TYPE), true), FOO_HEADER);
        converterForBar.addBoolean(false);
        assertColumnHolds(new ParquetValue.Boolean(ParquetDataType.fromType(BOOLEAN_TYPE), false), BAR_HEADER);
    }

    /** Float values land in their respective columns. */
    @Test
    public void addFloatTest() {
        setup(FLOAT_TYPE);
        converterForFoo.addFloat(3.14159f);
        assertColumnHolds(new ParquetValue.Float(ParquetDataType.fromType(FLOAT_TYPE), 3.14159f), FOO_HEADER);
        converterForBar.addFloat(42.0f);
        assertColumnHolds(new ParquetValue.Float(ParquetDataType.fromType(FLOAT_TYPE), 42.0f), BAR_HEADER);
    }

    /** Double values land in their respective columns. */
    @Test
    public void addDoubleTest() {
        setup(DOUBLE_TYPE);
        converterForFoo.addDouble(3.14159);
        assertColumnHolds(new ParquetValue.Double(ParquetDataType.fromType(DOUBLE_TYPE), 3.14159), FOO_HEADER);
        converterForBar.addDouble(42.0);
        assertColumnHolds(new ParquetValue.Double(ParquetDataType.fromType(DOUBLE_TYPE), 42.0), BAR_HEADER);
    }

    /** Int values land in their respective columns. */
    @Test
    public void addIntTest() {
        setup(INT32_TYPE);
        converterForFoo.addInt(3);
        assertColumnHolds(new ParquetValue.Int32(ParquetDataType.fromType(INT32_TYPE), 3), FOO_HEADER);
        converterForBar.addInt(42);
        assertColumnHolds(new ParquetValue.Int32(ParquetDataType.fromType(INT32_TYPE), 42), BAR_HEADER);
    }

    /** Long values land in their respective columns. */
    @Test
    public void addLongTest() {
        setup(INT64_TYPE);
        converterForFoo.addLong(3L);
        assertColumnHolds(new ParquetValue.Int64(ParquetDataType.fromType(INT64_TYPE), 3L), FOO_HEADER);
        converterForBar.addLong(42L);
        assertColumnHolds(new ParquetValue.Int64(ParquetDataType.fromType(INT64_TYPE), 42L), BAR_HEADER);
    }
}
| 2,474 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/utils/ParquetTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.io.ParquetRowReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static java.util.Map.entry;
/**
* Utility class for testing Parquet functionality.
*/
public final class ParquetTestUtility {
/**
 * Path to the sample Parquet data file used for header/schema tests.
 */
public static final String PARQUET_SAMPLE_DATA_PATH = "../samples/parquet/data_sample.parquet";
/**
 * Raw column names and types for {@link #PARQUET_SAMPLE_DATA_PATH}.
 */
public static final List<Map.Entry<String, ClientDataType>> PARQUET_SAMPLE_DATA_ROW_TYPE_ENTRIES =
List.of(entry("FirstName", ClientDataType.STRING),
entry("LastName", ClientDataType.STRING),
entry("Address", ClientDataType.STRING),
entry("City", ClientDataType.STRING),
entry("State", ClientDataType.STRING),
entry("PhoneNumber", ClientDataType.STRING),
entry("Title", ClientDataType.STRING),
entry("Level", ClientDataType.STRING),
entry("Notes", ClientDataType.STRING));
/**
 * Normalized ColumnHeaders for {@link #PARQUET_SAMPLE_DATA_PATH} (built via the ColumnHeader
 * constructor, which presumably normalizes names — confirm against ColumnHeader).
 */
public static final List<ColumnHeader> PARQUET_SAMPLE_DATA_HEADERS =
PARQUET_SAMPLE_DATA_ROW_TYPE_ENTRIES.stream()
.map(Map.Entry::getKey)
.map(ColumnHeader::new)
.collect(Collectors.toList());
/**
 * Non-normalized ColumnHeaders for {@link #PARQUET_SAMPLE_DATA_PATH} (built via
 * {@code ColumnHeader.ofRaw}, preserving the raw names).
 */
public static final List<ColumnHeader> PARQUET_SAMPLE_DATA_HEADERS_NO_NORMALIZATION =
PARQUET_SAMPLE_DATA_ROW_TYPE_ENTRIES.stream()
.map(Map.Entry::getKey)
.map(ColumnHeader::ofRaw)
.collect(Collectors.toList());
/**
 * A file containing only binary values without logical annotations.
 */
public static final String PARQUET_BINARY_VALUES_PATH = "../samples/parquet/binary_values.parquet";
/**
 * A file containing a single row and single column of Parquet data.
 */
public static final String PARQUET_1_ROW_PRIM_DATA_PATH = "../samples/parquet/rows_1_groups_1_prim_data.parquet";
/**
 * A file containing 100 rows of a single column of Parquet data.
 */
public static final String PARQUET_100_ROWS_PRIM_DATA_PATH = "../samples/parquet/rows_100_groups_1_prim_data.parquet";
/**
 * A file containing 100 rows of 10 columns/groups of Parquet data.
 */
public static final String PARQUET_100_ROWS_10_GROUPS_PRIM_DATA_PATH = "../samples/parquet/rows_100_groups_10_prim_data.parquet";
/**
 * Parquet file that is not UTF-8 encoded.
 */
public static final String PARQUET_NON_UTF8_DATA_PATH = "../samples/parquet/nonUtf8Encoding.parquet";
/**
 * Parquet file with a single value that is null.
 */
public static final String PARQUET_NULL_1_ROW_PRIM_DATA_PATH = "../samples/parquet/null_rows_1_groups_1_prim_data.parquet";
/**
 * Parquet file with 100 rows of a single column that contains {@code null} as values.
 */
public static final String PARQUET_NULL_100_ROWS_PRIM_DATA_PATH = "../samples/parquet/null_rows_100_groups_1_prim_data.parquet";
/**
 * List of map entries of Parquet data type as column header to C3R data type.
 *
 * @see #PARQUET_TEST_DATA_TYPES
 */
public static final List<Map.Entry<ColumnHeader, ClientDataType>> PARQUET_TEST_ROW_TYPE_ENTRIES =
List.of(entry(new ColumnHeader("boolean"), ClientDataType.BOOLEAN),
entry(new ColumnHeader("string"), ClientDataType.STRING),
entry(new ColumnHeader("int8"), ClientDataType.UNKNOWN),
entry(new ColumnHeader("int16"), ClientDataType.SMALLINT),
entry(new ColumnHeader("int32"), ClientDataType.INT),
entry(new ColumnHeader("int64"), ClientDataType.BIGINT),
entry(new ColumnHeader("float"), ClientDataType.FLOAT),
entry(new ColumnHeader("double"), ClientDataType.DOUBLE),
entry(new ColumnHeader("timestamp"), ClientDataType.TIMESTAMP));
/**
 * Map of Parquet data types as column headers to C3R data type.
 */
public static final Map<ColumnHeader, ClientDataType> PARQUET_TEST_DATA_TYPES =
PARQUET_TEST_ROW_TYPE_ENTRIES.stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
/**
 * Parquet data types as column headers.
 *
 * @see #PARQUET_TEST_DATA_TYPES
 */
public static final List<ColumnHeader> PARQUET_TEST_DATA_HEADERS =
PARQUET_TEST_ROW_TYPE_ENTRIES.stream().map(Map.Entry::getKey).collect(Collectors.toList());
/**
 * Hidden utility class constructor.
 */
private ParquetTestUtility() {
}
/**
 * Read all rows from a Parquet file.
 *
 * <p>NOTE(review): this method declares no checked exception, so the previous
 * {@code @throws IOException} tag did not match the signature. Failures propagate from
 * {@code ParquetRowReader.builder()} as unchecked exceptions (C3rRuntimeException, as
 * exercised by ParquetRowReaderTest) — confirm.
 *
 * @param filePath Location of file to read
 * @return List of individual rows of Parquet data
 */
public static List<Row<ParquetValue>> readAllRows(final String filePath) {
return readAllRows(ParquetRowReader.builder().sourceName(filePath).build());
}
/**
 * Read all rows from a Parquet file using a {@link ParquetRowReader}.
 *
 * @param reader Reader configured for a specific Parquet file and the data types it contains
 * @return List of individual rows of Parquet data
 */
public static List<Row<ParquetValue>> readAllRows(final ParquetRowReader reader) {
final var rows = new ArrayList<Row<ParquetValue>>();
while (reader.hasNext()) {
final var row = reader.next();
rows.add(row);
}
return rows;
}
}
| 2,475 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/utils/ParquetTypeDefsTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
/**
* Contains definitions for each primitive data type as both logical and required. An unsupported logical type
* and an unsupported repeated primitive type are included for testing purposes.
*/
public final class ParquetTypeDefsTestUtility {
/**
 * Length of the fixed width arrays.
 */
public static final int FIXED_WIDTH_LENGTH = 5;
/**
 * Optional fixed length byte array.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_FIXED_LEN_BYTE_ARRAY =
Types.optional(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
.length(FIXED_WIDTH_LENGTH)
.named("OPT_FIXED_LEN_BYTE_ARRAY");
/**
 * Required fixed length byte array.
 */
public static final org.apache.parquet.schema.Type REQUIRED_FIXED_LEN_BYTE_ARRAY =
Types.required(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
.length(FIXED_WIDTH_LENGTH)
.named("REQ_FIXED_LEN_BYTE_ARRAY");
/**
 * Zero or one instances of a binary subtype string value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_STRING_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
.as(LogicalTypeAnnotation.stringType())
.named("OPT_STRING");
/**
 * Exactly one instance of a binary subtype string value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_STRING_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.BINARY)
.as(LogicalTypeAnnotation.stringType())
.named("REQ_STRING");
/**
 * Unsupported primitive type (repeated: 0, 1 or more instances required per row group) for testing purposes.
 */
public static final org.apache.parquet.schema.Type REPEATED_STRING_TYPE =
Types.repeated(PrimitiveType.PrimitiveTypeName.BINARY)
.as(LogicalTypeAnnotation.stringType())
.named("REPEAT_STRING");
/**
 * Exactly one instance of a binary value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_BINARY_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.BINARY)
.named("REQ_BINARY");
/**
 * Zero or one instances of a boolean value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_BOOLEAN_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.BOOLEAN)
.named("OPT_BOOLEAN");
/**
 * Exactly one instance of a boolean value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_BOOLEAN_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.BOOLEAN)
.named("REQ_BOOLEAN");
/**
 * Zero or one instances of a double value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_DOUBLE_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
.named("OPT_DOUBLE");
/**
 * Exactly one instance of a double value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_DOUBLE_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.DOUBLE)
.named("REQ_DOUBLE");
/**
 * Zero or one instances of a float value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_FLOAT_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.FLOAT)
.named("OPT_FLOAT");
/**
 * Exactly one instance of a float value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_FLOAT_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.FLOAT)
.named("REQ_FLOAT");
/**
 * Zero or one instances of a signed 8-bit integer (stored as int32) is required per row group.
 * (Previous doc wrongly said "Exactly one instance … required"; the builder is {@code optional}.)
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT8_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(8, true))
.named("OPT_INT8");
/**
 * Zero or one instances of a signed int/int32 value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT32_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
.named("OPT_INT32");
/**
 * Zero or one instances of a signed int/int32 value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT32_ANNOTATED_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(32, true))
.named("OPT_ANNOTATED_INT32");
/**
 * Exactly one instance of an int/int32 value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_INT32_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.named("REQ_INT32");
/**
 * Exactly one instance of an int/int32 value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_INT32_ANNOTATED_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(32, true))
.named("REQ_ANNOTATED_INT32");
/**
 * Zero or one instances of a long/int64 value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT64_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
.named("OPT_INT64");
/**
 * Zero or one instances of a long/int64 value is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT64_ANNOTATED_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.intType(64, true))
.named("OPT_ANNOTATED_INT64");
/**
 * Exactly one instance of a long/int64 value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_INT64_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT64)
.named("REQ_INT64");
/**
 * Exactly one instance of a long/int64 value is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_INT64_ANNOTATED_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.intType(64, true))
.named("REQ_ANNOTATED_INT64");
/**
 * Zero or one instances of a date value (int32 with date annotation) is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_DATE_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.dateType())
.named("OPT_DATE");
/**
 * Exactly one instance of a date value (int32 with date annotation) is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_DATE_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.dateType())
.named("REQ_DATE");
/**
 * Zero or one instances of a signed 16-bit integer (stored as int32) is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_INT16_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(16, true))
.named("OPT_INT16");
/**
 * Exactly one instance of a signed 16-bit integer (stored as int32) is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_INT16_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(16, true))
.named("REQ_INT16");
/**
 * Zero or one instances of a UTC-adjusted nanosecond timestamp (int64) is required per row group.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_TIMESTAMP_UTC_NANO_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS))
.named("OPT_UTC_NANO_TIMESTAMP");
/**
 * Exactly one instance of a UTC-adjusted nanosecond timestamp (int64) is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_TIMESTAMP_UTC_NANO_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS))
.named("REQ_UTC_NANOTIMESTAMP");
/**
 * Exactly one instance of a local (non-UTC-adjusted) nanosecond timestamp (int64) is required per row group.
 */
public static final org.apache.parquet.schema.Type REQUIRED_TIMESTAMP_NANO_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.timestampType(false, LogicalTypeAnnotation.TimeUnit.NANOS))
.named("REQ_NANO_TIMESTAMP");
/**
 * Unsupported logical type for testing purposes.
 */
public static final org.apache.parquet.schema.Type COMPLEX_TYPE =
Types.requiredMap()
.key(PrimitiveType.PrimitiveTypeName.FLOAT)
.optionalValue(PrimitiveType.PrimitiveTypeName.INT32)
.named("ZipMap");
/**
 * Unsupported unsigned int8 value for testing purposes.
 */
public static final org.apache.parquet.schema.Type UNSIGNED_INT8_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(8, false))
.named("UINT8");
/**
 * Unsupported unsigned int16 value for testing purposes.
 */
public static final org.apache.parquet.schema.Type UNSIGNED_INT16_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(16, false))
.named("UINT16");
/**
 * Unsupported unsigned int32 value for testing purposes.
 */
public static final org.apache.parquet.schema.Type UNSIGNED_INT32_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT32)
.as(LogicalTypeAnnotation.intType(32, false))
.named("UINT32");
/**
 * Unsupported unsigned int64 value for testing purposes.
 */
public static final org.apache.parquet.schema.Type UNSIGNED_INT64_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.INT64)
.as(LogicalTypeAnnotation.intType(64, false))
.named("UINT64");
/**
 * Required fixed length byte array decimal (precision 16, scale 16, 32-byte storage).
 *
 * <p>NOTE(review): the Parquet field name "REQ_DECIMAL_10_2" does not match the actual
 * decimal(16, 16) annotation — looks like a copy-paste from an earlier decimal(10, 2)
 * definition; confirm before relying on the name.
 */
public static final org.apache.parquet.schema.Type REQUIRED_BYTE_ARRAY_DECIMAL_TYPE =
Types.required(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
.length(32)
.as(LogicalTypeAnnotation.decimalType(16, 16))
.named("REQ_DECIMAL_10_2");
/**
 * Optional fixed length byte array decimal (precision 16, scale 16, 32-byte storage).
 *
 * <p>NOTE(review): this optional field is also named "REQ_DECIMAL_10_2" — identical to
 * {@link #REQUIRED_BYTE_ARRAY_DECIMAL_TYPE}'s name, which would collide if both were placed
 * in one MessageType, and the "REQ"/"10_2" parts do not describe this field. Left unchanged
 * here because the name is runtime data other tests may depend on — confirm and rename.
 */
public static final org.apache.parquet.schema.Type OPTIONAL_BYTE_ARRAY_DECIMAL_TYPE =
Types.optional(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
.length(32)
.as(LogicalTypeAnnotation.decimalType(16, 16))
.named("REQ_DECIMAL_10_2");
/**
 * Hidden utility constructor.
 */
private ParquetTypeDefsTestUtility() {
}
}
| 2,476 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/utils/FileTestUtility.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
/**
* A test utility for creating temporary Path resources for tests that will clean themselves up after execution.
*/
public final class FileTestUtility {
    // Fix: this utility class was declared `abstract` with an implicit public constructor.
    // Sibling utilities in this project (ParquetTestUtility, ParquetTypeDefsTestUtility) use
    // `final` + a private constructor; do the same here so the class can be neither
    // instantiated nor subclassed.

    /**
     * Hidden utility class constructor.
     */
    private FileTestUtility() {
    }

    /**
     * Creates a temporary directory with the prefix "temp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempDir() throws IOException {
        final Path tempDir = Files.createTempDirectory("temp");
        tempDir.toFile().deleteOnExit();
        return tempDir;
    }

    /**
     * Creates a temporary file with the prefix "testFile" and suffix ".tmp" marked with deleteOnExit.
     *
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile() throws IOException {
        return createTempFile("testFile", ".tmp");
    }

    /**
     * Creates a temporary file with the prefix and suffix provided marked with deleteOnExit.
     * The file is created inside a fresh temporary directory (also marked deleteOnExit).
     *
     * @param prefix The prefix of the Path to create
     * @param suffix The suffix of the Path to create
     * @return A temporary Path
     * @throws IOException If the temporary Path cannot be created
     */
    public static Path createTempFile(final String prefix, final String suffix) throws IOException {
        final Path tempDir = createTempDir();
        final Path tempFile = Files.createTempFile(tempDir, prefix, suffix);
        tempFile.toFile().deleteOnExit();
        return tempFile;
    }

    /**
     * Resolves a temporary file with the file name provided marked with deleteOnExit.
     * Note: the returned Path is NOT created on disk — it is only resolved against a fresh
     * temporary directory.
     *
     * @param fileName The name of the Path to resolve
     * @return A temporary Path
     * @throws IOException If the temporary directory cannot be created
     */
    public static Path resolve(final String fileName) throws IOException {
        return resolve(fileName, createTempDir());
    }

    /**
     * Resolves the file name against the given temporary directory, marking the result with
     * deleteOnExit. (Previous doc's "prefix and suffix provided" was a copy-paste error.)
     *
     * @param fileName The name of the Path to resolve
     * @param tempDir  The directory against which to resolve {@code fileName}
     * @return A temporary Path
     */
    private static Path resolve(final String fileName, final Path tempDir) {
        final Path tempFile = tempDir.resolve(fileName);
        tempFile.toFile().deleteOnExit();
        return tempFile;
    }
}
| 2,477 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/action/ParquetRowMarshallerTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.FingerprintTransformer;
import com.amazonaws.c3r.SealedTransformer;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.Pad;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.PositionalTableSchema;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.ParquetTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.amazonaws.c3r.utils.GeneralTestUtility.TEST_CONFIG_DATA_SAMPLE;
import static com.amazonaws.c3r.utils.GeneralTestUtility.cleartextColumn;
import static com.amazonaws.c3r.utils.GeneralTestUtility.fingerprintColumn;
import static com.amazonaws.c3r.utils.GeneralTestUtility.sealedColumn;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_BINARY_VALUES_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_NULL_1_ROW_PRIM_DATA_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_TEST_DATA_HEADERS;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_TEST_DATA_TYPES;
import static com.amazonaws.c3r.utils.ParquetTestUtility.readAllRows;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * End-to-end tests for {@link ParquetRowMarshaller}: file-format validation,
 * single-row marshalling across schema/NULL-handling combinations, positional
 * schema rejection, and binary-as-string configuration.
 */
public class ParquetRowMarshallerTest {
    // a table schema which leaves each column as-is/unencrypted
    private final TableSchema identitySchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .map(h -> cleartextColumn(h.toString()))
                    .collect(Collectors.toList()));

    // a table scheme which omits the one string column and makes the rest unencrypted
    private final TableSchema dropStringColumnSchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .filter(h -> !PARQUET_TEST_DATA_TYPES.get(h).equals(ClientDataType.STRING))
                    .map(h -> cleartextColumn(h.toString()))
                    .collect(Collectors.toList()));

    // a table schema which encrypts the string column both for sealed and fingerprint
    private final TableSchema encryptStringColumnSealedAndFingerprintSchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .flatMap(h -> PARQUET_TEST_DATA_TYPES.get(h).equals(ClientDataType.STRING)
                            ?
                            Stream.of(
                                    sealedColumn(h.toString(), h + ColumnHeader.DEFAULT_SEALED_SUFFIX, PadType.NONE, null),
                                    fingerprintColumn(h.toString(), h + ColumnHeader.DEFAULT_FINGERPRINT_SUFFIX))
                            :
                            Stream.of(cleartextColumn(h.toString())))
                    .collect(Collectors.toList()));

    // Permissive settings except that NULL values are encrypted (preserveNulls = false).
    private final ClientSettings lowSecurityEncryptNull =
            ClientSettings.builder()
                    .preserveNulls(false)
                    .allowDuplicates(true)
                    .allowJoinsOnColumnsWithDifferentNames(true)
                    .allowCleartext(true)
                    .build();

    // Scratch directory used by the marshaller for intermediate files.
    private String tempDir;

    // Target file each marshalling test writes to.
    private Path output;

    @BeforeEach
    public void setup() throws IOException {
        tempDir = FileTestUtility.createTempDir().toString();
        output = FileTestUtility.resolve("output.parquet");
    }

    /**
     * Verifies that a non-Parquet file format is rejected while Parquet is accepted.
     */
    @Test
    public void validateRejectNonParquetFormatTest() throws IOException {
        final String output = FileTestUtility.resolve("endToEndMarshalOut.unknown").toString();
        final var configBuilder = EncryptConfig.builder()
                .sourceFile(PARQUET_1_ROW_PRIM_DATA_PATH)
                .targetFile(output)
                .fileFormat(FileFormat.CSV)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(lowSecurityEncryptNull)
                .tableSchema(identitySchema)
                .overwrite(true);
        assertThrows(C3rIllegalArgumentException.class, () ->
                ParquetRowMarshaller.newInstance(configBuilder.fileFormat(FileFormat.CSV).build(), ParquetConfig.DEFAULT));
        assertDoesNotThrow(() ->
                ParquetRowMarshaller.newInstance(configBuilder.fileFormat(FileFormat.PARQUET).build(), ParquetConfig.DEFAULT));
    }

    /**
     * Marshals a one-row input file with the given schema/settings and checks
     * each output column: cleartext values are unchanged, NULLs are preserved
     * when {@code preserveNulls} is true, and sealed/fingerprint values differ
     * from their inputs otherwise.
     *
     * @param schema     schema to encrypt under
     * @param settings   client settings (controls NULL preservation)
     * @param isDataNull whether to use the all-NULL one-row input file
     * @throws IOException on read/write failure
     */
    private void marshal1RowTest(final TableSchema schema, final ClientSettings settings, final boolean isDataNull) throws IOException {
        final String inputFile = isDataNull
                ? PARQUET_NULL_1_ROW_PRIM_DATA_PATH
                : PARQUET_1_ROW_PRIM_DATA_PATH;
        final var config = EncryptConfig.builder()
                .sourceFile(inputFile)
                .targetFile(output.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(settings)
                .tableSchema(schema)
                .overwrite(true)
                .build();
        final var marshaller = ParquetRowMarshaller.newInstance(config, ParquetConfig.DEFAULT);
        marshaller.marshal();
        marshaller.close();
        assertNotEquals(0, Files.size(output));
        final Row<ParquetValue> inRow = readAllRows(inputFile).get(0);
        final List<Row<ParquetValue>> marshalledRows = readAllRows(output.toString());
        // The input file had one row - ensure the output does
        assertEquals(1, marshalledRows.size());
        final Row<ParquetValue> outRow = marshalledRows.get(0);
        // The marshalled row should have the size the schema dictates
        assertEquals(schema.getColumns().size(), outRow.size());
        for (ColumnSchema column : schema.getColumns()) {
            final var inValue = inRow.getValue(column.getSourceHeader());
            final var outValue = outRow.getValue(column.getTargetHeader());
            if (column.getType() == ColumnType.CLEARTEXT) {
                // cleartext content should be unchanged
                assertEquals(inValue, outValue);
            } else if (isDataNull && settings.isPreserveNulls()) {
                // null entries should remain null when preserveNULLs is true
                assertTrue(inValue.isNull());
                assertTrue(outValue.isNull());
            } else {
                // Sealed/Fingerprint data, and either it is NULL or preserveNULLs is false
                assertNotEquals(inValue, outValue);
            }
        }
    }

    /**
     * Runs the single-row marshalling check across all four combinations of
     * client settings (preserveNulls true/false) and input data (non-NULL/NULL).
     *
     * <p>BUG FIX: the previous version invoked the same two combinations twice
     * and never exercised (lowAssuranceMode, NULL data) or
     * (lowSecurityEncryptNull, non-NULL data), even though
     * {@link #marshal1RowTest} explicitly branches on both. This now covers the
     * full cross product, matching the pattern used in
     * {@code ParquetRowUnmarshallerTest}.
     *
     * @param schema schema to marshal under
     * @throws IOException on read/write failure
     */
    private void marshall1Row(final TableSchema schema) throws IOException {
        marshal1RowTest(schema, ClientSettings.lowAssuranceMode(), /* isDataNull */ false);
        marshal1RowTest(schema, lowSecurityEncryptNull, /* isDataNull */ false);
        marshal1RowTest(schema, ClientSettings.lowAssuranceMode(), /* isDataNull */ true);
        marshal1RowTest(schema, lowSecurityEncryptNull, /* isDataNull */ true);
    }

    @Test
    public void marshal1RowIdentitySchema1RowTest() throws IOException {
        marshall1Row(identitySchema);
    }

    @Test
    public void marshal1RowDrop1ColumnSchema1RowTest() throws IOException {
        marshall1Row(dropStringColumnSchema);
    }

    @Test
    public void marshal1RowEncrypt1ColumnSchema1NullRowTest() throws IOException {
        marshall1Row(encryptStringColumnSealedAndFingerprintSchema);
    }

    /**
     * Builds a marshaller for the one-row primitive data file using the given schema.
     *
     * @param schema table schema to encrypt under
     * @return a ready-to-run marshaller
     */
    private RowMarshaller<ParquetValue> buildRowMarshallerWithSchema(final TableSchema schema) {
        final var config = EncryptConfig.builder()
                .sourceFile(PARQUET_1_ROW_PRIM_DATA_PATH)
                .targetFile(output.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(lowSecurityEncryptNull)
                .tableSchema(schema)
                .overwrite(true)
                .build();
        return ParquetRowMarshaller.newInstance(config, ParquetConfig.DEFAULT);
    }

    /**
     * Positional schemas are not supported for Parquet input and must be rejected.
     */
    @Test
    public void positionalSchemaErrorsTest() {
        final TableSchema positionalSchema =
                new PositionalTableSchema(identitySchema.getColumns().stream().map(c ->
                                List.of(ColumnSchema.builder()
                                        .targetHeader(c.getTargetHeader())
                                        .type(c.getType())
                                        .pad(c.getPad())
                                        .build()))
                        .collect(Collectors.toList()));
        // check that the test case works with the mapped identity schema
        assertDoesNotThrow(() ->
                buildRowMarshallerWithSchema(identitySchema));
        // check that switching to the positional identity schema errors
        assertThrows(C3rIllegalArgumentException.class, () ->
                buildRowMarshallerWithSchema(positionalSchema));
    }

    /**
     * Binary columns can only be processed when {@code binaryAsString} is
     * explicitly enabled; verifies failure when unset/false and correct output
     * (fingerprint/sealed descriptor prefixes, cleartext content) when true.
     */
    @Test
    public void marshalBinaryValuesAsStringTest() {
        final String input = PARQUET_BINARY_VALUES_PATH;
        final ColumnHeader fingerprintHeader = new ColumnHeader("fingerprint");
        final ColumnHeader sealedHeader = new ColumnHeader("sealed");
        final ColumnHeader cleartextHeader = new ColumnHeader("cleartext");
        // Output one column of each type
        final MappedTableSchema schema = new MappedTableSchema(List.of(
                ColumnSchema.builder().type(ColumnType.FINGERPRINT)
                        .sourceHeader(fingerprintHeader).targetHeader(fingerprintHeader).build(),
                ColumnSchema.builder().type(ColumnType.SEALED)
                        .sourceHeader(sealedHeader).targetHeader(sealedHeader)
                        .pad(Pad.DEFAULT).build(),
                ColumnSchema.builder().type(ColumnType.CLEARTEXT)
                        .sourceHeader(cleartextHeader).targetHeader(cleartextHeader)
                        .build()
        ));
        // All configuration settings except for how to treat binary values
        final var baseConfig = EncryptConfig.builder()
                .sourceFile(input)
                .targetFile(output.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(lowSecurityEncryptNull)
                .tableSchema(schema)
                .overwrite(true)
                .build();
        // --parquetBinaryAsString is unset should cause execution to fail on file containing binary values
        final var nullConfig = ParquetConfig.builder().binaryAsString(null).build();
        assertThrows(C3rRuntimeException.class, () -> ParquetRowMarshaller.newInstance(baseConfig, nullConfig));
        // --parquetBinaryAsString is false should cause execution to fail on file containing binary values
        final var falseConfig = ParquetConfig.builder().binaryAsString(false).build();
        assertThrows(C3rRuntimeException.class, () -> ParquetRowMarshaller.newInstance(baseConfig, falseConfig));
        // --parquetBinaryAsString is true should cause execution to work on file containing binary values
        final var trueConfig = ParquetConfig.builder().binaryAsString(true).build();
        assertDoesNotThrow(() -> {
            final var marshaller = ParquetRowMarshaller.newInstance(baseConfig, trueConfig);
            marshaller.marshal();
            marshaller.close();
        });
        // Check that fingerprint, sealed and cleartext values were actually written
        final var rows = ParquetTestUtility.readAllRows(output.toString());
        for (var row : rows) {
            final var fingerprintValue = row.getValue(fingerprintHeader);
            assertTrue(fingerprintValue.toString().startsWith(FingerprintTransformer.DESCRIPTOR_PREFIX_STRING));
            final var sealedValue = row.getValue(sealedHeader);
            assertTrue(sealedValue.toString().startsWith(SealedTransformer.DESCRIPTOR_PREFIX_STRING));
            final var cleartextValue = row.getValue(cleartextHeader);
            assertNotNull(cleartextValue.toString());
        }
    }
}
| 2,478 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.PadType;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ClientDataType;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.ParquetRowReader;
import com.amazonaws.c3r.utils.FileTestUtility;
import com.amazonaws.c3r.utils.GeneralTestUtility;
import com.amazonaws.c3r.utils.ParquetTestUtility;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.stream.Collectors;
import static com.amazonaws.c3r.utils.GeneralTestUtility.TEST_CONFIG_DATA_SAMPLE;
import static com.amazonaws.c3r.utils.GeneralTestUtility.cleartextColumn;
import static com.amazonaws.c3r.utils.GeneralTestUtility.fingerprintColumn;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_1_ROW_PRIM_DATA_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_NON_UTF8_DATA_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_NULL_1_ROW_PRIM_DATA_PATH;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_TEST_DATA_HEADERS;
import static com.amazonaws.c3r.utils.ParquetTestUtility.PARQUET_TEST_DATA_TYPES;
import static com.amazonaws.c3r.utils.ParquetTestUtility.readAllRows;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * End-to-end encrypt/decrypt round-trip tests for {@link ParquetRowUnmarshaller}:
 * format validation, header preservation, and round-trip equality for
 * cleartext, sealed and fingerprint schemas (with and without NULL data).
 */
public class ParquetRowUnmarshallerTest {
    // Permissive settings except that NULL values are encrypted (preserveNulls = false).
    private final ClientSettings lowSecurityEncryptNull =
            ClientSettings.builder()
                    .preserveNulls(false)
                    .allowDuplicates(true)
                    .allowJoinsOnColumnsWithDifferentNames(true)
                    .allowCleartext(true)
                    .build();

    // Schema leaving every column cleartext.
    private final TableSchema cleartextSchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .map(h -> cleartextColumn(h.toString()))
                    .collect(Collectors.toList()));

    // the string column is the only one that can be encrypted
    private final TableSchema sealedSchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .map(h -> PARQUET_TEST_DATA_TYPES.get(h).equals(ClientDataType.STRING)
                            ? GeneralTestUtility.sealedColumn(h.toString(), PadType.NONE, null)
                            : cleartextColumn(h.toString()))
                    .collect(Collectors.toList()));

    // the string column is the only one that can be HMACd
    private final TableSchema fingerprintSchema = new MappedTableSchema(
            PARQUET_TEST_DATA_HEADERS.stream()
                    .map(h -> PARQUET_TEST_DATA_TYPES.get(h).equals(ClientDataType.STRING)
                            ? fingerprintColumn(h.toString())
                            : cleartextColumn(h.toString()))
                    .collect(Collectors.toList()));

    // Scratch directory used by the marshaller for intermediate files.
    private String tempDir;

    @BeforeEach
    public void setup() throws IOException {
        tempDir = FileTestUtility.createTempDir().toString();
    }

    /**
     * Verifies that a non-Parquet file format is rejected while Parquet is accepted.
     */
    @Test
    public void validateRejectNonParquetFormatTest() throws IOException {
        final String output = FileTestUtility.resolve("endToEndMarshalOut.unknown").toString();
        final var configBuilder = DecryptConfig.builder()
                .sourceFile(PARQUET_1_ROW_PRIM_DATA_PATH)
                .targetFile(output)
                .fileFormat(FileFormat.CSV)
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .overwrite(true);
        assertThrows(C3rIllegalArgumentException.class, () ->
                ParquetRowUnmarshaller.newInstance(configBuilder.fileFormat(FileFormat.CSV).build()));
        assertDoesNotThrow(() ->
                ParquetRowUnmarshaller.newInstance(configBuilder.fileFormat(FileFormat.PARQUET).build()));
    }

    /**
     * Decrypted output must carry the source file's headers verbatim
     * (no normalization applied by the unmarshaller).
     */
    @Test
    public void unmarshallDoesNotModifyHeadersTest() throws IOException {
        final Path tempOutput = FileTestUtility.resolve("output.parquet");
        final var decConfig = DecryptConfig.builder()
                .sourceFile(ParquetTestUtility.PARQUET_SAMPLE_DATA_PATH.toString())
                .targetFile(tempOutput.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .overwrite(true)
                .build();
        final var unmarshaller = ParquetRowUnmarshaller.newInstance(decConfig);
        unmarshaller.unmarshal();
        unmarshaller.close();
        // Read the output back without header normalization to inspect raw headers.
        final var outputReader = ParquetRowReader.builder()
                .sourceName(tempOutput.toString())
                .skipHeaderNormalization(true)
                .build();
        assertNotEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS,
                outputReader.getHeaders());
        assertEquals(
                ParquetTestUtility.PARQUET_SAMPLE_DATA_HEADERS_NO_NORMALIZATION,
                outputReader.getHeaders());
    }

    /**
     * Encrypts {@code input} under {@code schema}/{@code settings}, decrypts the
     * result, and checks round-trip semantics per column: cleartext and sealed
     * values round-trip exactly; fingerprint values only round-trip when NULL
     * and {@code preserveNulls} is true.
     *
     * @param input    source Parquet file
     * @param schema   schema to encrypt under
     * @param settings client settings (controls NULL preservation)
     * @throws IOException on read/write failure
     */
    private void endToEndUnmarshal1RowTest(final String input,
                                           final TableSchema schema,
                                           final ClientSettings settings)
            throws IOException {
        final Path encOutput = FileTestUtility.resolve("endToEndMarshalOut.parquet");
        final var encConfig = EncryptConfig.builder()
                .sourceFile(input)
                .targetFile(encOutput.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(settings)
                .tableSchema(schema)
                .overwrite(true)
                .build();
        final var marshaller = ParquetRowMarshaller.newInstance(encConfig, ParquetConfig.DEFAULT);
        marshaller.marshal();
        marshaller.close();
        assertNotEquals(0, Files.size(encOutput));
        final Path decOutput = FileTestUtility.resolve("endToEndUnmarshalOut.parquet");
        final var decConfig = DecryptConfig.builder()
                .sourceFile(encOutput.toString())
                .targetFile(decOutput.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .overwrite(true)
                .build();
        final var unmarshaller = ParquetRowUnmarshaller.newInstance(decConfig);
        unmarshaller.unmarshal();
        unmarshaller.close();
        assertNotEquals(0, Files.size(decOutput));
        final List<Row<ParquetValue>> inputRows = readAllRows(input);
        final List<Row<ParquetValue>> marshalledRows = readAllRows(encOutput.toString());
        final List<Row<ParquetValue>> unmarshalledRows = readAllRows(decOutput.toString());
        assertEquals(inputRows.size(), marshalledRows.size());
        assertEquals(inputRows.size(), unmarshalledRows.size());
        for (int i = 0; i < inputRows.size(); i++) {
            final Row<ParquetValue> inputRow = inputRows.get(i);
            final Row<ParquetValue> unmarshalledRow = unmarshalledRows.get(i);
            for (ColumnSchema c : schema.getColumns()) {
                final var inValue = inputRow.getValue(c.getSourceHeader());
                final var outValue = unmarshalledRow.getValue(c.getTargetHeader());
                if (c.getType() != ColumnType.FINGERPRINT) {
                    // CLEARTEXT/SEALED column data: should round trip to the same value always.
                    assertEquals(inValue, outValue);
                } else {
                    // fingerprint column data: only round trips the same if NULL and preserveNULLs is true
                    if (inValue.isNull() && settings.isPreserveNulls()) {
                        assertTrue(outValue.isNull());
                    } else {
                        assertNotEquals(inValue, outValue);
                    }
                }
            }
        }
    }

    @Test
    public void cleartextEndToEndUnmarshal1RowTest() throws IOException {
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, cleartextSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, cleartextSchema, lowSecurityEncryptNull);
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, cleartextSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, cleartextSchema, lowSecurityEncryptNull);
    }

    @Test
    public void sealedEndToEndUnmarshal1RowTest() throws IOException {
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, sealedSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, sealedSchema, lowSecurityEncryptNull);
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, sealedSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, sealedSchema, lowSecurityEncryptNull);
    }

    @Test
    public void sealedEndToEndUnmarshal1RowWithNonUtf8Test() throws IOException {
        // Only two distinct configurations apply to this input (there is no NULL
        // variant of the non-UTF-8 data file); the previous version ran each of
        // these identical invocations twice, adding runtime without coverage.
        endToEndUnmarshal1RowTest(PARQUET_NON_UTF8_DATA_PATH, sealedSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_NON_UTF8_DATA_PATH, sealedSchema, lowSecurityEncryptNull);
    }

    @Test
    public void joinEndToEndUnmarshal1RowTest() throws IOException {
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, fingerprintSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_1_ROW_PRIM_DATA_PATH, fingerprintSchema, lowSecurityEncryptNull);
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, fingerprintSchema, ClientSettings.lowAssuranceMode());
        endToEndUnmarshal1RowTest(PARQUET_NULL_1_ROW_PRIM_DATA_PATH, fingerprintSchema, lowSecurityEncryptNull);
    }
}
| 2,479 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Stream;
import static com.amazonaws.c3r.data.ClientDataType.BIGINT_BYTE_SIZE;
import static com.amazonaws.c3r.data.ClientDataType.INT_BYTE_SIZE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.COMPLEX_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_FIXED_LEN_BYTE_ARRAY;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT16_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_FIXED_LEN_BYTE_ARRAY;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT16_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_STRING_TYPE;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@link ParquetValue}: equality and hashing, byte lengths,
 * NULL handling, string rendering, byte round-tripping, and primitive-type
 * expectations for each supported Parquet value wrapper.
 */
public class ParquetValueTest {
    // One NULL (optional Parquet type) and one populated (required Parquet type)
    // fixture per supported ParquetValue subclass.
    private final ParquetValue.Binary nullFixedLenByteArray = new ParquetValue.Binary(
            ParquetDataType.fromType(OPTIONAL_FIXED_LEN_BYTE_ARRAY),
            null);
    private final ParquetValue.Binary fixedLenByteArray = new ParquetValue.Binary(
            ParquetDataType.fromType(REQUIRED_FIXED_LEN_BYTE_ARRAY),
            org.apache.parquet.io.api.Binary.fromReusedByteArray("FIX ".getBytes(StandardCharsets.UTF_8)));
    private final ParquetValue.Binary nullBinaryValue = new ParquetValue.Binary(
            ParquetDataType.fromType(OPTIONAL_STRING_TYPE),
            null);
    private final ParquetValue.Binary binaryValue = new ParquetValue.Binary(
            ParquetDataType.fromType(REQUIRED_STRING_TYPE),
            org.apache.parquet.io.api.Binary.fromReusedByteArray("foo".getBytes(StandardCharsets.UTF_8)));
    private final ParquetValue.Boolean nullBooleanValue = new ParquetValue.Boolean(
            ParquetDataType.fromType(OPTIONAL_BOOLEAN_TYPE),
            null);
    private final ParquetValue.Boolean booleanValue = new ParquetValue.Boolean(
            ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE),
            true);
    private final ParquetValue.Double nullDoubleValue = new ParquetValue.Double(
            ParquetDataType.fromType(OPTIONAL_DOUBLE_TYPE),
            null);
    private final ParquetValue.Double doubleValue = new ParquetValue.Double(
            ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE),
            3.14159);
    private final ParquetValue.Float nullFloatValue = new ParquetValue.Float(
            ParquetDataType.fromType(OPTIONAL_FLOAT_TYPE),
            null);
    private final ParquetValue.Float floatValue = new ParquetValue.Float(
            ParquetDataType.fromType(REQUIRED_FLOAT_TYPE),
            3.14159f);
    // INT16 values are carried in Int32 wrappers (Parquet stores them as int32).
    private final ParquetValue.Int32 nullInt16Value = new ParquetValue.Int32(
            ParquetDataType.fromType(OPTIONAL_INT16_TYPE),
            null);
    private final ParquetValue.Int32 int16Value = new ParquetValue.Int32(
            ParquetDataType.fromType(REQUIRED_INT16_TYPE),
            3);
    private final ParquetValue.Int32 nullInt32Value = new ParquetValue.Int32(
            ParquetDataType.fromType(OPTIONAL_INT32_TYPE),
            null);
    private final ParquetValue.Int32 int32Value = new ParquetValue.Int32(
            ParquetDataType.fromType(REQUIRED_INT32_TYPE),
            3);
    private final ParquetValue.Int64 nullInt64Value = new ParquetValue.Int64(
            ParquetDataType.fromType(OPTIONAL_INT64_TYPE),
            null);
    private final ParquetValue.Int64 int64Value = new ParquetValue.Int64(
            ParquetDataType.fromType(REQUIRED_INT64_TYPE),
            3L);

    /**
     * Asserts equals/hashCode contract expectations for two values that are
     * expected to be unequal: each value equals itself with a stable hash, and
     * the two differ in both equality and hash (fixture data is chosen so the
     * hash inequality holds deterministically).
     *
     * @param value1 first value
     * @param value2 second, differing value
     */
    private void checkEqualsAndHashCode(final ParquetValue value1,
                                        final ParquetValue value2) {
        assertEquals(value1, value1);
        assertNotEquals(value1, value2);
        assertEquals(value1.hashCode(), value1.hashCode());
        assertNotEquals(value1.hashCode(), value2.hashCode());
    }

    /**
     * Parameter source: for each value type, a required-type value, a
     * required-type value with different content, and an optional-type value
     * with the same content as the first.
     *
     * @return argument triples (val1Req, val2Req, val1Opt)
     */
    public static Stream<Arguments> getParams() {
        return Stream.of(
                Arguments.of(new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_FIXED_LEN_BYTE_ARRAY),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray("ABCDE".getBytes(StandardCharsets.UTF_8))),
                        new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_FIXED_LEN_BYTE_ARRAY),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray("FGHIJ".getBytes(StandardCharsets.UTF_8))),
                        new ParquetValue.Binary(ParquetDataType.fromType(OPTIONAL_FIXED_LEN_BYTE_ARRAY),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray("ABCDE".getBytes(StandardCharsets.UTF_8)))),
                Arguments.of(new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_STRING_TYPE),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray(new byte[]{0, 1, 2})),
                        new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_STRING_TYPE),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray(new byte[]{3, 4, 5})),
                        new ParquetValue.Binary(ParquetDataType.fromType(OPTIONAL_STRING_TYPE),
                                org.apache.parquet.io.api.Binary.fromReusedByteArray(new byte[]{0, 1, 2}))),
                Arguments.of(new ParquetValue.Boolean(ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE), true),
                        new ParquetValue.Boolean(ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE), false),
                        new ParquetValue.Boolean(ParquetDataType.fromType(OPTIONAL_BOOLEAN_TYPE), true)),
                Arguments.of(new ParquetValue.Double(ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE), 3.14159),
                        new ParquetValue.Double(ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE), 42.0),
                        new ParquetValue.Double(ParquetDataType.fromType(OPTIONAL_DOUBLE_TYPE), 3.14159)),
                Arguments.of(new ParquetValue.Float(ParquetDataType.fromType(REQUIRED_FLOAT_TYPE), 3.14159f),
                        new ParquetValue.Float(ParquetDataType.fromType(REQUIRED_FLOAT_TYPE), 42.0f),
                        new ParquetValue.Float(ParquetDataType.fromType(OPTIONAL_FLOAT_TYPE), 3.14159f)),
                Arguments.of(new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT16_TYPE), 3),
                        new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT16_TYPE), 42),
                        new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT16_TYPE), 3)),
                Arguments.of(new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT32_TYPE), 3),
                        new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT32_TYPE), 42),
                        new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT32_TYPE), 3)),
                Arguments.of(new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT64_TYPE), 3L),
                        new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT64_TYPE), 42L),
                        new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_INT64_TYPE), 3L))
        );
    }

    /**
     * Checks equals/hashCode across every supported value type (not only
     * Binary, despite the method name): values with the same type but different
     * content differ, and values with the same content but different
     * required/optional type also differ.
     */
    @ParameterizedTest
    @MethodSource(value = "getParams")
    public void equalsAndHashCodeBinaryTest(final ParquetValue val1Req, final ParquetValue val2Req, final ParquetValue val1Opt) {
        // same type, different values
        assertFalse(Arrays.equals(val1Req.getBytes(), val2Req.getBytes()));
        checkEqualsAndHashCode(val1Req, val2Req);
        // different type, same values
        assertArrayEquals(val1Req.getBytes(), val1Opt.getBytes());
        checkEqualsAndHashCode(val1Req, val1Opt);
    }

    /**
     * Each fixture reports the Parquet data type it was constructed with.
     */
    @Test
    public void getParquetDataTypeTest() {
        assertEquals(
                ParquetDataType.fromType(REQUIRED_FIXED_LEN_BYTE_ARRAY),
                fixedLenByteArray.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_STRING_TYPE),
                binaryValue.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE),
                booleanValue.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE),
                doubleValue.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_FLOAT_TYPE),
                floatValue.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_INT16_TYPE),
                int16Value.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_INT32_TYPE),
                int32Value.getParquetDataType());
        assertEquals(
                ParquetDataType.fromType(REQUIRED_INT64_TYPE),
                int64Value.getParquetDataType());
    }

    /**
     * NULL values report a byte length of 0; populated values report the size
     * of their underlying representation.
     */
    @Test
    public void byteLengthTest() {
        assertEquals(0, nullFixedLenByteArray.byteLength());
        assertEquals(0, nullBinaryValue.byteLength());
        assertEquals(3, binaryValue.byteLength());
        assertEquals(0, nullBooleanValue.byteLength());
        assertEquals(1, booleanValue.byteLength());
        assertEquals(0, nullDoubleValue.byteLength());
        assertEquals(Double.BYTES, doubleValue.byteLength());
        assertEquals(0, nullFloatValue.byteLength());
        assertEquals(Float.BYTES, floatValue.byteLength());
        assertEquals(0, nullInt16Value.byteLength());
        assertEquals(INT_BYTE_SIZE, int16Value.byteLength());
        assertEquals(0, nullInt32Value.byteLength());
        assertEquals(INT_BYTE_SIZE, int32Value.byteLength());
        assertEquals(0, nullInt64Value.byteLength());
        assertEquals(BIGINT_BYTE_SIZE, int64Value.byteLength());
    }

    /**
     * {@code isNull} is true exactly for the NULL fixtures.
     */
    @Test
    public void isNullTest() {
        assertTrue(nullFixedLenByteArray.isNull());
        assertTrue(nullBinaryValue.isNull());
        assertFalse(binaryValue.isNull());
        assertTrue(nullBooleanValue.isNull());
        assertFalse(booleanValue.isNull());
        assertTrue(nullDoubleValue.isNull());
        assertFalse(doubleValue.isNull());
        assertTrue(nullFloatValue.isNull());
        assertFalse(floatValue.isNull());
        assertTrue(nullInt16Value.isNull());
        assertFalse(int16Value.isNull());
        assertTrue(nullInt32Value.isNull());
        assertFalse(int32Value.isNull());
        assertTrue(nullInt64Value.isNull());
        assertFalse(int64Value.isNull());
    }

    /**
     * {@code toString} renders NULL values as Java {@code null} and populated
     * values as their natural string form.
     */
    @Test
    public void toStringTest() {
        assertNull(nullFixedLenByteArray.toString());
        assertEquals("FIX ", fixedLenByteArray.toString());
        assertNull(nullBinaryValue.toString());
        assertEquals("foo", binaryValue.toString());
        assertNull(nullBooleanValue.toString());
        assertEquals("true", booleanValue.toString());
        assertNull(nullDoubleValue.toString());
        assertEquals("3.14159", doubleValue.toString());
        assertNull(nullFloatValue.toString());
        assertEquals("3.14159", floatValue.toString());
        assertNull(nullInt16Value.toString());
        assertEquals("3", int16Value.toString());
        assertNull(nullInt32Value.toString());
        assertEquals("3", int32Value.toString());
        assertNull(nullInt64Value.toString());
        assertEquals("3", int64Value.toString());
    }

    /**
     * Every populated fixture survives a getBytes/fromBytes round trip.
     */
    @Test
    public void fromBytesRoundTripTest() {
        final List<ParquetValue> values = List.of(
                fixedLenByteArray,
                binaryValue,
                booleanValue,
                doubleValue,
                floatValue,
                int16Value,
                int32Value,
                int64Value
        );
        for (var value : values) {
            assertEquals(value, ParquetValue.fromBytes(value.getParquetDataType(), value.getBytes()));
        }
    }

    /**
     * Complex (non-primitive) Parquet types are rejected by
     * {@link ParquetDataType#fromType}.
     */
    @Test
    public void fromBytesUnsupportedTypeTest() {
        assertThrows(C3rRuntimeException.class, () -> ParquetDataType.fromType(COMPLEX_TYPE));
    }

    /**
     * A non-primitive (mocked complex) data type never matches an expected
     * primitive type.
     */
    @Test
    public void isExpectedTypeNonPrimitiveTest() {
        final ParquetDataType complexParquetDataType = mock(ParquetDataType.class);
        when(complexParquetDataType.getParquetType()).thenReturn(COMPLEX_TYPE);
        assertFalse(ParquetValue.isExpectedType(
                null,
                complexParquetDataType));
    }

    /**
     * {@code isExpectedType} matches a data type only against its own primitive
     * type name (int64 vs. int64 passes; int32 vs. int64 fails).
     */
    @Test
    public void isExpectedTypeTest() {
        final ParquetDataType int32DataType = ParquetDataType.fromType(REQUIRED_INT32_TYPE);
        final ParquetDataType int64DataType = ParquetDataType.fromType(REQUIRED_INT64_TYPE);
        assertTrue(ParquetValue.isExpectedType(
                int64DataType.getParquetType().asPrimitiveType().getPrimitiveTypeName(),
                int64DataType));
        assertFalse(ParquetValue.isExpectedType(
                int32DataType.getParquetType().asPrimitiveType().getPrimitiveTypeName(),
                int64DataType));
    }
}
| 2,480 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.action.ParquetRowMarshaller;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnInsight;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.ParquetRowWriter;
import com.amazonaws.c3r.utils.FileTestUtility;
import org.apache.parquet.schema.MessageType;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import java.io.IOException;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.function.Function;
import java.util.stream.Stream;
import static com.amazonaws.c3r.data.ClientDataType.BIGINT_BYTE_SIZE;
import static com.amazonaws.c3r.data.ClientDataType.INT_BYTE_SIZE;
import static com.amazonaws.c3r.utils.GeneralTestUtility.TEST_CONFIG_DATA_SAMPLE;
import static com.amazonaws.c3r.utils.ParquetTestUtility.readAllRows;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_DATE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT16_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT32_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT64_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_TIMESTAMP_UTC_NANO_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_DATE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT16_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT32_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT64_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_TIMESTAMP_NANO_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_TIMESTAMP_UTC_NANO_TYPE;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for the byte representations C3R derives from Parquet values, covering which
 * Parquet types are supported for fingerprint columns, how null values are handled,
 * and that integral types of different widths marshal to equivalent results.
 */
public class ParquetEquivalenceTypesTest {
    // Fingerprint-typed column insight used as the target column for all conversion calls.
    private static final ColumnInsight EMPTY_COLUMN_INSIGHT = new ColumnInsight(ColumnSchema.builder().type(ColumnType.FINGERPRINT)
            .sourceHeader(ColumnHeader.ofRaw("Empty")).build());

    // Null/non-null value pairs for each Parquet physical/logical type under test.
    private static final ParquetValue.Binary NULL_STRING_VALUE = new ParquetValue.Binary(ParquetDataType.fromType(OPTIONAL_STRING_TYPE),
            null);

    private static final ParquetValue.Binary STRING_VALUE = new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_STRING_TYPE),
            org.apache.parquet.io.api.Binary.fromReusedByteArray("hello".getBytes(StandardCharsets.UTF_8)));

    private static final ParquetValue.Boolean NULL_BOOLEAN_VALUE = new ParquetValue.Boolean(ParquetDataType.fromType(OPTIONAL_BOOLEAN_TYPE),
            null);

    private static final ParquetValue.Boolean BOOLEAN_VALUE = new ParquetValue.Boolean(ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE),
            true);

    private static final ParquetValue.Double NULL_DOUBLE_VALUE = new ParquetValue.Double(ParquetDataType.fromType(OPTIONAL_DOUBLE_TYPE),
            null);

    private static final ParquetValue.Double DOUBLE_VALUE = new ParquetValue.Double(ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE), 1.1);

    private static final ParquetValue.Float NULL_FLOAT_VALUE = new ParquetValue.Float(ParquetDataType.fromType(OPTIONAL_FLOAT_TYPE), null);

    private static final ParquetValue.Float FLOAT_VALUE = new ParquetValue.Float(ParquetDataType.fromType(REQUIRED_FLOAT_TYPE), 1.1F);

    private static final ParquetValue.Int32 NULL_INT_32_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT32_TYPE), null);

    private static final ParquetValue.Int32 INT_32_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT32_TYPE), 1);

    private static final ParquetValue.Int32 NULL_ANNOTATED_INT_32_VALUE = new ParquetValue.Int32(
            ParquetDataType.fromType(OPTIONAL_INT32_ANNOTATED_TYPE), null);

    private static final ParquetValue.Int32 ANNOTATED_INT_32_VALUE = new ParquetValue.Int32(
            ParquetDataType.fromType(REQUIRED_INT32_ANNOTATED_TYPE), 1);

    private static final ParquetValue.Int64 NULL_INT_64_VALUE = new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_INT64_TYPE), null);

    private static final ParquetValue.Int64 INT_64_VALUE = new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT64_TYPE), 1L);

    private static final ParquetValue.Int64 NULL_ANNOTATED_INT_64_VALUE = new ParquetValue.Int64(
            ParquetDataType.fromType(OPTIONAL_INT64_ANNOTATED_TYPE), null);

    private static final ParquetValue.Int64 ANNOTATED_INT_64_VALUE = new ParquetValue.Int64(
            ParquetDataType.fromType(REQUIRED_INT64_ANNOTATED_TYPE), 1L);

    private static final ParquetValue.Int32 NULL_DATE_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_DATE_TYPE), null);

    private static final ParquetValue.Int32 DATE_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_DATE_TYPE), 1);

    private static final ParquetValue.Int32 NULL_INT_16_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT16_TYPE), null);

    private static final ParquetValue.Int32 INT_16_VALUE = new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT16_TYPE), 1);

    private static final ParquetValue.Int64 NULL_UTC_TIMESTAMP_VALUE =
            new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_TIMESTAMP_UTC_NANO_TYPE), null);

    private static final ParquetValue.Int64 UTC_TIMESTAMP_VALUE =
            new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_TIMESTAMP_UTC_NANO_TYPE), 1L);

    // NOTE(review): this "non-UTC" null value is built from the UTC type, unlike its non-null
    // counterpart below (REQUIRED_TIMESTAMP_NANO_TYPE) — looks like a copy/paste; confirm whether
    // an OPTIONAL_TIMESTAMP_NANO_TYPE constant was intended.
    private static final ParquetValue.Int64 NULL_TIMESTAMP_VALUE =
            new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_TIMESTAMP_UTC_NANO_TYPE), null);

    private static final ParquetValue.Int64 TIMESTAMP_VALUE =
            new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_TIMESTAMP_NANO_TYPE), 1L);

    // Decoders that turn the converted bytes back into a printable value for comparison
    // against the ParquetValue's own toString().
    private static final Function<byte[], String> LONG_BYTES_TO_STRING = (val) -> String.valueOf(ByteBuffer.wrap(val).getLong());

    private static final Function<byte[], String> STRING_BYTES_TO_STRING = (val) -> new String(val, StandardCharsets.UTF_8);

    private static final Function<byte[], String> BOOLEAN_BYTES_TO_STRING = (val) -> ValueConverter.Boolean.fromBytes(val).toString();

    private static final Function<byte[], String> DATE_BYTES_TO_STRING = (val) -> String.valueOf(ByteBuffer.wrap(val).getInt());

    private static final Function<byte[], String> TIMESTAMP_BYTES_TO_STRING = (val) -> new BigInteger(val).toString();

    /**
     * Null-valued instances of types supported for fingerprint columns.
     *
     * @return one argument set per supported type
     */
    private static Stream<Arguments> nullSupportedTypes() {
        return Stream.of(
                Arguments.of(NULL_BOOLEAN_VALUE),
                Arguments.of(NULL_STRING_VALUE),
                Arguments.of(NULL_INT_32_VALUE),
                Arguments.of(NULL_ANNOTATED_INT_32_VALUE),
                Arguments.of(NULL_INT_64_VALUE),
                Arguments.of(NULL_ANNOTATED_INT_64_VALUE),
                Arguments.of(NULL_INT_16_VALUE),
                Arguments.of(NULL_DATE_VALUE)
        );
    }

    /**
     * Supported values paired with their expected converted byte length and a decoder.
     * Note all integral widths convert to BIGINT_BYTE_SIZE bytes except DATE.
     *
     * @return (value, expected byte length, bytes-to-string decoder) triples
     */
    private static Stream<Arguments> supportedTypes() {
        return Stream.of(
                Arguments.of(BOOLEAN_VALUE, 1, BOOLEAN_BYTES_TO_STRING),
                Arguments.of(STRING_VALUE, STRING_VALUE.byteLength(), STRING_BYTES_TO_STRING),
                Arguments.of(INT_16_VALUE, BIGINT_BYTE_SIZE, LONG_BYTES_TO_STRING),
                Arguments.of(INT_32_VALUE, BIGINT_BYTE_SIZE, LONG_BYTES_TO_STRING),
                Arguments.of(ANNOTATED_INT_32_VALUE, BIGINT_BYTE_SIZE, LONG_BYTES_TO_STRING),
                Arguments.of(INT_64_VALUE, BIGINT_BYTE_SIZE, LONG_BYTES_TO_STRING),
                Arguments.of(ANNOTATED_INT_64_VALUE, BIGINT_BYTE_SIZE, LONG_BYTES_TO_STRING),
                Arguments.of(DATE_VALUE, INT_BYTE_SIZE, DATE_BYTES_TO_STRING)
        );
    }

    /**
     * Null-valued instances of types NOT supported for fingerprint columns.
     *
     * @return one argument set per unsupported type
     */
    private static Stream<Arguments> nullUnsupportedTypes() {
        return Stream.of(
                Arguments.of(NULL_DOUBLE_VALUE),
                Arguments.of(NULL_FLOAT_VALUE),
                Arguments.of(NULL_TIMESTAMP_VALUE),
                Arguments.of(NULL_UTC_TIMESTAMP_VALUE)
        );
    }

    /**
     * Non-null instances of unsupported types.
     *
     * @return argument sets for the unsupported-type test
     */
    // NOTE(review): the timestamp rows pass three arguments while invalidTypesTest declares only
    // one parameter — verify JUnit accepts the surplus arguments or trim them to match the others.
    private static Stream<Arguments> unsupportedTypes() {
        return Stream.of(
                Arguments.of(DOUBLE_VALUE),
                Arguments.of(FLOAT_VALUE),
                Arguments.of(TIMESTAMP_VALUE, 1, TIMESTAMP_BYTES_TO_STRING),
                Arguments.of(UTC_TIMESTAMP_VALUE, 1, TIMESTAMP_BYTES_TO_STRING)
        );
    }

    /**
     * Supported values convert to the expected number of bytes, and decoding those bytes
     * reproduces the value's string form.
     *
     * @param value    supported Parquet value
     * @param length   expected converted byte length
     * @param toString decoder from converted bytes back to a string
     */
    @ParameterizedTest
    @MethodSource("supportedTypes")
    public void validTypesTest(final ParquetValue value, final int length, final Function toString) {
        final byte[] bytes = ValueConverter.getBytesForColumn(value, EMPTY_COLUMN_INSIGHT.getType());
        assertEquals(length, bytes.length);
        assertEquals(value.toString(), toString.apply(bytes));
    }

    /**
     * Null values of supported types convert to null (no byte representation).
     *
     * @param value null-valued supported Parquet value
     */
    @ParameterizedTest
    @MethodSource("nullSupportedTypes")
    public void nullValueTest(final ParquetValue value) {
        assertNull(ValueConverter.getBytesForColumn(value, EMPTY_COLUMN_INSIGHT.getType()));
    }

    /**
     * Unsupported types are rejected for fingerprint columns.
     *
     * @param value non-null unsupported Parquet value
     */
    @ParameterizedTest
    @MethodSource("unsupportedTypes")
    public void invalidTypesTest(final ParquetValue value) {
        assertThrows(C3rRuntimeException.class, () -> ValueConverter.getBytesForColumn(value, EMPTY_COLUMN_INSIGHT.getType()));
    }

    /**
     * Unsupported types are rejected even when the value itself is null.
     *
     * @param value null-valued unsupported Parquet value
     */
    @ParameterizedTest
    @MethodSource("nullUnsupportedTypes")
    public void nullInvalidTypesTest(final ParquetValue value) {
        assertThrows(C3rRuntimeException.class, () -> ValueConverter.getBytesForColumn(value, EMPTY_COLUMN_INSIGHT.getType()));
    }

    /**
     * End-to-end check that fingerprinting the same numeric value (27) stored as int16,
     * int32, annotated int32, int64 and annotated int64 produces identical marshalled output:
     * writes a one-row Parquet file, encrypts it, and compares the resulting column values.
     *
     * @throws IOException if temp files cannot be created
     */
    @Test
    public void marshallerWritesSameValuesTest() throws IOException {
        final ColumnHeader int16Header = new ColumnHeader("opt_int16");
        final ColumnHeader int32Header = new ColumnHeader("opt_int32");
        final ColumnHeader annotatedInt32Header = new ColumnHeader("opt_annotated_int32");
        final ColumnHeader int64Header = new ColumnHeader("opt_int64");
        final ColumnHeader annotatedInt64Header = new ColumnHeader("opt_annotated_int64");
        // All five columns are fingerprint columns so their outputs are directly comparable.
        final TableSchema schema = new MappedTableSchema(List.of(
                ColumnSchema.builder().type(ColumnType.FINGERPRINT).sourceHeader(int16Header).targetHeader(int16Header).build(),
                ColumnSchema.builder().type(ColumnType.FINGERPRINT).sourceHeader(int32Header).targetHeader(int32Header).build(),
                ColumnSchema.builder().type(ColumnType.FINGERPRINT).sourceHeader(annotatedInt32Header).targetHeader(annotatedInt32Header)
                        .build(),
                ColumnSchema.builder().type(ColumnType.FINGERPRINT).sourceHeader(int64Header).targetHeader(int64Header).build(),
                ColumnSchema.builder().type(ColumnType.FINGERPRINT).sourceHeader(annotatedInt64Header).targetHeader(annotatedInt64Header)
                        .build()
        ));
        // Same logical value (27) in every integral representation.
        final ParquetValue.Int32 int16 = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT16_TYPE), 27);
        final ParquetValue.Int32 int32 = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT32_TYPE), 27);
        final ParquetValue.Int32 annotatedInt32 = new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT32_ANNOTATED_TYPE), 27);
        final ParquetValue.Int64 int64 = new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_INT64_TYPE), 27L);
        final ParquetValue.Int64 annotatedInt64 = new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_INT64_ANNOTATED_TYPE), 27L);
        final HashMap<ColumnHeader, ParquetDataType> rowMap = new HashMap<>();
        rowMap.put(int16Header, int16.getParquetDataType());
        rowMap.put(int32Header, int32.getParquetDataType());
        rowMap.put(annotatedInt32Header, annotatedInt32.getParquetDataType());
        rowMap.put(int64Header, int64.getParquetDataType());
        rowMap.put(annotatedInt64Header, annotatedInt64.getParquetDataType());
        final ParquetRowFactory rowFactory = new ParquetRowFactory(rowMap);
        final Row<ParquetValue> row = rowFactory.newRow();
        row.putValue(int16Header, int16);
        row.putValue(int32Header, int32);
        row.putValue(annotatedInt32Header, annotatedInt32);
        row.putValue(int64Header, int64);
        row.putValue(annotatedInt64Header, annotatedInt64);
        final ParquetSchema parquetSchema = ParquetSchema.builder().messageType(
                new MessageType("IntegralTypes", List.of(
                        OPTIONAL_INT16_TYPE,
                        OPTIONAL_INT32_TYPE,
                        OPTIONAL_INT32_ANNOTATED_TYPE,
                        OPTIONAL_INT64_TYPE,
                        OPTIONAL_INT64_ANNOTATED_TYPE
                ))
        ).build();
        final String tempDir = FileTestUtility.createTempDir().toString();
        final Path input = FileTestUtility.resolve("input.parquet");
        final Path output = FileTestUtility.resolve("output.parquet");
        // Write the single-row input file that the marshaller will encrypt.
        final ParquetRowWriter writer = ParquetRowWriter.builder().targetName(input.toString()).parquetSchema(parquetSchema).build();
        writer.writeRow(row);
        writer.flush();
        writer.close();
        // preserveNulls=false plus permissive settings so the encrypt pass runs unimpeded.
        final ClientSettings settings =
                ClientSettings.builder()
                        .preserveNulls(false)
                        .allowDuplicates(true)
                        .allowJoinsOnColumnsWithDifferentNames(true)
                        .allowCleartext(true)
                        .build();
        final EncryptConfig config = EncryptConfig.builder()
                .sourceFile(input.toString())
                .targetFile(output.toString())
                .secretKey(TEST_CONFIG_DATA_SAMPLE.getKey())
                .salt(TEST_CONFIG_DATA_SAMPLE.getSalt())
                .tempDir(tempDir)
                .settings(settings)
                .tableSchema(schema)
                .overwrite(true)
                .build();
        final var marshaller = ParquetRowMarshaller.newInstance(config, ParquetConfig.DEFAULT);
        marshaller.marshal();
        marshaller.close();
        // All five fingerprint outputs must be identical since they encode the same value.
        final Row<ParquetValue> marshalledRow = readAllRows(output.toString()).get(0);
        final String int16Result = marshalledRow.getValue(int16Header).toString();
        final String int32Result = marshalledRow.getValue(int32Header).toString();
        final String annotatedInt32Result = marshalledRow.getValue(annotatedInt32Header).toString();
        final String int64Result = marshalledRow.getValue(int64Header).toString();
        final String annotatedInt64Result = marshalledRow.getValue(annotatedInt64Header).toString();
        assertEquals(int64Result, int16Result);
        assertEquals(int64Result, int32Result);
        assertEquals(int32Result, annotatedInt32Result);
        assertEquals(int64Result, annotatedInt64Result);
    }
}
| 2,481 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/data/ParquetSchemaTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.config.ColumnSchema;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.MappedTableSchema;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.junit.jupiter.api.Test;
import java.util.List;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Tests for {@code ParquetSchema}: construction from a {@code MessageType}, derivation of
 * target schemas from table schemas, and header normalization behavior.
 */
public class ParquetSchemaTest {
    /**
     * Builds the two-column message type ("int32", "string") shared by several tests.
     *
     * @return message type with an optional INT32 and an optional UTF-8 BINARY column
     */
    private static MessageType int32AndStringMessage() {
        return new MessageType("Empty", List.of(
                Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
                        .named("int32"),
                Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(LogicalTypeAnnotation.stringType())
                        .named("string")));
    }

    /**
     * Builds a two-column message type with mixed-case column names ("Name", "Age").
     *
     * @return message type used by the header-normalization tests
     */
    private static MessageType nameAndAgeMessage() {
        return new MessageType("NameAndAge", List.of(
                Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(LogicalTypeAnnotation.stringType())
                        .named("Name"),
                Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
                        .named("Age")));
    }

    @Test
    public void emptyParquetSchemaTest() {
        // A schema over a column-less message type exposes no columns anywhere.
        final ParquetSchema schema = new ParquetSchema(new MessageType("Empty", List.of()));
        assertEquals(0, schema.size());
        assertEquals(0, schema.getReconstructedMessageType().getColumns().size());
        assertEquals(0, schema.getHeaders().size());
        assertEquals(0, schema.getColumnParquetDataTypeMap().size());
    }

    @Test
    public void nonEmptyParquetSchemaTest() {
        // Each message-type column surfaces as a header plus its mapped Parquet data type.
        final MessageType message = int32AndStringMessage();
        final ParquetSchema schema = new ParquetSchema(message);
        assertEquals(2, schema.size());
        assertEquals(2, schema.getReconstructedMessageType().getColumns().size());
        assertEquals(
                List.of(new ColumnHeader("int32"), new ColumnHeader("string")),
                schema.getHeaders());
        final Map<ColumnHeader, ParquetDataType> expectedTypes = Map.of(
                new ColumnHeader("int32"), ParquetDataType.fromType(message.getType(0)),
                new ColumnHeader("string"), ParquetDataType.fromType(message.getType(1)));
        assertEquals(expectedTypes, schema.getColumnParquetDataTypeMap());
    }

    @Test
    public void parquetSchemaToEmptyTest() {
        // Deriving a target schema keeps only the columns named in the table schema.
        final ColumnSchema keepInt32Only = ColumnSchema.builder()
                .sourceHeader(new ColumnHeader("int32"))
                .type(ColumnType.CLEARTEXT)
                .build();
        final ParquetSchema derived = new ParquetSchema(int32AndStringMessage())
                .deriveTargetSchema(new MappedTableSchema(List.of(keepInt32Only)));
        assertEquals(1, derived.size());
        assertEquals(1, derived.getReconstructedMessageType().getColumns().size());
        assertEquals(1, derived.getHeaders().size());
        assertEquals(1, derived.getColumnParquetDataTypeMap().size());
    }

    @Test
    public void parquetSchemaToIncompatibleSchemaTest() {
        // A table schema referencing a header absent from the Parquet data is rejected.
        final ParquetSchema schema = new ParquetSchema(int32AndStringMessage());
        final ColumnSchema missingColumn = ColumnSchema.builder()
                .sourceHeader(new ColumnHeader("oops_this_header_does_not_exist"))
                .type(ColumnType.CLEARTEXT)
                .build();
        assertThrows(C3rIllegalArgumentException.class, () ->
                schema.deriveTargetSchema(new MappedTableSchema(List.of(missingColumn))));
    }

    @Test
    public void getHeadersTest() {
        // Default builder behavior: headers go through ColumnHeader's regular constructor.
        final ParquetSchema schema = ParquetSchema.builder().messageType(nameAndAgeMessage()).build();
        assertEquals(
                List.of(new ColumnHeader("Name"), new ColumnHeader("Age")),
                schema.getHeaders());
    }

    @Test
    public void getHeadersWithNormalizationTest() {
        // Explicitly requesting normalization lower-cases the header names.
        final ParquetSchema schema = ParquetSchema.builder()
                .messageType(nameAndAgeMessage())
                .skipHeaderNormalization(false)
                .build();
        assertEquals(
                List.of(new ColumnHeader("name"), new ColumnHeader("age")),
                schema.getHeaders());
    }

    @Test
    public void getHeadersWithoutNormalizationTest() {
        // Skipping normalization preserves the raw header casing.
        final ParquetSchema schema = ParquetSchema.builder()
                .messageType(nameAndAgeMessage())
                .skipHeaderNormalization(true)
                .build();
        assertEquals(
                List.of(ColumnHeader.ofRaw("Name"), ColumnHeader.ofRaw("Age")),
                schema.getHeaders());
    }
}
| 2,482 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/data/BadParquetAnnotationsTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertThrows;
/**
 * Verifies that constructing a {@code ParquetValue} over a primitive type carrying an
 * incompatible logical type annotation is rejected.
 */
public class BadParquetAnnotationsTest {
    /**
     * The logical annotations rejected by the BOOLEAN, DOUBLE and FLOAT physical types.
     *
     * @return annotations expected to trigger {@link IllegalStateException}
     */
    private static LogicalTypeAnnotation[] commonRejectedAnnotations() {
        return new LogicalTypeAnnotation[]{
                LogicalTypeAnnotation.stringType(),
                LogicalTypeAnnotation.intType(32, true),
                LogicalTypeAnnotation.decimalType(2, 7),
                LogicalTypeAnnotation.dateType(),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MICROS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS)};
    }

    @Test
    public void binaryBadAnnotationsTest() {
        // BINARY accepts string-like annotations but rejects integral/date/timestamp ones.
        final LogicalTypeAnnotation[] rejected = {
                LogicalTypeAnnotation.intType(32, true),
                LogicalTypeAnnotation.dateType(),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MICROS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS)};
        for (final LogicalTypeAnnotation annotation : rejected) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Binary(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                            .as(annotation).named("binary")), null));
        }
    }

    @Test
    public void booleanBadAnnotationTest() {
        // BOOLEAN carries no logical annotation; every common annotation is rejected.
        for (final LogicalTypeAnnotation annotation : commonRejectedAnnotations()) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Boolean(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.BOOLEAN)
                            .as(annotation).named("boolean")), null));
        }
    }

    @Test
    public void doubleBadAnnotationTest() {
        // DOUBLE carries no logical annotation; every common annotation is rejected.
        for (final LogicalTypeAnnotation annotation : commonRejectedAnnotations()) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Double(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
                            .as(annotation).named("double")), null));
        }
    }

    @Test
    public void floatBadAnnotationTest() {
        // FLOAT carries no logical annotation; every common annotation is rejected.
        for (final LogicalTypeAnnotation annotation : commonRejectedAnnotations()) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Float(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.FLOAT)
                            .as(annotation).named("float")), null));
        }
    }

    @Test
    public void int32BadAnnotationTest() {
        // INT32 allows integral/date annotations but rejects string and timestamp ones.
        final LogicalTypeAnnotation[] rejected = {
                LogicalTypeAnnotation.stringType(),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MICROS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS)};
        for (final LogicalTypeAnnotation annotation : rejected) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Int32(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.INT32)
                            .as(annotation).named("int32")), null));
        }
    }

    @Test
    public void int64BadAnnotationTest() {
        // INT64 rejects string and date annotations.
        final LogicalTypeAnnotation[] rejected = {
                LogicalTypeAnnotation.stringType(),
                LogicalTypeAnnotation.dateType()};
        for (final LogicalTypeAnnotation annotation : rejected) {
            assertThrows(IllegalStateException.class, () -> new ParquetValue.Int64(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
                            .as(annotation).named("int64")), null));
        }
    }

    @Test
    public void fixedLenByteArrayBadAnnotationTest() {
        // FIXED_LEN_BYTE_ARRAY is rejected outright (IllegalArgumentException), with or
        // without a logical annotation.
        final LogicalTypeAnnotation[] rejected = {
                LogicalTypeAnnotation.stringType(),
                LogicalTypeAnnotation.intType(32, true),
                LogicalTypeAnnotation.decimalType(2, 7),
                LogicalTypeAnnotation.dateType(),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.NANOS),
                LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MICROS)};
        for (final LogicalTypeAnnotation annotation : rejected) {
            assertThrows(IllegalArgumentException.class, () -> new ParquetValue.Binary(
                    ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
                            .as(annotation).named("fixed_len_byte_array")), null));
        }
        // Unannotated case.
        assertThrows(IllegalArgumentException.class, () -> new ParquetValue.Binary(
                ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
                        .named("fixed_len_byte_array")), null));
        // MILLIS timestamp case; keeps the original test's "int32" column label.
        assertThrows(IllegalArgumentException.class, () -> new ParquetValue.Binary(
                ParquetDataType.fromType(Types.optional(PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY)
                        .as(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
                        .named("int32")), null));
    }
}
| 2,483 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/test/java/com/amazonaws/c3r/data/ParquetDataTypeTest.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.LogicalTypeAnnotation;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.junit.jupiter.api.Test;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.COMPLEX_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_BYTE_ARRAY_DECIMAL_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_DATE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT32_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT64_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_INT8_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.OPTIONAL_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REPEATED_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_BINARY_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_BOOLEAN_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_BYTE_ARRAY_DECIMAL_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_DATE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_DOUBLE_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_FIXED_LEN_BYTE_ARRAY;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_FLOAT_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT32_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT64_ANNOTATED_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.REQUIRED_STRING_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.UNSIGNED_INT16_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.UNSIGNED_INT32_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.UNSIGNED_INT64_TYPE;
import static com.amazonaws.c3r.utils.ParquetTypeDefsTestUtility.UNSIGNED_INT8_TYPE;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
 * Unit tests for {@code ParquetDataType}: the static type-predicate helpers
 * ({@code isSupportedType}, {@code isStringType}, ...), conversion from raw Parquet
 * {@code Type}s via {@code fromType}, and the type-compatibility checks enforced by
 * the {@code ParquetValue} constructors.
 */
public class ParquetDataTypeTest {
    // Every Parquet primitive except INT96 is supported; group (complex) and
    // repeated types are rejected.
    @Test
    public void isSupportedTypeTest() {
        for (var primitiveType : PrimitiveType.PrimitiveTypeName.values()) {
            if (primitiveType == PrimitiveType.PrimitiveTypeName.INT96) {
                assertFalse(ParquetDataType.isSupportedType(
                        Types.required(PrimitiveType.PrimitiveTypeName.INT96).named("INT96")));
            } else if (primitiveType == PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY) {
                // This is its own case because the length parameter is needed.
                assertTrue(ParquetDataType.isSupportedType(REQUIRED_FIXED_LEN_BYTE_ARRAY), "isSupportedType " + primitiveType);
            } else {
                assertTrue(ParquetDataType.isSupportedType(Types.required(primitiveType).named(primitiveType.toString())),
                        "isSupportedType " + primitiveType);
            }
        }
        assertFalse(ParquetDataType.isSupportedType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isSupportedType(REPEATED_STRING_TYPE));
    }
    // Only binary columns annotated as strings count as string type; repetition
    // (required/optional/repeated) does not matter.
    @Test
    public void isStringTypeTest() {
        assertFalse(ParquetDataType.isStringType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isStringType(REQUIRED_INT64_TYPE));
        assertFalse(ParquetDataType.isStringType(REQUIRED_BINARY_TYPE));
        assertTrue(ParquetDataType.isStringType(REQUIRED_STRING_TYPE));
        assertTrue(ParquetDataType.isStringType(OPTIONAL_STRING_TYPE));
        assertTrue(ParquetDataType.isStringType(REPEATED_STRING_TYPE));
    }
    // INT64 columns (annotated or not) are big ints; INT32 is not.
    @Test
    public void isBigIntTypeTest() {
        assertFalse(ParquetDataType.isBigIntType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isBigIntType(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isBigIntType(REQUIRED_INT64_TYPE));
        assertTrue(ParquetDataType.isBigIntType(OPTIONAL_INT64_TYPE));
        assertTrue(ParquetDataType.isBigIntType(OPTIONAL_INT64_ANNOTATED_TYPE));
    }
    // Boolean detection for both required and optional columns.
    @Test
    public void isBooleanTypeTest() {
        assertFalse(ParquetDataType.isBooleanType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isBooleanType(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isBooleanType(REQUIRED_BOOLEAN_TYPE));
        assertTrue(ParquetDataType.isBooleanType(OPTIONAL_BOOLEAN_TYPE));
    }
    // Date detection requires the DATE logical annotation, not just INT32 storage.
    @Test
    public void isDateTypeTest() {
        assertFalse(ParquetDataType.isDateType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isDateType(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isDateType(REQUIRED_DATE_TYPE));
        assertTrue(ParquetDataType.isDateType(OPTIONAL_DATE_TYPE));
    }
    // Decimal detection via the DECIMAL logical annotation on byte arrays.
    @Test
    public void isDecimalTypeTest() {
        assertFalse(ParquetDataType.isDecimalType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isDecimalType(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isDecimalType(REQUIRED_BYTE_ARRAY_DECIMAL_TYPE));
        assertTrue(ParquetDataType.isDecimalType(OPTIONAL_BYTE_ARRAY_DECIMAL_TYPE));
    }
    // Double detection for both repetition kinds.
    @Test
    public void isDoubleTypeTest() {
        assertFalse(ParquetDataType.isDoubleType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isDoubleType(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isDoubleType(REQUIRED_DOUBLE_TYPE));
        assertTrue(ParquetDataType.isDoubleType(OPTIONAL_DOUBLE_TYPE));
    }
    // Float detection; DOUBLE must not be mistaken for FLOAT.
    @Test
    public void isFloatTypeTest() {
        assertFalse(ParquetDataType.isFloatType(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isFloatType(REQUIRED_DOUBLE_TYPE));
        assertTrue(ParquetDataType.isFloatType(REQUIRED_FLOAT_TYPE));
        assertTrue(ParquetDataType.isFloatType(OPTIONAL_FLOAT_TYPE));
    }
    // INT32 detection works with or without the signed-int annotation; INT64 is excluded.
    @Test
    public void isInt32TypeTest() {
        assertFalse(ParquetDataType.isInt32Type(COMPLEX_TYPE));
        assertFalse(ParquetDataType.isInt32Type(REQUIRED_INT64_TYPE));
        assertTrue(ParquetDataType.isInt32Type(REQUIRED_INT32_TYPE));
        assertTrue(ParquetDataType.isInt32Type(REQUIRED_INT32_ANNOTATED_TYPE));
        assertTrue(ParquetDataType.isInt32Type(OPTIONAL_INT32_TYPE));
    }
    // fromType produces equal instances for equal inputs, rejects complex types with
    // C3rIllegalArgumentException, and rejects unannotated binaries with C3rRuntimeException.
    // The resulting client data type must match the source type's annotation.
    @Test
    public void fromTypeTest() {
        assertEquals(
                ParquetDataType.fromType(REQUIRED_STRING_TYPE),
                ParquetDataType.fromType(REQUIRED_STRING_TYPE));
        assertEquals(
                ParquetDataType.fromType(REQUIRED_INT32_TYPE),
                ParquetDataType.fromType(REQUIRED_INT32_TYPE));
        assertEquals(ParquetDataType.fromType(REQUIRED_INT64_ANNOTATED_TYPE),
                ParquetDataType.fromType(REQUIRED_INT64_ANNOTATED_TYPE));
        assertThrows(C3rIllegalArgumentException.class, () ->
                ParquetDataType.fromType(COMPLEX_TYPE));
        final var parquetInt32 = ParquetDataType.fromType(REQUIRED_INT32_TYPE);
        assertNotEquals(ClientDataType.STRING, parquetInt32.getClientDataType());
        assertThrows(C3rRuntimeException.class, () -> ParquetDataType.fromType(REQUIRED_BINARY_TYPE));
        assertThrows(C3rRuntimeException.class, () -> ParquetDataType.fromType(COMPLEX_TYPE).getClientDataType());
        assertEquals(ClientDataType.STRING, ParquetDataType.fromType(REQUIRED_STRING_TYPE).getClientDataType());
        assertEquals(ClientDataType.STRING, ParquetDataType.fromType(OPTIONAL_STRING_TYPE).getClientDataType());
    }
    // toTypeWithName rebuilds the same primitive type (repetition + logical annotation)
    // under a new column name; repeated columns are rejected.
    @Test
    public void toTypeWithNameTest() {
        assertEquals(
                Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(LogicalTypeAnnotation.stringType())
                        .named("FOO"),
                ParquetDataType.fromType(OPTIONAL_STRING_TYPE)
                        .toTypeWithName("FOO")
        );
        assertEquals(
                Types.required(PrimitiveType.PrimitiveTypeName.BINARY)
                        .as(LogicalTypeAnnotation.stringType())
                        .named("FOO"),
                ParquetDataType.fromType(REQUIRED_STRING_TYPE)
                        .toTypeWithName("FOO")
        );
        assertEquals(
                Types.required(PrimitiveType.PrimitiveTypeName.INT32)
                        .as(LogicalTypeAnnotation.intType(32, true))
                        .named("FOO"),
                ParquetDataType.fromType(REQUIRED_INT32_ANNOTATED_TYPE).toTypeWithName("FOO")
        );
        assertThrows(C3rRuntimeException.class, () -> ParquetDataType.fromType(REPEATED_STRING_TYPE).toTypeWithName("FOO"));
    }
    @Test
    public void binaryConstructionTest() {
        // Binary values are not supported in Clean Rooms
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_BINARY_TYPE),
                Binary.fromString("hello"))
        );
    }
    // Each ParquetValue constructor rejects a mismatched column type and accepts a matching one.
    @Test
    public void booleanConstructionTest() {
        assertThrows(C3rRuntimeException.class, () ->
                new ParquetValue.Boolean(ParquetDataType.fromType(REQUIRED_INT32_TYPE), true));
        assertDoesNotThrow(() -> new ParquetValue.Boolean(ParquetDataType.fromType(REQUIRED_BOOLEAN_TYPE), false));
    }
    @Test
    public void doubleConstructionTest() {
        assertThrows(C3rRuntimeException.class, () ->
                new ParquetValue.Double(ParquetDataType.fromType(REQUIRED_INT32_TYPE), 2.71828));
        assertDoesNotThrow(() -> new ParquetValue.Double(ParquetDataType.fromType(REQUIRED_DOUBLE_TYPE), 2.71828));
    }
    @Test
    public void floatConstructionTest() {
        assertThrows(C3rRuntimeException.class, () ->
                new ParquetValue.Float(ParquetDataType.fromType(REQUIRED_INT32_TYPE), (float) 2.71828));
        assertDoesNotThrow(() -> new ParquetValue.Float(ParquetDataType.fromType(REQUIRED_FLOAT_TYPE), (float) 2.71828));
    }
    @Test
    public void int32ConstructionTest() {
        assertThrows(C3rRuntimeException.class, () ->
                new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT64_TYPE), 27));
        assertDoesNotThrow(() -> new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT32_TYPE), 27));
        assertDoesNotThrow(() -> new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT32_ANNOTATED_TYPE), 27));
        assertDoesNotThrow(() -> new ParquetValue.Int32(ParquetDataType.fromType(REQUIRED_INT32_ANNOTATED_TYPE), 27));
    }
    @Test
    public void int64ConstructionTest() {
        assertThrows(C3rRuntimeException.class, () ->
                new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT32_TYPE), (long) 271828));
        assertDoesNotThrow(() -> new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT64_TYPE), (long) 271828));
        assertDoesNotThrow(() -> new ParquetValue.Int64(ParquetDataType.fromType(OPTIONAL_INT64_ANNOTATED_TYPE), (long) 271828));
        assertDoesNotThrow(() -> new ParquetValue.Int64(ParquetDataType.fromType(REQUIRED_INT64_ANNOTATED_TYPE), (long) 271828));
    }
    // Signed 8-bit ints are accepted; unsigned 8-bit ints are not.
    @Test
    public void oneByteIntsConstructionTest() {
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Int32(ParquetDataType.fromType(UNSIGNED_INT8_TYPE), 8));
        assertDoesNotThrow(() -> new ParquetValue.Int32(ParquetDataType.fromType(OPTIONAL_INT8_TYPE), 8));
    }
    // Unsigned int annotations of any width are rejected.
    @Test
    public void unsignedIntsAreNotSupportedTest() {
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Int32(ParquetDataType.fromType(UNSIGNED_INT16_TYPE), 16));
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Int32(ParquetDataType.fromType(UNSIGNED_INT32_TYPE), 32));
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Int64(ParquetDataType.fromType(UNSIGNED_INT64_TYPE), 64L));
    }
    // Binary columns without a logical annotation cannot hold values.
    @Test
    public void unannotatedBinariesAreNotSupportedTest() {
        assertThrows(C3rRuntimeException.class, () -> new ParquetValue.Binary(ParquetDataType.fromType(REQUIRED_BINARY_TYPE),
                Binary.fromConstantByteArray(new byte[]{1, 2})));
    }
}
| 2,484 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/config/ParquetConfig.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.config;
import lombok.Builder;
import lombok.Value;
/**
 * Holds the Parquet-specific options supplied by a caller when processing Parquet files.
 */
@Value
@Builder
public class ParquetConfig {
    /**
     * Shared instance carrying only default values.
     */
    public static final ParquetConfig DEFAULT = new ParquetConfig(false);
    /**
     * Whether Parquet {@code Binary} values lacking a logical annotation should be read
     * as if they carried the string annotation. {@code null} means the option was never set.
     */
    private final Boolean binaryAsString;
    /**
     * Reports whether the caller supplied any Parquet-specific option. Used to validate
     * that CLI options given on the command line match the input file type.
     *
     * @return {@code true} if at least one option was explicitly configured
     */
    public boolean isSet() {
        return java.util.Objects.nonNull(binaryAsString);
    }
}
| 2,485 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/config/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Classes that contain extra information needed to perform cryptographic computations on Parquet data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.config; | 2,486 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/ParquetRowWriter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetDataType;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.parquet.ParquetWriterBuilder;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import org.apache.parquet.hadoop.ParquetWriter;
import java.io.IOException;
import java.util.Collection;
/**
 * Writes rows of Parquet values out to an underlying Parquet file.
 */
public final class ParquetRowWriter implements RowWriter<ParquetValue> {
    /**
     * Destination the Parquet data is written to.
     */
    @Getter
    private final String targetName;
    /**
     * Underlying Parquet writer that serializes each row to the target.
     */
    private final ParquetWriter<Row<ParquetValue>> rowWriter;
    /**
     * Column types plus the Parquet metadata describing the output file.
     *
     * @see ParquetDataType
     */
    private final ParquetSchema parquetSchema;
    /**
     * Opens a writer that emits Parquet data of the given schema to the target file.
     *
     * @param targetName Name of file to write to
     * @param parquetSchema Description of data types and metadata
     */
    @Builder
    private ParquetRowWriter(
            @NonNull final String targetName,
            @NonNull final ParquetSchema parquetSchema) {
        this.targetName = targetName;
        this.parquetSchema = parquetSchema;
        final org.apache.hadoop.fs.Path outputPath = new org.apache.hadoop.fs.Path(targetName);
        final ParquetWriterBuilder builder = new ParquetWriterBuilder(outputPath).withSchema(parquetSchema);
        rowWriter = builder.build();
    }
    /**
     * Gets the headers for the output file.
     *
     * @return The ColumnHeaders of the output file
     */
    @Override
    public Collection<ColumnHeader> getHeaders() {
        return parquetSchema.getHeaders();
    }
    /**
     * Write a record to the store.
     *
     * @param row Row to write with columns mapped to respective values
     */
    @Override
    public void writeRow(@NonNull final Row<ParquetValue> row) {
        try {
            rowWriter.write(row);
        } catch (IOException e) {
            throw new C3rRuntimeException("Error writing to file " + targetName + ".", e);
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        try {
            rowWriter.close();
        } catch (IOException e) {
            throw new C3rRuntimeException("Unable to close connection to Parquet file.", e);
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void flush() {
        // Nothing to do: the underlying ParquetWriter does not expose a flush operation.
    }
}
| 2,487 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/ParquetRowReader.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetDataType;
import com.amazonaws.c3r.data.ParquetRowFactory;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import com.amazonaws.c3r.io.parquet.ParquetRowMaterializer;
import lombok.Builder;
import lombok.Getter;
import lombok.NonNull;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Specific implementation for reading Parquet files in preparation for data marshalling.
 *
 * <p>
 * Rows are pulled lazily, one row group at a time, via {@link #refreshNextRow()}.
 */
public final class ParquetRowReader extends RowReader<ParquetValue> {
    /**
     * Maximum number of columns allowed.
     *
     * <p>
     * This is defined at the implementation layer and not the RowReader interface in order to allow tuning among different formats.
     */
    static final int MAX_COLUMN_COUNT = 10000;
    /**
     * Name of input file.
     */
    private final String sourceName;
    /**
     * Reads Parquet files from disk.
     */
    private final ParquetFileReader fileReader;
    /**
     * Data types for each column along with other metadata for Parquet.
     *
     * @see ParquetDataType
     */
    @Getter
    private final ParquetSchema parquetSchema;
    /**
     * Creates an empty row for Parquet data to be written in to.
     */
    private final ParquetRowFactory rowFactory;
    /**
     * Row group currently being read, or {@code null} once the end of the file is reached.
     */
    private PageReadStore rowGroup;
    /**
     * Size of the largest row group encountered while reading.
     */
    @Getter
    private int maxRowGroupSize;
    /**
     * Number of rows remaining in the current row group.
     */
    private long rowsLeftInGroup;
    /**
     * Whether {@code Binary} values without annotations should be processed like they have a string logical annotation.
     */
    private final Boolean binaryAsString;
    /**
     * Record reader for the current row group; replaced each time a new row group is loaded.
     */
    private RecordReader<Row<ParquetValue>> rowReader;
    /**
     * Whether the reader has been closed.
     */
    private boolean closed;
    /**
     * Data in the next row.
     */
    private Row<ParquetValue> nextRow;
    /**
     * Creates a record reader for a Parquet file.
     *
     * @param sourceName Path to be read as a Parquet file path
     * @throws C3rRuntimeException If {@code fileName} cannot be opened for reading
     * @deprecated Use the {@link #builder()} method for this class
     */
    @Deprecated
    public ParquetRowReader(@NonNull final String sourceName) {
        this(sourceName, false, null);
    }
    /**
     * Creates a record reader for a Parquet file.
     *
     * @param sourceName Path to be read as a Parquet file path
     * @param skipHeaderNormalization Whether to skip the normalization of read in headers
     * @param binaryAsString If {@code true}, treat unannounced binary values as strings
     * @throws C3rRuntimeException If {@code fileName} cannot be opened for reading
     */
    @Builder
    private ParquetRowReader(@NonNull final String sourceName,
                             final boolean skipHeaderNormalization,
                             final Boolean binaryAsString) {
        this.sourceName = sourceName;
        this.binaryAsString = binaryAsString;
        final var conf = new org.apache.hadoop.conf.Configuration();
        final org.apache.hadoop.fs.Path file = new org.apache.hadoop.fs.Path(sourceName);
        try {
            fileReader = ParquetFileReader.open(HadoopInputFile.fromPath(file, conf));
        } catch (FileNotFoundException e) {
            throw new C3rRuntimeException("Unable to find file " + sourceName + ".", e);
        } catch (IOException | RuntimeException e) {
            throw new C3rRuntimeException("Error reading from file " + sourceName + ".", e);
        }
        // The schema is taken from the file footer, so the whole file does not need to be scanned up front.
        parquetSchema = ParquetSchema.builder()
                .messageType(fileReader.getFooter().getFileMetaData().getSchema())
                .skipHeaderNormalization(skipHeaderNormalization)
                .binaryAsString(binaryAsString)
                .build();
        if (parquetSchema.getHeaders().size() > MAX_COLUMN_COUNT) {
            throw new C3rRuntimeException("Couldn't parse input file. Please verify that column count does not exceed " + MAX_COLUMN_COUNT
                    + ".");
        }
        final Map<ColumnHeader, ParquetDataType> columnTypeMap = parquetSchema.getHeaders().stream()
                .collect(Collectors.toMap(Function.identity(), parquetSchema::getColumnType));
        rowFactory = new ParquetRowFactory(columnTypeMap);
        // Prime the one-row lookahead so hasNext/peek work immediately after construction.
        refreshNextRow();
    }
    /**
     * Get the column headers.
     *
     * @return List of column headers
     */
    public List<ColumnHeader> getHeaders() {
        return parquetSchema.getHeaders();
    }
    /**
     * Loads the next row group from the file, if any, and sets up a fresh record reader for it.
     * Leaves {@code rowGroup} as {@code null} when the file is exhausted.
     *
     * @throws C3rRuntimeException If the next row group can't be read
     */
    private void loadNextRowGroup() {
        try {
            rowGroup = fileReader.readNextRowGroup();
        } catch (IOException e) {
            throw new C3rRuntimeException("Error while reading row group from " + fileReader.getFile(), e);
        }
        if (rowGroup != null) {
            rowsLeftInGroup = rowGroup.getRowCount();
            // Clamp the (long) row count to Integer.MAX_VALUE when tracking the largest group seen.
            maxRowGroupSize = Math.max(maxRowGroupSize,
                    rowsLeftInGroup > Integer.MAX_VALUE ? Integer.MAX_VALUE : Long.valueOf(rowsLeftInGroup).intValue());
            final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(parquetSchema.getReconstructedMessageType());
            rowReader = columnIO.getRecordReader(rowGroup, new ParquetRowMaterializer(parquetSchema, rowFactory));
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    public void close() {
        if (!closed) {
            try {
                fileReader.close();
            } catch (IOException e) {
                throw new C3rRuntimeException("Unable to close connection to file.", e);
            }
            closed = true;
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    protected void refreshNextRow() {
        // Advance to the next row group once the current one is drained.
        if (rowsLeftInGroup <= 0) {
            loadNextRowGroup();
        }
        if (rowGroup != null && rowsLeftInGroup > 0) {
            nextRow = rowReader.read();
            rowsLeftInGroup--;
        } else {
            // No more row groups: signal end of input.
            nextRow = null;
        }
    }
    /**
     * {@inheritDoc}
     */
    @Override
    protected Row<ParquetValue> peekNextRow() {
        // Return a copy so callers cannot mutate the buffered lookahead row.
        return nextRow != null ? nextRow.clone() : null;
    }
    /**
     * Describes where rows are being read from in a human-friendly fashion.
     *
     * @return Name of source being used
     */
    @Override
    public String getSourceName() {
        return sourceName;
    }
}
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/package-info.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support for reading and writing rows of Parquet data.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.io; | 2,489 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/parquet/ParquetRowMaterializer.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.parquet;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.data.RowFactory;
import lombok.Getter;
import lombok.NonNull;
import org.apache.parquet.io.api.Converter;
import org.apache.parquet.io.api.GroupConverter;
import org.apache.parquet.io.api.RecordMaterializer;
import java.util.List;
import java.util.stream.Collectors;
/**
* Takes raw Parquet data and turns it into usable Java values.
*/
public class ParquetRowMaterializer extends RecordMaterializer<Row<ParquetValue>> {
/**
* Creates a new row for Parquet values to be stored in.
*/
private final RowFactory<ParquetValue> rowFactory;
/**
* Converts raw data into Java values.
*/
private final ParquetGroupConverter root;
/**
* Set up a converter for a given schema specification along with a row generator for Parquet data.
*
* @param schema Description of how data maps to columns, including associated metadata for each type
* @param rowFactory Generate new empty rows to store Parquet data in
*/
public ParquetRowMaterializer(final ParquetSchema schema, final RowFactory<ParquetValue> rowFactory) {
this.rowFactory = rowFactory;
root = new ParquetGroupConverter(schema, rowFactory);
}
/**
* {@inheritDoc}
*/
@Override
public Row<ParquetValue> getCurrentRecord() {
return root.getRow();
}
/**
* Converter for Parquet data into values.
*
* @return Top level converter for transforming Parquet data into Java objects
*/
@Override
public GroupConverter getRootConverter() {
return root;
}
/**
* Converts raw Parquet primitive types into Java objects.
*/
private static class ParquetGroupConverter extends GroupConverter {
/**
* Converters for each supported primitive data type.
*/
private final List<ParquetPrimitiveConverter> converters;
/**
* Creates a new, empty row for Parquet values.
*/
private final RowFactory<ParquetValue> rowFactory;
/**
* Description of the data types plus metadata and which columns they map to.
*/
private final ParquetSchema schema;
/**
* Row of data currently being filled out.
*/
@Getter
private Row<ParquetValue> row;
/**
* Sets up for converting data from raw Parquet data into Java objects.
*
* @param schema Description of the data types plus metadata and which columns they map to
* @param rowFactory Creates a new, empty row for Parquet values
*/
ParquetGroupConverter(@NonNull final ParquetSchema schema, @NonNull final RowFactory<ParquetValue> rowFactory) {
this.schema = schema;
this.rowFactory = rowFactory;
converters = schema.getHeaders().stream()
.map(c -> new ParquetPrimitiveConverter(c, schema.getColumnType(c)))
.collect(Collectors.toList());
}
/**
* {@inheritDoc}
*/
@Override
public Converter getConverter(final int fieldIndex) {
return converters.get(fieldIndex);
}
/**
* {@inheritDoc}
*/
@Override
public void start() {
row = rowFactory.newRow();
for (var converter : converters) {
converter.setRow(row);
}
}
/**
* {@inheritDoc}
*/
@Override
public void end() {
if (row.size() != schema.getHeaders().size()) {
// Fill in any missing entries with explicit NULLs
for (var header : schema.getHeaders()) {
if (!row.hasColumn(header)) {
row.putBytes(header, null);
}
}
}
}
}
} | 2,490 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/parquet/ParquetPrimitiveConverter.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.parquet;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.data.ParquetDataType;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import lombok.Getter;
import lombok.NonNull;
import lombok.Setter;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.io.api.PrimitiveConverter;
/**
* Converts an individual Parquet entry/value, adding it to a specified row.
*/
class ParquetPrimitiveConverter extends PrimitiveConverter {
/**
* Name of the column.
*/
@Getter
private final ColumnHeader column;
/**
* Type of data stored in the column.
*/
private final ParquetDataType columnType;
/**
* Specific row location to store value.
*/
@Setter
private Row<ParquetValue> row;
/**
* Construct a converter for a Parquet value associated with a particular
* {@code column} and {@code row}.
*
* @param column The column the value will be associated with
* @param columnType Type of data in this column
*/
ParquetPrimitiveConverter(@NonNull final ColumnHeader column, @NonNull final ParquetDataType columnType) {
this.column = column;
this.columnType = columnType;
}
/**
* Puts the Parquet value into the corresponding column entry for this row.
*
* @param value Parquet primitive value
* @throws C3rRuntimeException if row does not exist or value in row is being overwritten
*/
private void addValue(final ParquetValue value) {
if (row == null) {
throw new C3rRuntimeException("ParquetPrimitiveConverter called without a row to populate!");
}
if (row.hasColumn(column)) {
throw new C3rRuntimeException("ParquetPrimitiveConverter is overwriting the column " + column + ".");
}
row.putValue(column, value);
}
/**
* Puts the specified Parquet binary value into the corresponding column entry for this row.
*
* @param value Parquet binary value
*/
@Override
public void addBinary(final Binary value) {
addValue(new ParquetValue.Binary(columnType, value));
}
/**
* Puts the specified Parquet boolean value into the corresponding column entry for this row.
*
* @param value Parquet boolean value
*/
@Override
public void addBoolean(final boolean value) {
addValue(new ParquetValue.Boolean(columnType, value));
}
/**
* Puts the specified Parquet double value into the corresponding column entry for this row.
*
* @param value Parquet double value
*/
@Override
public void addDouble(final double value) {
addValue(new ParquetValue.Double(columnType, value));
}
/**
* Puts the specified Parquet float value into the corresponding column entry for this row.
*
* @param value Parquet float value
*/
@Override
public void addFloat(final float value) {
addValue(new ParquetValue.Float(columnType, value));
}
/**
* Puts the specified Parquet integer (int32) value into the corresponding column entry for this row.
*
* @param value Parquet integer value
*/
@Override
public void addInt(final int value) {
addValue(new ParquetValue.Int32(columnType, value));
}
/**
* Puts the specified Parquet long (int64) value into the corresponding column entry for this row.
*
* @param value Parquet long value
*/
@Override
public void addLong(final long value) {
addValue(new ParquetValue.Int64(columnType, value));
}
} | 2,491 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/parquet/ParquetWriterBuilder.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.parquet;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import com.amazonaws.c3r.exception.C3rRuntimeException;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.WriteSupport;
import java.io.IOException;
/**
* Builds a writer for Parquet data.
*/
public class ParquetWriterBuilder extends ParquetWriter.Builder<Row<ParquetValue>, ParquetWriterBuilder> {
/**
* Description of parquet data and how it maps to columns.
*/
private ParquetSchema schema = null;
/**
* Sets up a writer for putting data in the specified file. Will overwrite file if it already exists.
*
* @param file Location of the file to write to
*/
public ParquetWriterBuilder(final org.apache.hadoop.fs.Path file) {
super(file);
super.withWriteMode(ParquetFileWriter.Mode.OVERWRITE);
}
/**
* Associate a schema describing data with the file.
*
* @param schema Description of parquet data and how it maps to columns
* @return Parquet data writer with information about the data its writing
*/
public ParquetWriterBuilder withSchema(final ParquetSchema schema) {
this.schema = schema;
return this;
}
/**
* Override the Parquet {@code build} method to trap any I/O errors.
*
* @return A file writer for Parquet files with the specified schema
* @throws C3rRuntimeException If there's an error while creating the file reader
*/
@Override
public ParquetWriter<Row<ParquetValue>> build() {
try {
return super.build();
} catch (IOException e) {
throw new C3rRuntimeException("Error while creating Parquet file reader.", e);
}
}
/**
* Creates a writer to be used for Parquet data output.
*
* @return Created writer will all needed information about the data
*/
@Override
protected ParquetWriterBuilder self() {
return this;
}
/**
* Create a {@link ParquetRowWriteSupport} instance with the specified schema and system configuration information.
* (System configuration info currently is not needed.)
*
* @param conf System configuration information
* @return Object to write Parquet data out to disc
*/
@Override
protected WriteSupport<Row<ParquetValue>> getWriteSupport(final Configuration conf) {
return new ParquetRowWriteSupport(schema);
}
} | 2,492 |
0 | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io | Create_ds/c3r/c3r-sdk-parquet/src/main/java/com/amazonaws/c3r/io/parquet/ParquetRowWriteSupport.java | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.io.parquet;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.data.Row;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.io.api.RecordConsumer;
import java.util.HashMap;
/**
* Convert Java values to raw Parquet primitive equivalents for writing.
*/
class ParquetRowWriteSupport extends WriteSupport<Row<ParquetValue>> {
/**
* Description of the data types plus metadata and which columns they map to.
*/
private final ParquetSchema schema;
/**
* Writes Parquet data.
*/
private RecordConsumer consumer;
/**
* Create object for writing Parquet data.
*
* @param schema Description of parquet data and how it maps to columns
*/
ParquetRowWriteSupport(final ParquetSchema schema) {
this.schema = schema;
}
/**
* Call first to configure Parquet data writer.
*
* @param configuration the job's configuration
* @return Information to write to file
*/
@Override
public WriteContext init(final Configuration configuration) {
return new WriteContext(schema.getReconstructedMessageType(), new HashMap<>());
}
/**
* Prepare to write a new row of Parquet data to file.
*
* @param recordConsumer the recordConsumer to write to
*/
@Override
public void prepareForWrite(final RecordConsumer recordConsumer) {
consumer = recordConsumer;
}
/**
* Write a single record to the consumer specified by {@link #prepareForWrite}.
*
* @param row One row of Parquet data to write
*/
@Override
public void write(final Row<ParquetValue> row) {
consumer.startMessage();
writeRowContent(row);
consumer.endMessage();
}
/**
* Write the content for each value in a row.
*
* @param row Row of Parquet data to write
*/
private void writeRowContent(final Row<ParquetValue> row) {
row.forEach((column, value) -> {
if (!value.isNull()) {
final int i = schema.getColumnIndex(column);
consumer.startField(column.toString(), i);
value.writeValue(consumer);
consumer.endField(column.toString(), i);
}
});
}
} | 2,493 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
* Support classes for managing I/O access to Parquet files. Handles the type metadata that needs to be associated with each value,
* as well as conversion to/from the binary format of Parquet to Java values.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.io.parquet; | 2,494 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.DecryptConfig;
import com.amazonaws.c3r.data.ParquetRowFactory;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.ParquetRowReader;
import com.amazonaws.c3r.io.ParquetRowWriter;
import lombok.Builder;
import lombok.NonNull;
import java.util.Map;
/**
* Used to instantiate an instance of {@link RowUnmarshaller} that handles Parquet data. {@link RowUnmarshaller} provides all the
* functionality except for creating the Parquet file reader ({@link ParquetRowReader}), writer ({@link ParquetRowWriter}) and
* {@link ParquetRowFactory} which is done here.
*/
public final class ParquetRowUnmarshaller {
    /**
     * Hidden constructor: this utility class only exposes static factory methods.
     */
    private ParquetRowUnmarshaller() {
    }

    /**
     * Build a Parquet {@link RowUnmarshaller} from a {@link DecryptConfig}, verifying
     * first that the configuration actually targets the Parquet file format.
     *
     * @param config Configuration information on how data will be transformed, file locations, etc.
     * @return Parquet data unmarshaller
     * @throws C3rIllegalArgumentException If the configuration is for a non-Parquet format
     * @see DecryptConfig
     */
    public static RowUnmarshaller<ParquetValue> newInstance(@NonNull final DecryptConfig config) {
        final FileFormat format = config.getFileFormat();
        if (format != FileFormat.PARQUET) {
            throw new C3rIllegalArgumentException("Expected a PARQUET decryption configuration, but found "
                    + format + ".");
        }
        return ParquetRowUnmarshaller.builder()
                .sourceFile(config.getSourceFile())
                .targetFile(config.getTargetFile())
                .transformers(Transformer.initTransformers(config))
                .build();
    }

    /**
     * Build a Parquet {@link RowUnmarshaller} from individually specified settings.
     *
     * <p>
     * Header normalization is skipped because the output columns must match the
     * input Parquet schema exactly.
     *
     * @param sourceFile Input Parquet file location
     * @param targetFile Where to write Parquet data
     * @param transformers Cryptographic transforms that are possible to use
     * @return Parquet data unmarshaller
     */
    @Builder
    private static RowUnmarshaller<ParquetValue> newInstance(
            @NonNull final String sourceFile,
            @NonNull final String targetFile,
            @NonNull final Map<ColumnType, Transformer> transformers) {
        final ParquetRowReader inputReader = ParquetRowReader.builder()
                .sourceName(sourceFile)
                .skipHeaderNormalization(true)
                .build();
        final ParquetSchema parquetSchema = inputReader.getParquetSchema();
        final ParquetRowWriter outputWriter = ParquetRowWriter.builder()
                .targetName(targetFile)
                .parquetSchema(parquetSchema)
                .build();
        return RowUnmarshaller.<ParquetValue>builder()
                .inputReader(inputReader)
                .rowFactory(new ParquetRowFactory(parquetSchema.getColumnParquetDataTypeMap()))
                .outputWriter(outputWriter)
                .transformers(transformers)
                .build();
    }
}
| 2,495 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.action;
import com.amazonaws.c3r.Transformer;
import com.amazonaws.c3r.config.ClientSettings;
import com.amazonaws.c3r.config.ColumnType;
import com.amazonaws.c3r.config.EncryptConfig;
import com.amazonaws.c3r.config.ParquetConfig;
import com.amazonaws.c3r.config.TableSchema;
import com.amazonaws.c3r.data.ParquetRowFactory;
import com.amazonaws.c3r.data.ParquetSchema;
import com.amazonaws.c3r.data.ParquetValue;
import com.amazonaws.c3r.exception.C3rIllegalArgumentException;
import com.amazonaws.c3r.io.FileFormat;
import com.amazonaws.c3r.io.ParquetRowReader;
import com.amazonaws.c3r.io.ParquetRowWriter;
import lombok.Builder;
import lombok.NonNull;
import java.util.Map;
/**
* Used to instantiate an instance of {@link RowMarshaller} that handles Parquet data. {@link RowMarshaller} provides all the functionality
* except for creating the Parquet file reader ({@link ParquetRowReader}), writer ({@link ParquetRowWriter}) and {@link ParquetRowFactory}
* which is done here.
*/
public final class ParquetRowMarshaller {
    /**
     * Hidden constructor: this utility class only exposes static factory methods.
     */
    private ParquetRowMarshaller() {
    }

    /**
     * Build a Parquet {@link RowMarshaller} from an {@link EncryptConfig}, verifying
     * first that the configuration actually targets the Parquet file format.
     *
     * @param config Configuration information on how data will be transformed, file locations, etc.
     * @param parquetConfig Configuration information specific to Parquet data transformation and processing
     * @return Parquet data marshaller
     * @throws C3rIllegalArgumentException If the configuration is for a non-Parquet format
     * @see EncryptConfig
     */
    public static RowMarshaller<ParquetValue> newInstance(@NonNull final EncryptConfig config, @NonNull final ParquetConfig parquetConfig) {
        final FileFormat format = config.getFileFormat();
        if (format != FileFormat.PARQUET) {
            throw new C3rIllegalArgumentException("Expected a PARQUET encryption configuration, but found "
                    + format + ".");
        }
        return ParquetRowMarshaller.builder()
                .sourceFile(config.getSourceFile())
                .targetFile(config.getTargetFile())
                .settings(config.getSettings())
                .schema(config.getTableSchema())
                .tempDir(config.getTempDir())
                .transforms(Transformer.initTransformers(config))
                .binaryAsString(parquetConfig.getBinaryAsString())
                .build();
    }

    /**
     * Build a Parquet {@link RowMarshaller} from individually specified settings via {@link Builder}.
     *
     * <p>
     * The output schema is derived from the source file's Parquet schema combined
     * with the table schema's column mappings.
     *
     * @param sourceFile Input Parquet data file location
     * @param targetFile Where to write Parquet data
     * @param tempDir Where to write temporary files if needed
     * @param settings Cryptographic settings for the clean room
     * @param schema Specification of how data in the input file will be transformed into encrypted data in the output file
     * @param transforms Cryptographic transforms that are possible to use
     * @param binaryAsString If {@code true}, treat unannounced binary values as strings
     * @return Parquet data marshaller
     * @throws C3rIllegalArgumentException If given a non-mapped (positional) table schema
     */
    @Builder
    private static RowMarshaller<ParquetValue> newInstance(
            @NonNull final String sourceFile,
            @NonNull final String targetFile,
            @NonNull final String tempDir,
            @NonNull final ClientSettings settings,
            @NonNull final TableSchema schema,
            @NonNull final Map<ColumnType, Transformer> transforms,
            final Boolean binaryAsString) {
        // Positional (header-less) schemas are only meaningful for CSV input.
        if (schema.getPositionalColumnHeaders() != null) {
            throw new C3rIllegalArgumentException("Parquet files require a mapped table schema.");
        }
        final ParquetRowReader inputReader = ParquetRowReader.builder()
                .sourceName(sourceFile)
                .binaryAsString(binaryAsString)
                .build();
        final ParquetSchema targetParquetSchema = inputReader.getParquetSchema().deriveTargetSchema(schema);
        final ParquetRowWriter outputWriter = ParquetRowWriter.builder()
                .targetName(targetFile)
                .parquetSchema(targetParquetSchema)
                .build();
        return RowMarshaller.<ParquetValue>builder()
                .settings(settings)
                .schema(schema)
                .tempDir(tempDir)
                .inputReader(inputReader)
                .rowFactory(new ParquetRowFactory(targetParquetSchema.getColumnParquetDataTypeMap()))
                .outputWriter(outputWriter)
                .transformers(transforms)
                .build();
    }
}
| 2,496 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
/**
 * This package contains classes used to marshal (encrypt) and unmarshal (decrypt) data to and from the clean room for Parquet files.
* {@link com.amazonaws.c3r.action.RowMarshaller} handles the logic of marshalling data outside of anything having to do with
* Parquet itself and {@link com.amazonaws.c3r.action.RowUnmarshaller} does the same for unmarshalling.
*
* <p>
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*/
package com.amazonaws.c3r.action; | 2,497 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnHeader;
import com.amazonaws.c3r.internal.Nonce;
import lombok.NonNull;
import java.util.Map;
import java.util.Objects;
/**
* Row of Parquet data.
*/
public final class ParquetRow extends Row<ParquetValue> {
    /**
     * Immutable map from column name to the Parquet data type stored in that column.
     */
    private final Map<ColumnHeader, ParquetDataType> columnTypes;

    /**
     * Create a row that stores Parquet values for the given columns.
     *
     * @param columnTypes Maps column name to data type in column
     */
    public ParquetRow(final Map<ColumnHeader, ParquetDataType> columnTypes) {
        this.columnTypes = Map.copyOf(columnTypes);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void putBytes(@NonNull final ColumnHeader column, final byte[] bytes) {
        final ParquetDataType dataType = columnTypes.get(column);
        if (dataType == null) {
            // A column outside the known schema indicates a programming error upstream.
            throw new NullPointerException("Internal error! Unknown column: " + column);
        }
        putValue(column, ParquetValue.fromBytes(dataType, bytes));
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void putNonce(@NonNull final ColumnHeader nonceColumn, final Nonce nonce) {
        putValue(nonceColumn, ParquetValue.fromBytes(ParquetDataType.NONCE_TYPE, nonce.getBytes()));
    }

    /**
     * Produce a shallow copy of this row: same column types, same value references.
     *
     * @return New row containing every value of this one
     */
    @Override
    public Row<ParquetValue> clone() {
        final ParquetRow copy = new ParquetRow(columnTypes);
        for (final ColumnHeader header : getHeaders()) {
            copy.putValue(header, getValue(header));
        }
        return copy;
    }
}
| 2,498 |
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package com.amazonaws.c3r.data;
import com.amazonaws.c3r.config.ColumnHeader;
import java.util.Map;
/**
* Factory for creating empty Parquet rows with the given column types.
*/
public class ParquetRowFactory implements RowFactory<ParquetValue> {
    /**
     * Immutable mapping of each column name to its Parquet data type, shared by all created rows.
     */
    private final Map<ColumnHeader, ParquetDataType> columnDataTypes;

    /**
     * Set up the factory with the type information every produced row will carry.
     *
     * @param columnTypes Data type associated with a column name
     */
    public ParquetRowFactory(final Map<ColumnHeader, ParquetDataType> columnTypes) {
        columnDataTypes = Map.copyOf(columnTypes);
    }

    /**
     * Produce an empty row for the caller to populate.
     *
     * @return An empty Row for storing data in
     */
    @Override
    public Row<ParquetValue> newRow() {
        return new ParquetRow(columnDataTypes);
    }
}
| 2,499 |