index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/IcebergPigInputFormat.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.CombinedScanTask;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableScan;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.commons.lang.SerializationUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.impl.util.ObjectSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.pig.SchemaUtil.project;
/**
 * MapReduce {@link InputFormat} that plans an Iceberg table scan and reads the resulting
 * data files (currently Parquet only) as Pig values.
 *
 * <p>Scan state is passed through the job configuration as serialized objects: the table
 * {@link Schema} under {@value #ICEBERG_SCHEMA}, an optional list of projected field names
 * under {@value #ICEBERG_PROJECTED_FIELDS}, and an optional filter {@link Expression}
 * under {@value #ICEBERG_FILTER_EXPRESSION}.
 */
public class IcebergPigInputFormat<T> extends InputFormat<Void, T> {
  private static final Logger LOG = LoggerFactory.getLogger(IcebergPigInputFormat.class);

  // configuration keys used to pass serialized scan state from the front end
  static final String ICEBERG_SCHEMA = "iceberg.schema";
  static final String ICEBERG_PROJECTED_FIELDS = "iceberg.projected.fields";
  static final String ICEBERG_FILTER_EXPRESSION = "iceberg.filter.expression";

  private Table table;
  private List<InputSplit> splits;

  IcebergPigInputFormat(Table table) {
    this.table = table;
  }

  /**
   * Plans the table scan and wraps each {@link CombinedScanTask} in an {@link IcebergSplit}.
   * The result is cached so repeated calls do not re-plan the scan.
   */
  @Override
  @SuppressWarnings("unchecked")
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    if (splits != null) {
      LOG.info("Returning cached splits: " + splits.size());
      return splits;
    }

    splits = Lists.newArrayList();

    TableScan scan = table.newScan();

    // apply the pushed-down filter expression, if one was serialized into the job conf
    Expression filterExpression = (Expression) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_FILTER_EXPRESSION));
    if (filterExpression != null) {
      LOG.info("Filter Expression: " + filterExpression);
      scan = scan.filter(filterExpression);
    }

    // wrap each combined scan task in a serializable split
    try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) {
      tasks.forEach(scanTask -> splits.add(new IcebergSplit(scanTask)));
    }

    return splits;
  }

  @Override
  public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) {
    return new IcebergRecordReader<>();
  }

  /** Serializable split that carries a single {@link CombinedScanTask}. */
  private static class IcebergSplit extends InputSplit implements Writable {
    private CombinedScanTask task;

    IcebergSplit(CombinedScanTask task) {
      this.task = task;
    }

    // required by Writable: instantiated reflectively, then populated via readFields
    public IcebergSplit() {
    }

    @Override
    public long getLength() {
      return task.files().stream().mapToLong(FileScanTask::length).sum();
    }

    @Override
    public String[] getLocations() {
      // no locality information is tracked for Iceberg data files
      return new String[0];
    }

    @Override
    public void write(DataOutput out) throws IOException {
      // length-prefixed Java serialization of the scan task
      byte[] data = SerializationUtils.serialize(this.task);
      out.writeInt(data.length);
      out.write(data);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      byte[] data = new byte[in.readInt()];
      in.readFully(data);
      this.task = (CombinedScanTask) SerializationUtils.deserialize(data);
    }
  }

  /** Reads the files of one {@link CombinedScanTask}, one file at a time. */
  public class IcebergRecordReader<T> extends RecordReader<Void, T> {
    private TaskAttemptContext context;
    private Iterator<FileScanTask> tasks;
    private FileScanTask currentTask;
    private CloseableIterable reader;
    private Iterator<T> recordIterator;
    private T currentRecord;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
      this.context = context;

      CombinedScanTask task = ((IcebergSplit) split).task;
      this.tasks = task.files().iterator();

      advance();
    }

    /**
     * Closes the reader for the current file, if any, and opens a reader for the next
     * file task.
     *
     * @return true if a new file reader was opened, false when all tasks are exhausted
     */
    @SuppressWarnings("unchecked")
    private boolean advance() throws IOException {
      if (reader != null) {
        reader.close();
      }

      if (!tasks.hasNext()) {
        return false;
      }

      currentTask = tasks.next();

      Schema tableSchema = (Schema) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_SCHEMA));
      List<String> projectedFields = (List<String>) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_PROJECTED_FIELDS));
      Schema projectedSchema = projectedFields != null ? project(tableSchema, projectedFields) : tableSchema;

      PartitionSpec spec = currentTask.asFileScanTask().spec();
      DataFile file = currentTask.file();
      InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration());

      // identity-partition source columns are not stored in the data files; their constant
      // values must be joined back in from the file's partition data
      Set<Integer> idColumns = spec.identitySourceIds();
      boolean hasJoinedPartitionColumns = !idColumns.isEmpty();

      switch (file.format()) {
        case PARQUET:
          Map<Integer, Object> partitionValueMap = Maps.newHashMap();

          if (hasJoinedPartitionColumns) {
            // read everything except identity columns from the file; map each projected
            // identity column's tuple position to its partition value
            Schema readSchema = TypeUtil.selectNot(projectedSchema, idColumns);
            Schema partitionSchema = TypeUtil.select(tableSchema, idColumns);
            Schema projectedPartitionSchema = TypeUtil.select(projectedSchema, idColumns);

            for (Types.NestedField field : projectedPartitionSchema.columns()) {
              int tupleIndex = projectedSchema.columns().indexOf(field);
              int partitionIndex = partitionSchema.columns().indexOf(field);

              Object partitionValue = file.partition().get(partitionIndex, Object.class);
              partitionValueMap.put(tupleIndex, convertPartitionValue(field.type(), partitionValue));
            }

            reader = Parquet.read(inputFile)
                .project(readSchema)
                .split(currentTask.start(), currentTask.length())
                .filter(currentTask.residual())
                .createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, readSchema, partitionValueMap))
                .build();
          } else {
            reader = Parquet.read(inputFile)
                .project(projectedSchema)
                .split(currentTask.start(), currentTask.length())
                .filter(currentTask.residual())
                .createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap))
                .build();
          }

          recordIterator = reader.iterator();
          break;
        default:
          throw new UnsupportedOperationException("Unsupported file format: " + file.format());
      }

      return true;
    }

    /** Converts a partition value to the representation Pig expects in tuples. */
    private Object convertPartitionValue(Type type, Object value) {
      if (type.typeId() == Types.BinaryType.get().typeId()) {
        // Copy the remaining bytes instead of exposing the buffer's backing array:
        // ByteBuffer.get(byte[]) returns the buffer itself, so the old
        // buffer.get(...).array() call handed Pig the full backing array, ignoring the
        // buffer's position/limit (and failing outright for direct buffers).
        ByteBuffer buffer = ((ByteBuffer) value).duplicate();
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes);
        return new DataByteArray(bytes);
      }

      return value;
    }

    @Override
    public boolean nextKeyValue() throws IOException {
      // loop so that empty splits (recordIterator still null) and file tasks that yield
      // no records are skipped instead of throwing
      while (recordIterator == null || !recordIterator.hasNext()) {
        if (!advance()) {
          return false;
        }
      }

      currentRecord = recordIterator.next();
      return true;
    }

    @Override
    public Void getCurrentKey() {
      // keys are unused; all data is carried in the value
      return null;
    }

    @Override
    public T getCurrentValue() {
      return currentRecord;
    }

    @Override
    public float getProgress() {
      // progress reporting is not implemented
      return 0;
    }

    @Override
    public void close() throws IOException {
      // release the reader for the file currently being read, if one is open
      if (reader != null) {
        reader.close();
      }
    }
  }
}
| 1,900 |
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/SchemaUtil.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.logicalLayer.FrontendException;
import java.io.IOException;
import java.util.List;
import static java.lang.String.format;
import static com.netflix.iceberg.types.Types.ListType;
import static com.netflix.iceberg.types.Types.MapType;
import static com.netflix.iceberg.types.Types.NestedField;
import static com.netflix.iceberg.types.Types.StructType;
/**
 * Utilities to convert an Iceberg {@link Schema} into a Pig {@link ResourceSchema} and to
 * project an Iceberg schema down to a list of required columns.
 */
public class SchemaUtil {

  // utility class: static methods only
  private SchemaUtil() {
  }

  /** Converts an Iceberg schema to the equivalent Pig resource schema. */
  public static ResourceSchema convert(Schema icebergSchema) throws IOException {
    ResourceSchema result = new ResourceSchema();
    result.setFields(convertFields(icebergSchema.columns()));
    return result;
  }

  private static ResourceFieldSchema[] convertFields(List<Types.NestedField> fields) throws IOException {
    List<ResourceFieldSchema> result = Lists.newArrayList();

    for (Types.NestedField nf : fields) {
      result.add(convert(nf));
    }

    return result.toArray(new ResourceFieldSchema[0]);
  }

  private static ResourceFieldSchema convert(Types.NestedField field) throws IOException {
    ResourceFieldSchema result = convert(field.type());
    result.setName(field.name());
    // record the Iceberg field id so it can be recovered from the Pig schema
    result.setDescription(format("FieldId: %s", field.fieldId()));
    return result;
  }

  private static ResourceFieldSchema convert(Type type) throws IOException {
    ResourceFieldSchema result = new ResourceFieldSchema();
    result.setType(convertType(type));

    // complex types (struct/list/map) carry a nested schema
    if (!type.isPrimitiveType()) {
      result.setSchema(convertComplex(type));
    }

    return result;
  }

  /** Maps an Iceberg type id to the corresponding Pig {@link DataType} constant. */
  private static byte convertType(Type type) throws IOException {
    switch (type.typeId()) {
      case BOOLEAN: return DataType.BOOLEAN;
      case INTEGER: return DataType.INTEGER;
      case LONG: return DataType.LONG;
      case FLOAT: return DataType.FLOAT;
      case DOUBLE: return DataType.DOUBLE;
      case TIMESTAMP: return DataType.CHARARRAY;
      case DATE: return DataType.CHARARRAY;
      case STRING: return DataType.CHARARRAY;
      case FIXED: return DataType.BYTEARRAY;
      case BINARY: return DataType.BYTEARRAY;
      case DECIMAL: return DataType.BIGDECIMAL;
      case STRUCT: return DataType.TUPLE;
      case LIST: return DataType.BAG;
      case MAP: return DataType.MAP;
      default:
        throw new FrontendException("Unsupported primitive type:" + type);
    }
  }

  private static ResourceSchema convertComplex(Type type) throws IOException {
    ResourceSchema result = new ResourceSchema();

    switch (type.typeId()) {
      case STRUCT:
        StructType structType = type.asStructType();

        List<ResourceFieldSchema> fields = Lists.newArrayList();

        for (Types.NestedField f : structType.fields()) {
          fields.add(convert(f));
        }

        result.setFields(fields.toArray(new ResourceFieldSchema[0]));

        return result;
      case LIST:
        ListType listType = type.asListType();

        ResourceFieldSchema[] elementFieldSchemas = new ResourceFieldSchema[]{convert(listType.elementType())};

        if (listType.elementType().isStructType()) {
          result.setFields(elementFieldSchemas);
        } else {
          // Pig bags contain tuples, so wrap non-struct element types in a tuple schema
          ResourceSchema elementSchema = new ResourceSchema();
          elementSchema.setFields(elementFieldSchemas);

          ResourceFieldSchema tupleSchema = new ResourceFieldSchema();
          tupleSchema.setType(DataType.TUPLE);
          tupleSchema.setSchema(elementSchema);

          result.setFields(new ResourceFieldSchema[]{tupleSchema});
        }

        return result;
      case MAP:
        MapType mapType = type.asMapType();

        // Pig maps are keyed by chararray only
        if (mapType.keyType().typeId() != Type.TypeID.STRING) {
          throw new FrontendException("Unsupported map key type: " + mapType.keyType());
        }
        result.setFields(new ResourceFieldSchema[]{convert(mapType.valueType())});

        return result;
      default:
        throw new FrontendException("Unsupported complex type: " + type);
    }
  }

  /**
   * Returns a schema containing only the requested top-level columns, in the given order.
   *
   * @throws IllegalArgumentException if a requested column is not present in the schema
   *         (previously this silently collected a null and failed later with an
   *         uninformative NullPointerException)
   */
  public static Schema project(Schema schema, List<String> requiredFields) {
    List<NestedField> columns = Lists.newArrayList();

    for (String column : requiredFields) {
      NestedField field = schema.findField(column);
      if (field == null) {
        throw new IllegalArgumentException(format("Cannot project unknown field: %s", column));
      }
      columns.add(field);
    }

    return new Schema(columns);
  }
}
| 1,901 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TableTestBase.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.netflix.iceberg.types.Types;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.util.Iterator;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class for table tests: creates a temporary test table with a 16-bucket partition
 * spec before each test, and provides sample data files plus helpers for validating
 * snapshots and manifest contents.
 */
public class TableTestBase {
  // Schema passed to create tables
  static final Schema SCHEMA = new Schema(
      required(3, "id", Types.IntegerType.get()),
      required(4, "data", Types.StringType.get())
  );

  // Partition spec used to create tables
  static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
      .bucket("data", 16)
      .build();

  // Pre-built zero-length data files, one per partition bucket 0..3
  static final DataFile FILE_A = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=0") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_B = DataFiles.builder(SPEC)
      .withPath("/path/to/data-b.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=1") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_C = DataFiles.builder(SPEC)
      .withPath("/path/to/data-c.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=2") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_D = DataFiles.builder(SPEC)
      .withPath("/path/to/data-d.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=3") // easy way to set partition data for now
      .withRecordCount(0)
      .build();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  File tableDir = null;     // root directory of the test table
  File metadataDir = null;  // <tableDir>/metadata
  TestTables.TestTable table = null;

  // creates a fresh test table in a temp folder before each test
  @Before
  public void setupTable() throws Exception {
    this.tableDir = temp.newFolder();
    tableDir.delete(); // created by table create

    this.metadataDir = new File(tableDir, "metadata");
    this.table = create(SCHEMA, SPEC);
  }

  @After
  public void cleanupTables() {
    TestTables.clearTables();
  }

  List<File> listManifestFiles() {
    return listManifestFiles(tableDir);
  }

  // lists manifest files in the metadata dir: avro files that are not snapshot ("snap") files
  List<File> listManifestFiles(File tableDir) {
    return Lists.newArrayList(new File(tableDir, "metadata").listFiles((dir, name) ->
        !name.startsWith("snap") && Files.getFileExtension(name).equalsIgnoreCase("avro")));
  }

  private TestTables.TestTable create(Schema schema, PartitionSpec spec) {
    return TestTables.create(tableDir, "test", schema, spec);
  }

  TestTables.TestTable load() {
    return TestTables.load(tableDir, "test");
  }

  Integer version() {
    return TestTables.metadataVersion("test");
  }

  TableMetadata readMetadata() {
    return TestTables.readMetadata("test");
  }

  /**
   * Asserts that {@code snap} reuses all of {@code old}'s manifests plus exactly one new
   * manifest whose entries are {@code newFiles} (in order), tagged with snap's snapshot ID.
   */
  void validateSnapshot(Snapshot old, Snapshot snap, DataFile... newFiles) {
    List<ManifestFile> oldManifests = old != null ? old.manifests() : ImmutableList.of();

    // copy the manifests to a modifiable list and remove the existing manifests
    List<ManifestFile> newManifests = Lists.newArrayList(snap.manifests());
    for (ManifestFile oldManifest : oldManifests) {
      Assert.assertTrue("New snapshot should contain old manifests",
          newManifests.remove(oldManifest));
    }
    Assert.assertEquals("Should create 1 new manifest and reuse old manifests",
        1, newManifests.size());
    ManifestFile manifest = newManifests.get(0);

    long id = snap.snapshotId();
    Iterator<String> newPaths = paths(newFiles).iterator();
    for (ManifestEntry entry : ManifestReader.read(localInput(manifest.path())).entries()) {
      DataFile file = entry.file();
      Assert.assertEquals("Path should match expected", newPaths.next(), file.path().toString());
      Assert.assertEquals("File's snapshot ID should match", id, entry.snapshotId());
    }

    Assert.assertFalse("Should find all files in the manifest", newPaths.hasNext());
  }

  List<String> paths(DataFile... dataFiles) {
    List<String> paths = Lists.newArrayListWithExpectedSize(dataFiles.length);
    for (DataFile file : dataFiles) {
      paths.add(file.path().toString());
    }
    return paths;
  }

  // asserts that the manifest's entries match the expected files and snapshot IDs, in order
  static void validateManifest(ManifestFile manifest,
                               Iterator<Long> ids,
                               Iterator<DataFile> expectedFiles) {
    validateManifest(manifest.path(), ids, expectedFiles);
  }

  static void validateManifest(String manifest,
                               Iterator<Long> ids,
                               Iterator<DataFile> expectedFiles) {
    for (ManifestEntry entry : ManifestReader.read(localInput(manifest)).entries()) {
      DataFile file = entry.file();
      DataFile expected = expectedFiles.next();
      Assert.assertEquals("Path should match expected",
          expected.path().toString(), file.path().toString());
      Assert.assertEquals("Snapshot ID should match expected ID",
          (long) ids.next(), entry.snapshotId());
    }

    Assert.assertFalse("Should find all files in the manifest", expectedFiles.hasNext());
  }

  static void validateManifestEntries(ManifestFile manifest,
                                      Iterator<Long> ids,
                                      Iterator<DataFile> expectedFiles,
                                      Iterator<ManifestEntry.Status> expectedStatuses) {
    validateManifestEntries(manifest.path(), ids, expectedFiles, expectedStatuses);
  }

  // like validateManifest, but also checks each entry's status (ADDED/EXISTING/DELETED)
  static void validateManifestEntries(String manifest,
                                      Iterator<Long> ids,
                                      Iterator<DataFile> expectedFiles,
                                      Iterator<ManifestEntry.Status> expectedStatuses) {
    for (ManifestEntry entry : ManifestReader.read(localInput(manifest)).entries()) {
      DataFile file = entry.file();
      DataFile expected = expectedFiles.next();
      final ManifestEntry.Status expectedStatus = expectedStatuses.next();
      Assert.assertEquals("Path should match expected",
          expected.path().toString(), file.path().toString());
      Assert.assertEquals("Snapshot ID should match expected ID",
          (long) ids.next(), entry.snapshotId());
      Assert.assertEquals("Entry status should match expected ID",
          expectedStatus, entry.status());
    }

    Assert.assertFalse("Should find all files in the manifest", expectedFiles.hasNext());
  }

  // varargs-to-iterator helpers for the validate methods above
  static Iterator<ManifestEntry.Status> statuses(ManifestEntry.Status... statuses) {
    return Iterators.forArray(statuses);
  }

  static Iterator<Long> ids(Long... ids) {
    return Iterators.forArray(ids);
  }

  static Iterator<DataFile> files(DataFile... files) {
    return Iterators.forArray(files);
  }

  // reads the data files listed in an existing manifest, in manifest order
  static Iterator<DataFile> files(ManifestFile manifest) {
    return ManifestReader.read(localInput(manifest.path())).iterator();
  }
}
| 1,902 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestMergeAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.Set;
import static com.google.common.collect.Iterators.concat;
/**
 * Tests for the merging append operation: verifies when manifests are merged or kept
 * separate, how deleted entries are filtered during merges, and that commit retries
 * reuse or clean up manifests correctly.
 */
public class TestMergeAppend extends TableTestBase {
  // an append to an empty table should produce a single new manifest
  @Test
  public void testEmptyTableAppend() {
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    TableMetadata base = readMetadata();
    Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());

    Snapshot pending = table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .apply();

    Assert.assertEquals("Should create 1 manifest for initial write",
        1, pending.manifests().size());

    long pendingId = pending.snapshotId();

    validateManifest(pending.manifests().get(0), ids(pendingId, pendingId), files(FILE_A, FILE_B));
  }

  // with min-count-to-merge=1, a second append merges the existing manifest into the new one
  @Test
  public void testMergeWithExistingManifest() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest for second write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();
    // new files come first in the merged manifest, then entries from the initial manifest
    validateManifest(newManifest,
        ids(pendingId, pendingId, baseId, baseId),
        concat(files(FILE_C, FILE_D), files(initialManifest)));
  }

  // a merge after a delete drops the DELETED entry from the merged manifest
  @Test
  public void testMergeWithExistingManifestAfterDelete() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.newDelete()
        .deleteFile(FILE_A)
        .commit();

    TableMetadata delete = readMetadata();
    long deleteId = delete.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 filtered manifest for delete",
        1, delete.currentSnapshot().manifests().size());
    ManifestFile deleteManifest = delete.currentSnapshot().manifests().get(0);

    validateManifestEntries(deleteManifest,
        ids(deleteId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.DELETED, Status.EXISTING));

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest for second write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();

    // the deleted entry from the previous manifest should be removed
    validateManifestEntries(newManifest,
        ids(pendingId, pendingId, baseId),
        files(FILE_C, FILE_D, FILE_B),
        statuses(Status.ADDED, Status.ADDED, Status.EXISTING));
  }

  // manifests accumulate unmerged until the configured minimum count is reached
  @Test
  public void testMinMergeCount() {
    // only merge when there are at least 4 manifests
    table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newFastAppend()
        .appendFile(FILE_A)
        .commit();
    long idFileA = readMetadata().currentSnapshot().snapshotId();

    table.newFastAppend()
        .appendFile(FILE_B)
        .commit();
    long idFileB = readMetadata().currentSnapshot().snapshotId();

    Assert.assertEquals("Should have 2 manifests from setup writes",
        2, readMetadata().currentSnapshot().manifests().size());

    table.newAppend()
        .appendFile(FILE_C)
        .commit();
    long idFileC = readMetadata().currentSnapshot().snapshotId();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should have 3 unmerged manifests",
        3, base.currentSnapshot().manifests().size());
    Set<ManifestFile> unmerged = Sets.newHashSet(base.currentSnapshot().manifests());

    Snapshot pending = table.newAppend()
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest after the 4th write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertFalse("Should not contain previous manifests", unmerged.contains(newManifest));

    long pendingId = pending.snapshotId();

    validateManifest(newManifest,
        ids(pendingId, idFileC, idFileB, idFileA),
        files(FILE_D, FILE_C, FILE_B, FILE_A));
  }

  // a small manifest target size prevents merging, leaving manifests separate
  @Test
  public void testMergeSizeTargetWithExistingManifest() {
    // use a small limit on manifest size to prevent merging
    table.updateProperties()
        .set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "10")
        .commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 2 unmerged manifests after second write",
        2, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();
    validateManifest(newManifest, ids(pendingId, pendingId), files(FILE_C, FILE_D));

    validateManifest(pending.manifests().get(1), ids(baseId, baseId), files(initialManifest));
  }

  // manifests written under a previous partition spec are not merged with new-spec manifests
  @Test
  public void testChangedPartitionSpec() {
    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    // build the new spec using the table's schema, which uses fresh IDs
    PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
        .bucket("data", 16)
        .bucket("id", 4)
        .build();

    // commit the new partition spec to the table manually
    table.ops().commit(base, base.updatePartitionSpec(newSpec));

    DataFile newFileC = DataFiles.builder(newSpec)
        .copy(FILE_C)
        .withPartitionPath("data_bucket=2/id_bucket=3")
        .build();

    Snapshot pending = table.newAppend()
        .appendFile(newFileC)
        .apply();

    Assert.assertEquals("Should use 2 manifest files",
        2, pending.manifests().size());

    // new manifest comes first
    validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));

    Assert.assertEquals("Second manifest should be the initial manifest with the old spec",
        initialManifest, pending.manifests().get(1));
  }

  // old-spec manifests may still be merged with each other after a spec change
  @Test
  public void testChangedPartitionSpecMergeExisting() {
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    long id1 = readMetadata().currentSnapshot().snapshotId();

    // create a second compatible manifest
    table.newFastAppend()
        .appendFile(FILE_B)
        .commit();
    long id2 = readMetadata().currentSnapshot().snapshotId();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should contain 2 manifests",
        2, base.currentSnapshot().manifests().size());
    ManifestFile manifest = base.currentSnapshot().manifests().get(0);

    // build the new spec using the table's schema, which uses fresh IDs
    PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
        .bucket("data", 16)
        .bucket("id", 4)
        .build();

    // commit the new partition spec to the table manually
    table.ops().commit(base, base.updatePartitionSpec(newSpec));

    DataFile newFileC = DataFiles.builder(newSpec)
        .copy(FILE_C)
        .withPartitionPath("data_bucket=2/id_bucket=3")
        .build();

    Snapshot pending = table.newAppend()
        .appendFile(newFileC)
        .apply();

    Assert.assertEquals("Should use 2 manifest files",
        2, pending.manifests().size());
    Assert.assertFalse("First manifest should not be in the new snapshot",
        pending.manifests().contains(manifest));

    validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));
    validateManifest(pending.manifests().get(1), ids(id2, id1), files(FILE_B, FILE_A));
  }

  // when all commit retries fail, the merged manifest written by apply() must be cleaned up
  @Test
  public void testFailure() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.ops().failCommits(5);

    AppendFiles append = table.newAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();

    Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);

    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
    validateManifest(newManifest,
        ids(pending.snapshotId(), baseId),
        concat(files(FILE_B), files(initialManifest)));

    AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
        CommitFailedException.class, "Injected failure", append::commit);

    Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
  }

  // when a retry eventually succeeds, the manifest written by apply() is reused, not rewritten
  @Test
  public void testRecovery() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.ops().failCommits(3);

    AppendFiles append = table.newAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();

    Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);

    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
    validateManifest(newManifest,
        ids(pending.snapshotId(), baseId),
        concat(files(FILE_B), files(initialManifest)));

    append.commit();

    TableMetadata metadata = readMetadata();
    Assert.assertTrue("Should reuse the new manifest", new File(newManifest.path()).exists());
    Assert.assertEquals("Should commit the same new manifest during retry",
        Lists.newArrayList(newManifest), metadata.currentSnapshot().manifests());
  }
}
| 1,903 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestReplaceFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.collections.Sets;
import java.io.File;
import java.util.Collections;
import static com.netflix.iceberg.ManifestEntry.Status.ADDED;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
import static com.netflix.iceberg.ManifestEntry.Status.EXISTING;
/**
 * Tests for {@code table.newRewrite()} ({@link RewriteFiles}): atomically replacing a set of
 * existing data files with a set of new files, including validation, failure cleanup, and
 * retry/recovery behavior.
 */
public class TestReplaceFiles extends TableTestBase {

  @Test
  public void testEmptyTable() {
    // rewriting a file that was never added must fail validation
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
    TableMetadata base = readMetadata();
    Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
    AssertHelpers.assertThrows("Expected an exception",
        ValidationException.class,
        "Missing required files to delete: /path/to/data-a.parquet",
        () -> table.newRewrite()
            .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B))
            .commit());
  }

  @Test
  public void testAddOnly() {
    // a rewrite must both delete and add files; an empty add set is rejected
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
    AssertHelpers.assertThrows("Expected an exception",
        IllegalArgumentException.class,
        "Files to add can not be null or empty",
        () -> table.newRewrite()
            .rewriteFiles(Sets.newSet(FILE_A), Collections.emptySet())
            .apply());
  }

  @Test
  public void testDeleteOnly() {
    // an empty delete set is likewise rejected
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
    AssertHelpers.assertThrows("Expected an exception",
        IllegalArgumentException.class,
        "Files to delete cannot be null or empty",
        () -> table.newRewrite()
            .rewriteFiles(Collections.emptySet(), Sets.newSet(FILE_A))
            .apply());
  }

  @Test
  public void testDeleteWithDuplicateEntriesInManifest() {
    // a file appended twice produces duplicate manifest entries; a rewrite must
    // delete every entry for that file
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseSnapshotId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newRewrite()
        .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_C))
        .apply();

    Assert.assertEquals("Should contain 2 manifest",
        2, pending.manifests().size());
    Assert.assertFalse("Should not contain manifest from initial write",
        pending.manifests().contains(initialManifest));

    long pendingId = pending.snapshotId();
    // manifest 0 carries the added file; manifest 1 rewrites the base manifest,
    // marking both duplicate FILE_A entries deleted and keeping FILE_B
    validateManifestEntries(pending.manifests().get(0),
        ids(pendingId),
        files(FILE_C),
        statuses(ADDED));
    validateManifestEntries(pending.manifests().get(1),
        ids(pendingId, pendingId, baseSnapshotId),
        files(FILE_A, FILE_A, FILE_B),
        statuses(DELETED, DELETED, EXISTING));

    // We should only get the 3 manifests that this test is expected to add.
    Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
  }

  @Test
  public void testAddAndDelete() {
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseSnapshotId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newRewrite()
        .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_C))
        .apply();

    Assert.assertEquals("Should contain 2 manifest",
        2, pending.manifests().size());
    Assert.assertFalse("Should not contain manifest from initial write",
        pending.manifests().contains(initialManifest));

    long pendingId = pending.snapshotId();
    validateManifestEntries(pending.manifests().get(0),
        ids(pendingId),
        files(FILE_C),
        statuses(ADDED));
    validateManifestEntries(pending.manifests().get(1),
        ids(pendingId, baseSnapshotId),
        files(FILE_A, FILE_B),
        statuses(DELETED, EXISTING));

    // We should only get the 3 manifests that this test is expected to add.
    Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
  }

  @Test
  public void testFailure() {
    // when every retry fails, the manifests written by the rewrite must be cleaned up
    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    table.ops().failCommits(5);

    RewriteFiles rewrite = table.newRewrite()
        .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B));
    Snapshot pending = rewrite.apply();

    Assert.assertEquals("Should produce 2 manifests", 2, pending.manifests().size());
    ManifestFile manifest1 = pending.manifests().get(0);
    ManifestFile manifest2 = pending.manifests().get(1);

    validateManifestEntries(manifest1,
        ids(pending.snapshotId()), files(FILE_B), statuses(ADDED));
    validateManifestEntries(manifest2,
        ids(pending.snapshotId()), files(FILE_A), statuses(DELETED));

    AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
        CommitFailedException.class, "Injected failure", rewrite::commit);

    Assert.assertFalse("Should clean up new manifest", new File(manifest1.path()).exists());
    Assert.assertFalse("Should clean up new manifest", new File(manifest2.path()).exists());

    // As commit failed all the manifests added with rewrite should be cleaned up
    Assert.assertEquals("Only 1 manifest should exist", 1, listManifestFiles().size());
  }

  @Test
  public void testRecovery() {
    // when a retry eventually succeeds, the manifests from the first attempt are reused
    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    table.ops().failCommits(3);

    RewriteFiles rewrite = table.newRewrite().rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B));
    Snapshot pending = rewrite.apply();

    Assert.assertEquals("Should produce 2 manifests", 2, pending.manifests().size());
    ManifestFile manifest1 = pending.manifests().get(0);
    ManifestFile manifest2 = pending.manifests().get(1);

    validateManifestEntries(manifest1,
        ids(pending.snapshotId()), files(FILE_B), statuses(ADDED));
    validateManifestEntries(manifest2,
        ids(pending.snapshotId()), files(FILE_A), statuses(DELETED));

    rewrite.commit();

    Assert.assertTrue("Should reuse the manifest for appends", new File(manifest1.path()).exists());
    Assert.assertTrue("Should reuse the manifest with deletes", new File(manifest2.path()).exists());

    TableMetadata metadata = readMetadata();
    // fix: the original checked manifest2 (the delete manifest) under a message about the
    // append manifest; check both manifests with messages that match what is asserted
    Assert.assertTrue("Should commit the manifest for append",
        metadata.currentSnapshot().manifests().contains(manifest1));
    Assert.assertTrue("Should commit the manifest with deletes",
        metadata.currentSnapshot().manifests().contains(manifest2));

    // 2 manifests added by rewrite and 1 original manifest should be found.
    Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
  }

  @Test
  public void testDeleteNonExistentFile() {
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());

    AssertHelpers.assertThrows("Expected an exception",
        ValidationException.class,
        "Missing required files to delete: /path/to/data-c.parquet",
        () -> table.newRewrite()
            .rewriteFiles(Sets.newSet(FILE_C), Sets.newSet(FILE_D))
            .commit());

    Assert.assertEquals("Only 1 manifests should exist", 1, listManifestFiles().size());
  }

  @Test
  public void testAlreadyDeletedFile() {
    // a file removed by a previous rewrite cannot be deleted again
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());

    RewriteFiles rewrite = table.newRewrite();
    Snapshot pending = rewrite
        .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B))
        .apply();

    Assert.assertEquals("Should contain 2 manifest",
        2, pending.manifests().size());

    long pendingId = pending.snapshotId();
    validateManifestEntries(pending.manifests().get(0),
        ids(pendingId),
        files(FILE_B),
        statuses(ADDED));
    validateManifestEntries(pending.manifests().get(1),
        ids(pendingId, base.currentSnapshot().snapshotId()),
        files(FILE_A),
        statuses(DELETED));

    rewrite.commit();

    AssertHelpers.assertThrows("Expected an exception",
        ValidationException.class,
        "Missing required files to delete: /path/to/data-a.parquet",
        () -> table.newRewrite()
            .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_D))
            .commit());

    Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.TableMetadataParser.CURRENT_SNAPSHOT_ID;
import static com.netflix.iceberg.TableMetadataParser.FORMAT_VERSION;
import static com.netflix.iceberg.TableMetadataParser.LAST_COLUMN_ID;
import static com.netflix.iceberg.TableMetadataParser.LAST_UPDATED_MILLIS;
import static com.netflix.iceberg.TableMetadataParser.LOCATION;
import static com.netflix.iceberg.TableMetadataParser.PARTITION_SPEC;
import static com.netflix.iceberg.TableMetadataParser.PROPERTIES;
import static com.netflix.iceberg.TableMetadataParser.SCHEMA;
import static com.netflix.iceberg.TableMetadataParser.SNAPSHOTS;
/**
 * Tests JSON serialization round-trips for {@link TableMetadata} via
 * {@link TableMetadataParser}, including backward compatibility with metadata files
 * written before the partition-spec list and default spec ID were introduced.
 */
public class TestTableMetadataJson {
  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // table operations rooted in the temp folder; passed to parser/snapshot constructors
  public TableOperations ops = new LocalTableOperations(temp);

  @Test
  public void testJsonConversion() throws Exception {
    // build metadata with two snapshots and a snapshot log, serialize to JSON,
    // parse it back, and verify every field survives the round trip
    Schema schema = new Schema(
        Types.NestedField.required(1, "x", Types.LongType.get()),
        Types.NestedField.required(2, "y", Types.LongType.get()),
        Types.NestedField.required(3, "z", Types.LongType.get())
    );

    PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5).build();

    // snapshot ID doubles as its timestamp here; previous snapshot is slightly older
    long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
    Snapshot previousSnapshot = new BaseSnapshot(
        null, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
    long currentSnapshotId = System.currentTimeMillis();
    Snapshot currentSnapshot = new BaseSnapshot(
        null, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));

    List<SnapshotLogEntry> snapshotLog = ImmutableList.<SnapshotLogEntry>builder()
        .add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()))
        .add(new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()))
        .build();

    TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
        System.currentTimeMillis(), 3, schema, 5, ImmutableList.of(spec),
        ImmutableMap.of("property", "value"), currentSnapshotId,
        Arrays.asList(previousSnapshot, currentSnapshot), snapshotLog);

    String asJson = TableMetadataParser.toJson(expected);
    TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
        JsonUtil.mapper().readValue(asJson, JsonNode.class));

    Assert.assertEquals("Table location should match",
        expected.location(), metadata.location());
    Assert.assertEquals("Last column ID should match",
        expected.lastColumnId(), metadata.lastColumnId());
    Assert.assertEquals("Schema should match",
        expected.schema().asStruct(), metadata.schema().asStruct());
    Assert.assertEquals("Partition spec should match",
        expected.spec().toString(), metadata.spec().toString());
    Assert.assertEquals("Default spec ID should match",
        expected.defaultSpecId(), metadata.defaultSpecId());
    Assert.assertEquals("PartitionSpec map should match",
        expected.specs(), metadata.specs());
    Assert.assertEquals("Properties should match",
        expected.properties(), metadata.properties());
    Assert.assertEquals("Snapshot logs should match",
        expected.snapshotLog(), metadata.snapshotLog());
    Assert.assertEquals("Current snapshot ID should match",
        currentSnapshotId, metadata.currentSnapshot().snapshotId());
    Assert.assertEquals("Parent snapshot ID should match",
        (Long) previousSnapshotId, metadata.currentSnapshot().parentId());
    Assert.assertEquals("Current snapshot files should match",
        currentSnapshot.manifests(), metadata.currentSnapshot().manifests());
    Assert.assertEquals("Previous snapshot ID should match",
        previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
    Assert.assertEquals("Previous snapshot files should match",
        previousSnapshot.manifests(),
        metadata.snapshot(previousSnapshotId).manifests());
  }

  @Test
  public void testFromJsonSortsSnapshotLog() throws Exception {
    // a snapshot log stored out of order must be re-sorted by timestamp when parsed
    Schema schema = new Schema(
        Types.NestedField.required(1, "x", Types.LongType.get()),
        Types.NestedField.required(2, "y", Types.LongType.get()),
        Types.NestedField.required(3, "z", Types.LongType.get())
    );

    PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5).build();

    long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
    Snapshot previousSnapshot = new BaseSnapshot(
        ops, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
    long currentSnapshotId = System.currentTimeMillis();
    Snapshot currentSnapshot = new BaseSnapshot(
        ops, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));

    List<SnapshotLogEntry> reversedSnapshotLog = Lists.newArrayList();

    TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
        System.currentTimeMillis(), 3, schema, 5, ImmutableList.of(spec),
        ImmutableMap.of("property", "value"), currentSnapshotId,
        Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog);

    // add the entries after creating TableMetadata to avoid the sorted check
    reversedSnapshotLog.add(
        new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()));
    reversedSnapshotLog.add(
        new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()));

    String asJson = TableMetadataParser.toJson(expected);
    TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
        JsonUtil.mapper().readValue(asJson, JsonNode.class));

    // the parsed log should come back in chronological order
    List<SnapshotLogEntry> expectedSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
        .add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()))
        .add(new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()))
        .build();

    Assert.assertEquals("Snapshot logs should match",
        expectedSnapshotLog, metadata.snapshotLog());
  }

  @Test
  public void testBackwardCompatMissingPartitionSpecList() throws Exception {
    // old metadata wrote a single "partition-spec" field instead of a spec list;
    // the parser must assign it TableMetadata.INITIAL_SPEC_ID
    Schema schema = new Schema(
        Types.NestedField.required(1, "x", Types.LongType.get()),
        Types.NestedField.required(2, "y", Types.LongType.get()),
        Types.NestedField.required(3, "z", Types.LongType.get())
    );

    PartitionSpec spec = PartitionSpec.builderFor(schema).identity("x").withSpecId(6).build();

    long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
    Snapshot previousSnapshot = new BaseSnapshot(
        ops, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
    long currentSnapshotId = System.currentTimeMillis();
    Snapshot currentSnapshot = new BaseSnapshot(
        ops, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));

    TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
        System.currentTimeMillis(), 3, schema, 6, ImmutableList.of(spec),
        ImmutableMap.of("property", "value"), currentSnapshotId,
        Arrays.asList(previousSnapshot, currentSnapshot), ImmutableList.of());

    String asJson = toJsonWithoutSpecList(expected);
    TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
        JsonUtil.mapper().readValue(asJson, JsonNode.class));

    Assert.assertEquals("Table location should match",
        expected.location(), metadata.location());
    Assert.assertEquals("Last column ID should match",
        expected.lastColumnId(), metadata.lastColumnId());
    Assert.assertEquals("Schema should match",
        expected.schema().asStruct(), metadata.schema().asStruct());
    Assert.assertEquals("Partition spec should be the default",
        expected.spec().toString(), metadata.spec().toString());
    Assert.assertEquals("Default spec ID should default to TableMetadata.INITIAL_SPEC_ID",
        TableMetadata.INITIAL_SPEC_ID, metadata.defaultSpecId());
    Assert.assertEquals("PartitionSpec should contain the spec",
        1, metadata.specs().size());
    Assert.assertTrue("PartitionSpec should contain the spec",
        metadata.specs().get(0).compatibleWith(spec));
    Assert.assertEquals("PartitionSpec should have ID TableMetadata.INITIAL_SPEC_ID",
        TableMetadata.INITIAL_SPEC_ID, metadata.specs().get(0).specId());
    Assert.assertEquals("Properties should match",
        expected.properties(), metadata.properties());
    Assert.assertEquals("Snapshot logs should match",
        expected.snapshotLog(), metadata.snapshotLog());
    Assert.assertEquals("Current snapshot ID should match",
        currentSnapshotId, metadata.currentSnapshot().snapshotId());
    Assert.assertEquals("Parent snapshot ID should match",
        (Long) previousSnapshotId, metadata.currentSnapshot().parentId());
    Assert.assertEquals("Current snapshot files should match",
        currentSnapshot.manifests(), metadata.currentSnapshot().manifests());
    Assert.assertEquals("Previous snapshot ID should match",
        previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
    Assert.assertEquals("Previous snapshot files should match",
        previousSnapshot.manifests(),
        metadata.snapshot(previousSnapshotId).manifests());
  }

  /**
   * Serializes metadata the way a pre-spec-list writer did: a single "partition-spec"
   * field, no default spec ID, no spec list, and no snapshot log.
   */
  public static String toJsonWithoutSpecList(TableMetadata metadata) {
    StringWriter writer = new StringWriter();
    try {
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);

      generator.writeStartObject(); // start table metadata object

      generator.writeNumberField(FORMAT_VERSION, TableMetadata.TABLE_FORMAT_VERSION);
      generator.writeStringField(LOCATION, metadata.location());
      generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
      generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());

      generator.writeFieldName(SCHEMA);
      SchemaParser.toJson(metadata.schema(), generator);

      // mimic an old writer by writing only partition-spec and not the default ID or spec list
      generator.writeFieldName(PARTITION_SPEC);
      PartitionSpecParser.toJsonFields(metadata.spec(), generator);

      generator.writeObjectFieldStart(PROPERTIES);
      for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
        generator.writeStringField(keyValue.getKey(), keyValue.getValue());
      }
      generator.writeEndObject();

      generator.writeNumberField(CURRENT_SNAPSHOT_ID,
          metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);

      generator.writeArrayFieldStart(SNAPSHOTS);
      for (Snapshot snapshot : metadata.snapshots()) {
        SnapshotParser.toJson(snapshot, generator);
      }
      generator.writeEndArray();

      // skip the snapshot log

      generator.writeEndObject(); // end table metadata object

      generator.flush();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json for: %s", metadata);
    }
    return writer.toString();
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types.BooleanType;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Paths;
import static avro.shaded.com.google.common.collect.Lists.newArrayList;
import static com.netflix.iceberg.ConfigProperties.COMPRESS_METADATA;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
/**
 * Tests that {@link TableMetadataParser} honors the {@code COMPRESS_METADATA} setting:
 * metadata written with compression enabled is gzip, and both forms read back correctly.
 */
public class TableMetadataParserTest {

  // shared fixtures; constants, so declared static final (they were instance fields before)
  private static final Schema SCHEMA = new Schema(newArrayList(optional(1, "b", BooleanType.get())));
  private static final TableMetadata EXPECTED =
      newTableMetadata(null, SCHEMA, unpartitioned(), "file://tmp/db/table");

  @Test
  public void testCompressionProperty() throws IOException {
    final boolean[] props = {true, false};
    final Configuration configuration = new Configuration();
    for (boolean prop : props) {
      configuration.setBoolean(COMPRESS_METADATA, prop);
      // getFileExtension(...) doubles as the output path in this test; hoist the repeated call
      final String path = getFileExtension(configuration);
      final OutputFile outputFile = Files.localOutput(path);
      TableMetadataParser.write(EXPECTED, outputFile);
      // the file must be gzip exactly when compression was requested
      Assert.assertEquals(prop, isCompressed(path));
      final TableMetadata read = TableMetadataParser.read(null, Files.localInput(new File(path)));
      verifyMetadata(read);
    }
  }

  @After
  public void cleanup() throws IOException {
    // remove both the compressed and uncompressed output files
    final boolean[] props = {true, false};
    Configuration configuration = new Configuration();
    for (boolean prop : props) {
      configuration.setBoolean(COMPRESS_METADATA, prop);
      java.nio.file.Files.deleteIfExists(Paths.get(getFileExtension(configuration)));
    }
  }

  // checks the fields that survive a write/read round trip
  private void verifyMetadata(TableMetadata read) {
    Assert.assertEquals(EXPECTED.schema().asStruct(), read.schema().asStruct());
    Assert.assertEquals(EXPECTED.location(), read.location());
    Assert.assertEquals(EXPECTED.lastColumnId(), read.lastColumnId());
    Assert.assertEquals(EXPECTED.properties(), read.properties());
  }

  // probes gzip-ness by attempting to open the file as a gzip stream
  private boolean isCompressed(String path) throws IOException {
    try (InputStream ignored = new GzipCompressorInputStream(new FileInputStream(new File(path)))) {
      return true;
    } catch (IOException e) {
      // GzipCompressorInputStream signals non-gzip input with this exact message
      if (e.getMessage().equals("Input is not in the .gz format")) {
        return false;
      }
      throw e;
    }
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.ValidationException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
/**
 * Tests for {@code table.newReplacePartitions()}: dynamic-partition overwrite, with and
 * without manifest merging, for partitioned and unpartitioned tables, plus the
 * append-only validation mode.
 */
public class TestReplacePartitions extends TableTestBase {

  static final DataFile FILE_E = DataFiles.builder(SPEC)
      .withPath("/path/to/data-e.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=0") // same partition as FILE_A
      .withRecordCount(0)
      .build();

  static final DataFile FILE_F = DataFiles.builder(SPEC)
      .withPath("/path/to/data-f.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=1") // same partition as FILE_B
      .withRecordCount(0)
      .build();

  static final DataFile FILE_G = DataFiles.builder(SPEC)
      .withPath("/path/to/data-g.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=10") // no other partition
      .withRecordCount(0)
      .build();

  @Test
  public void testReplaceOnePartition() {
    // FILE_E replaces FILE_A (same partition); FILE_B's partition is untouched
    table.newFastAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();

    table.newReplacePartitions()
        .addFile(FILE_E)
        .commit();

    long replaceId = readMetadata().currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
    Assert.assertEquals("Table should have 2 manifests",
        2, table.currentSnapshot().manifests().size());

    // manifest is not merged because it is less than the minimum
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(replaceId),
        files(FILE_E),
        statuses(Status.ADDED));

    // FILE_A is deleted by the overwrite; FILE_B survives as EXISTING
    validateManifestEntries(table.currentSnapshot().manifests().get(1),
        ids(replaceId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testReplaceAndMergeOnePartition() {
    // ensure the overwrite results in a merge
    table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();

    table.newFastAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();

    table.newReplacePartitions()
        .addFile(FILE_E)
        .commit();

    long replaceId = readMetadata().currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
    Assert.assertEquals("Table should have 1 manifest",
        1, table.currentSnapshot().manifests().size());

    // merged manifest holds the add, the delete, and the untouched file together
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(replaceId, replaceId, baseId),
        files(FILE_E, FILE_A, FILE_B),
        statuses(Status.ADDED, Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testReplaceWithUnpartitionedTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Table unpartitioned = TestTables.create(
        tableDir, "unpartitioned", SCHEMA, PartitionSpec.unpartitioned());

    Assert.assertEquals("Table version should be 0",
        0, (long) TestTables.metadataVersion("unpartitioned"));

    unpartitioned.newAppend()
        .appendFile(FILE_A)
        .commit();

    // make sure the data was successfully added
    Assert.assertEquals("Table version should be 1",
        1, (long) TestTables.metadataVersion("unpartitioned"));
    validateSnapshot(null, TestTables.readMetadata("unpartitioned").currentSnapshot(), FILE_A);

    // on an unpartitioned table, replace-partitions overwrites everything
    unpartitioned.newReplacePartitions()
        .addFile(FILE_B)
        .commit();

    Assert.assertEquals("Table version should be 2",
        2, (long) TestTables.metadataVersion("unpartitioned"));
    TableMetadata replaceMetadata = TestTables.readMetadata("unpartitioned");
    long replaceId = replaceMetadata.currentSnapshot().snapshotId();

    Assert.assertEquals("Table should have 2 manifests",
        2, replaceMetadata.currentSnapshot().manifests().size());

    validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(0),
        ids(replaceId), files(FILE_B), statuses(Status.ADDED));

    validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(1),
        ids(replaceId), files(FILE_A), statuses(Status.DELETED));
  }

  @Test
  public void testReplaceAndMergeWithUnpartitionedTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete())

;
    Table unpartitioned = TestTables.create(
        tableDir, "unpartitioned", SCHEMA, PartitionSpec.unpartitioned());

    // ensure the overwrite results in a merge
    unpartitioned.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();

    Assert.assertEquals("Table version should be 1",
        1, (long) TestTables.metadataVersion("unpartitioned"));

    unpartitioned.newAppend()
        .appendFile(FILE_A)
        .commit();

    // make sure the data was successfully added
    Assert.assertEquals("Table version should be 2",
        2, (long) TestTables.metadataVersion("unpartitioned"));
    validateSnapshot(null, TestTables.readMetadata("unpartitioned").currentSnapshot(), FILE_A);

    unpartitioned.newReplacePartitions()
        .addFile(FILE_B)
        .commit();

    Assert.assertEquals("Table version should be 3",
        3, (long) TestTables.metadataVersion("unpartitioned"));
    TableMetadata replaceMetadata = TestTables.readMetadata("unpartitioned");
    long replaceId = replaceMetadata.currentSnapshot().snapshotId();

    Assert.assertEquals("Table should have 1 manifest",
        1, replaceMetadata.currentSnapshot().manifests().size());

    validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(0),
        ids(replaceId, replaceId), files(FILE_B, FILE_A), statuses(Status.ADDED, Status.DELETED));
  }

  @Test
  public void testValidationFailure() {
    // validateAppendOnly must reject files that overwrite an existing partition (FILE_F)
    table.newFastAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();

    ReplacePartitions replace = table.newReplacePartitions()
        .addFile(FILE_F)
        .addFile(FILE_G)
        .validateAppendOnly();

    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot commit file that conflicts with existing partition",
        replace::commit);

    Assert.assertEquals("Should not create a new snapshot",
        baseId, readMetadata().currentSnapshot().snapshotId());
  }

  @Test
  public void testValidationSuccess() {
    // FILE_G targets a partition no other file uses, so append-only validation passes
    table.newFastAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();

    table.newReplacePartitions()
        .addFile(FILE_G)
        .validateAppendOnly()
        .commit();

    long replaceId = readMetadata().currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
    Assert.assertEquals("Table should have 2 manifests",
        2, table.currentSnapshot().manifests().size());

    // manifest is not merged because it is less than the minimum
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(replaceId),
        files(FILE_G),
        statuses(Status.ADDED));

    // nothing was deleted: the base files keep their original ADDED status
    validateManifestEntries(table.currentSnapshot().manifests().get(1),
        ids(baseId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.ADDED, Status.ADDED));
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.util.Pair;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.ScanSummary.timestampRange;
import static com.netflix.iceberg.ScanSummary.toMillis;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
/**
 * Unit tests for the static helpers in {@code ScanSummary}:
 * {@code timestampRange}, which folds timestamp predicates into a single
 * inclusive [min, max] millisecond range, and {@code toMillis}, which
 * normalizes second/millisecond/microsecond values to milliseconds.
 */
public class TestScanSummary {

  @Test
  public void testTimestampRanges() {
    // two arbitrary millisecond timestamps with lo < hi
    long lo = 1542750188523L;
    long hi = 1542750695131L;

    Assert.assertEquals("Should use inclusive bound",
        Pair.of(Long.MIN_VALUE, hi),
        timestampRange(ImmutableList.of(lessThanOrEqual("ts_ms", hi))));

    // the tighter of two upper bounds wins
    Assert.assertEquals("Should use lower value for upper bound",
        Pair.of(Long.MIN_VALUE, hi),
        timestampRange(ImmutableList.of(
            lessThanOrEqual("ts_ms", hi + 918234),
            lessThanOrEqual("ts_ms", hi))));

    // exclusive < is converted to an inclusive bound one below
    Assert.assertEquals("Should make upper bound inclusive",
        Pair.of(Long.MIN_VALUE, hi - 1),
        timestampRange(ImmutableList.of(lessThan("ts_ms", hi))));

    Assert.assertEquals("Should use inclusive bound",
        Pair.of(lo, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(greaterThanOrEqual("ts_ms", lo))));

    // the tighter of two lower bounds wins
    Assert.assertEquals("Should use upper value for lower bound",
        Pair.of(lo, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lo - 918234),
            greaterThanOrEqual("ts_ms", lo))));

    // exclusive > is converted to an inclusive bound one above
    Assert.assertEquals("Should make lower bound inclusive",
        Pair.of(lo + 1, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(greaterThan("ts_ms", lo))));

    Assert.assertEquals("Should set both bounds for equals",
        Pair.of(lo, lo),
        timestampRange(ImmutableList.of(equal("ts_ms", lo))));

    Assert.assertEquals("Should set both bounds",
        Pair.of(lo, hi - 1),
        timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lo),
            lessThan("ts_ms", hi))));

    // >= lo combined with < lo admits no timestamp at all
    AssertHelpers.assertThrows("Should reject empty ranges",
        IllegalArgumentException.class, "No timestamps can match filters",
        () -> timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lo),
            lessThan("ts_ms", lo))));
  }

  @Test
  public void testToMillis() {
    long ts = 1542750947417L;
    // seconds are scaled up, millis pass through, micros are truncated
    Assert.assertEquals(1542750947000L, toMillis(ts / 1000));
    Assert.assertEquals(1542750947417L, toMillis(ts));
    Assert.assertEquals(1542750947417L, toMillis(ts * 1000 + 918));
  }
}
| 1,908 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestSnapshotJson.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
/**
 * Round-trip tests for {@code SnapshotParser} JSON serialization, covering
 * snapshots built from inline manifest paths and from a manifest-list file.
 */
public class TestSnapshotJson {
  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  public TableOperations ops = new LocalTableOperations(temp);

  @Test
  public void testJsonConversion() {
    Snapshot original = new BaseSnapshot(ops, System.currentTimeMillis(),
        "file:/tmp/manifest1.avro", "file:/tmp/manifest2.avro");

    String asJson = SnapshotParser.toJson(original);
    Snapshot roundTripped = SnapshotParser.fromJson(ops, asJson);

    Assert.assertEquals("Snapshot ID should match",
        original.snapshotId(), roundTripped.snapshotId());
    Assert.assertEquals("Files should match",
        original.manifests(), roundTripped.manifests());
  }

  @Test
  public void testJsonConversionWithManifestList() throws IOException {
    long parentId = 1;
    long id = 2;

    List<ManifestFile> manifestFiles = ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manifest1.avro"), 0),
        new GenericManifestFile(localInput("file:/tmp/manifest2.avro"), 0));

    // write the manifest list into a fresh temp file, removed on JVM exit
    File listFile = temp.newFile("manifests");
    Assert.assertTrue(listFile.delete());
    listFile.deleteOnExit();

    try (ManifestListWriter listWriter = new ManifestListWriter(
        Files.localOutput(listFile), id, parentId)) {
      listWriter.addAll(manifestFiles);
    }

    Snapshot original = new BaseSnapshot(
        ops, id, parentId, System.currentTimeMillis(), localInput(listFile));
    Snapshot inMemory = new BaseSnapshot(
        ops, id, parentId, original.timestampMillis(), manifestFiles);

    // the file-backed snapshot must expose the same manifests as the in-memory one
    Assert.assertEquals("Files should match in memory list",
        inMemory.manifests(), original.manifests());

    String asJson = SnapshotParser.toJson(original);
    Snapshot roundTripped = SnapshotParser.fromJson(ops, asJson);

    Assert.assertEquals("Snapshot ID should match",
        original.snapshotId(), roundTripped.snapshotId());
    Assert.assertEquals("Timestamp should match",
        original.timestampMillis(), roundTripped.timestampMillis());
    Assert.assertEquals("Parent ID should match",
        original.parentId(), roundTripped.parentId());
    Assert.assertEquals("Manifest list should match",
        original.manifestListLocation(), roundTripped.manifestListLocation());
    Assert.assertEquals("Files should match",
        original.manifests(), roundTripped.manifests());
  }
}
| 1,909 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.Set;
/**
 * Tests for table transactions: operations staged through a Transaction must
 * not be visible in the table metadata until commitTransaction(), uncommitted
 * pending operations must be rejected, and retried transaction commits must
 * reuse manifests where possible and clean up replaced ones.
 */
public class TestTransaction extends TableTestBase {
@Test
public void testEmptyTransaction() {
// committing a transaction with no operations must not create a new version
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
t.commitTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0", 0, (int) version());
}
@Test
public void testSingleOperationTransaction() {
// a single append inside a transaction is invisible until commitTransaction()
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
// committing the operation only stages it inside the transaction
Assert.assertSame("Base metadata should not change when an append is committed",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
t.commitTransaction();
validateSnapshot(base.currentSnapshot(), readMetadata().currentSnapshot(), FILE_A, FILE_B);
Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
}
@Test
public void testMultipleOperationTransaction() {
// an append followed by a delete in one transaction lands as one table version
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
// snapshot created by the staged append, visible through the txn's table view
Snapshot appendSnapshot = t.table().currentSnapshot();
t.newDelete()
.deleteFile(FILE_A)
.commit();
Snapshot deleteSnapshot = t.table().currentSnapshot();
Assert.assertSame("Base metadata should not change when an append is committed",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
t.commitTransaction();
// both operations commit together as a single new version
Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
Assert.assertEquals("Table should have one manifest after commit",
1, readMetadata().currentSnapshot().manifests().size());
Assert.assertEquals("Table snapshot should be the delete snapshot",
deleteSnapshot, readMetadata().currentSnapshot());
// FILE_A is marked DELETED by the delete snapshot; FILE_B carried over as EXISTING
validateManifestEntries(readMetadata().currentSnapshot().manifests().get(0),
ids(deleteSnapshot.snapshotId(), appendSnapshot.snapshotId()),
files(FILE_A, FILE_B), statuses(Status.DELETED, Status.EXISTING));
Assert.assertEquals("Table should have a snapshot for each operation",
2, readMetadata().snapshots().size());
validateManifestEntries(readMetadata().snapshots().get(0).manifests().get(0),
ids(appendSnapshot.snapshotId(), appendSnapshot.snapshotId()),
files(FILE_A, FILE_B), statuses(Status.ADDED, Status.ADDED));
}
@Test
public void testMultipleOperationTransactionFromTable() {
// same as testMultipleOperationTransaction, but the second operation is
// created through t.table() rather than directly on the transaction
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
Snapshot appendSnapshot = t.table().currentSnapshot();
// operations created from the transaction's table view stay inside the txn
t.table().newDelete()
.deleteFile(FILE_A)
.commit();
Snapshot deleteSnapshot = t.table().currentSnapshot();
Assert.assertSame("Base metadata should not change when an append is committed",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
t.commitTransaction();
Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
Assert.assertEquals("Table should have one manifest after commit",
1, readMetadata().currentSnapshot().manifests().size());
Assert.assertEquals("Table snapshot should be the delete snapshot",
deleteSnapshot, readMetadata().currentSnapshot());
validateManifestEntries(readMetadata().currentSnapshot().manifests().get(0),
ids(deleteSnapshot.snapshotId(), appendSnapshot.snapshotId()),
files(FILE_A, FILE_B), statuses(Status.DELETED, Status.EXISTING));
Assert.assertEquals("Table should have a snapshot for each operation",
2, readMetadata().snapshots().size());
validateManifestEntries(readMetadata().snapshots().get(0).manifests().get(0),
ids(appendSnapshot.snapshotId(), appendSnapshot.snapshotId()),
files(FILE_A, FILE_B), statuses(Status.ADDED, Status.ADDED));
}
@Test
public void testDetectsUncommittedChange() {
// starting a new operation while a previous one is pending must fail
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
t.newAppend().appendFile(FILE_A).appendFile(FILE_B); // not committed
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
IllegalStateException.class,
"Cannot create new DeleteFiles: last operation has not committed",
t::newDelete);
}
@Test
public void testDetectsUncommittedChangeOnCommit() {
// committing the transaction while an operation is still pending must fail
Assert.assertEquals("Table should be on version 0", 0, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
t.newAppend().appendFile(FILE_A).appendFile(FILE_B); // not committed
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
IllegalStateException.class,
"Cannot commit transaction: last operation has not committed",
t::commitTransaction);
}
@Test
public void testTransactionConflict() {
// set retries to 0 to catch the failure
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "0")
.commit();
Assert.assertEquals("Table should be on version 1", 1, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
// cause the transaction commit to fail
table.ops().failCommits(1);
AssertHelpers.assertThrows("Transaction commit should fail",
CommitFailedException.class, "Injected failure", t::commitTransaction);
}
@Test
public void testTransactionRetry() {
// use only one retry
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "1")
.commit();
Assert.assertEquals("Table should be on version 1", 1, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
// remember the manifests staged by the append so reuse can be verified
Set<ManifestFile> appendManifests = Sets.newHashSet(t.table().currentSnapshot().manifests());
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
// cause the transaction commit to fail
table.ops().failCommits(1);
t.commitTransaction();
// one injected failure + one retry succeeds; manifests are not rewritten
Assert.assertEquals("Table should be on version 2 after commit", 2, (int) version());
Assert.assertEquals("Should reuse manifests from initial append commit",
appendManifests, Sets.newHashSet(table.currentSnapshot().manifests()));
}
@Test
public void testTransactionRetryMergeAppend() {
// use only one retry
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "1")
.commit();
Assert.assertEquals("Table should be on version 1", 1, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Set<ManifestFile> appendManifests = Sets.newHashSet(t.table().currentSnapshot().manifests());
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
// cause the transaction commit to fail
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
t.commitTransaction();
// after retry, the table keeps manifests from both the txn and the
// conflicting append (no merge because min-merge-count was not lowered)
Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
Set<ManifestFile> expectedManifests = Sets.newHashSet();
expectedManifests.addAll(appendManifests);
expectedManifests.addAll(conflictAppendManifests);
Assert.assertEquals("Should reuse manifests from initial append commit and conflicting append",
expectedManifests, Sets.newHashSet(table.currentSnapshot().manifests()));
}
@Test
public void testMultipleUpdateTransactionRetryMergeCleanup() {
// use only one retry and aggressively merge manifests
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "1")
.set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "0")
.commit();
Assert.assertEquals("Table should be on version 1", 1, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
// a property update plus an append in the same transaction
t.updateProperties()
.set("test-property", "test-value")
.commit();
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertEquals("Append should create one manifest",
1, t.table().currentSnapshot().manifests().size());
ManifestFile appendManifest = t.table().currentSnapshot().manifests().get(0);
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
// cause the transaction commit to fail
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
t.commitTransaction();
// the retry merges all manifests into one new manifest and deletes the
// txn's original append manifest from disk
Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
Set<ManifestFile> previousManifests = Sets.newHashSet();
previousManifests.add(appendManifest);
previousManifests.addAll(conflictAppendManifests);
Assert.assertEquals("Should merge both commit manifests into a single manifest",
1, table.currentSnapshot().manifests().size());
Assert.assertFalse("Should merge both commit manifests into a new manifest",
previousManifests.contains(table.currentSnapshot().manifests().get(0)));
Assert.assertFalse("Append manifest should be deleted", new File(appendManifest.path()).exists());
}
@Test
public void testTransactionRetryMergeCleanup() {
// use only one retry and aggressively merge manifests
table.updateProperties()
.set(TableProperties.COMMIT_NUM_RETRIES, "1")
.set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "0")
.commit();
Assert.assertEquals("Table should be on version 1", 1, (int) version());
TableMetadata base = readMetadata();
Transaction t = table.newTransaction();
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertEquals("Append should create one manifest",
1, t.table().currentSnapshot().manifests().size());
ManifestFile appendManifest = t.table().currentSnapshot().manifests().get(0);
Assert.assertSame("Base metadata should not change when commit is created",
base, readMetadata());
Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
// cause the transaction commit to fail
table.newAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.commit();
Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
t.commitTransaction();
// retry merges the txn manifest with the conflicting append's manifests
// into one new manifest and removes the replaced append manifest file
Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
Set<ManifestFile> previousManifests = Sets.newHashSet();
previousManifests.add(appendManifest);
previousManifests.addAll(conflictAppendManifests);
Assert.assertEquals("Should merge both commit manifests into a single manifest",
1, table.currentSnapshot().manifests().size());
Assert.assertFalse("Should merge both commit manifests into a new manifest",
previousManifests.contains(table.currentSnapshot().manifests().get(0)));
Assert.assertFalse("Append manifest should be deleted", new File(appendManifest.path()).exists());
}
}
| 1,910 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import java.io.File;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
 * Test harness providing an in-memory table catalog for core tests.
 * Metadata is kept in static maps keyed by table name, with a
 * TableOperations implementation that mimics optimistic-concurrency commits
 * and supports injecting a fixed number of commit failures.
 */
public class TestTables {
// creates a new table; fails if metadata already exists under this name
static TestTable create(File temp, String name, Schema schema, PartitionSpec spec) {
TestTableOperations ops = new TestTableOperations(name, temp);
if (ops.current() != null) {
throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
}
ops.commit(null, TableMetadata.newTableMetadata(ops, schema, spec, temp.toString()));
return new TestTable(ops, name);
}
// starts a create-table transaction; nothing is committed until the txn commits
static Transaction beginCreate(File temp, String name, Schema schema, PartitionSpec spec) {
TableOperations ops = new TestTableOperations(name, temp);
if (ops.current() != null) {
throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
}
TableMetadata metadata = TableMetadata.newTableMetadata(ops, schema, spec, temp.toString());
return BaseTransaction.createTableTransaction(ops, metadata);
}
public static Transaction beginReplace(File temp, String name, Schema schema, PartitionSpec spec) {
return beginReplace(temp, name, schema, spec, ImmutableMap.of());
}
// starts a replace-table transaction, falling back to create when the table
// does not exist yet
public static Transaction beginReplace(File temp, String name, Schema schema, PartitionSpec spec,
Map<String, String> properties) {
TestTableOperations ops = new TestTableOperations(name, temp);
TableMetadata current = ops.current();
TableMetadata metadata;
if (current != null) {
metadata = current.buildReplacement(schema, spec, properties);
return BaseTransaction.replaceTableTransaction(ops, metadata);
} else {
metadata = newTableMetadata(ops, schema, spec, temp.toString(), properties);
return BaseTransaction.createTableTransaction(ops, metadata);
}
}
// loads an existing table by name (current() may be null if never committed)
static TestTable load(File temp, String name) {
TestTableOperations ops = new TestTableOperations(name, temp);
return new TestTable(ops, name);
}
// table wrapper that exposes its concrete TestTableOperations for tests
static class TestTable extends BaseTable {
private final TestTableOperations ops;
private TestTable(TestTableOperations ops, String name) {
super(ops, name);
this.ops = ops;
}
TestTableOperations ops() {
return ops;
}
}
// shared in-memory "catalog": current metadata and version number per table;
// all access is synchronized on METADATA
private static final Map<String, TableMetadata> METADATA = Maps.newHashMap();
private static final Map<String, Integer> VERSIONS = Maps.newHashMap();
// resets the shared state between tests
static void clearTables() {
synchronized (METADATA) {
METADATA.clear();
VERSIONS.clear();
}
}
static TableMetadata readMetadata(String tableName) {
synchronized (METADATA) {
return METADATA.get(tableName);
}
}
static Integer metadataVersion(String tableName) {
synchronized (METADATA) {
return VERSIONS.get(tableName);
}
}
public static class TestTableOperations implements TableOperations {
private final String tableName;
private final File metadata;
private TableMetadata current = null;
private long lastSnapshotId = 0;
// number of commits that will be failed with an injected exception
private int failCommits = 0;
public TestTableOperations(String tableName, File location) {
this.tableName = tableName;
this.metadata = new File(location, "metadata");
metadata.mkdirs();
refresh();
// resume snapshot ID assignment after the highest existing snapshot
if (current != null) {
for (Snapshot snap : current.snapshots()) {
this.lastSnapshotId = Math.max(lastSnapshotId, snap.snapshotId());
}
} else {
this.lastSnapshotId = 0;
}
}
// makes the next numFailures commits throw CommitFailedException
void failCommits(int numFailures) {
this.failCommits = numFailures;
}
@Override
public TableMetadata current() {
return current;
}
@Override
public TableMetadata refresh() {
synchronized (METADATA) {
this.current = METADATA.get(tableName);
}
return current;
}
// compare-and-swap commit: succeeds only if base is still the latest
// metadata after a refresh; otherwise throws CommitFailedException
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
if (base != current) {
throw new CommitFailedException("Cannot commit changes based on stale metadata");
}
synchronized (METADATA) {
refresh();
if (base == current) {
if (failCommits > 0) {
this.failCommits -= 1;
throw new CommitFailedException("Injected failure");
}
Integer version = VERSIONS.get(tableName);
VERSIONS.put(tableName, version == null ? 0 : version + 1);
METADATA.put(tableName, metadata);
this.current = metadata;
} else {
throw new CommitFailedException(
"Commit failed: table was updated at %d", current.lastUpdatedMillis());
}
}
}
@Override
public FileIO io() {
return new LocalFileIO();
}
@Override
public String metadataFileLocation(String fileName) {
return new File(metadata, fileName).getAbsolutePath();
}
// monotonically increasing snapshot IDs, starting after any existing ones
@Override
public long newSnapshotId() {
long nextSnapshotId = lastSnapshotId + 1;
this.lastSnapshotId = nextSnapshotId;
return nextSnapshotId;
}
}
// FileIO backed by the local filesystem; delete failures are surfaced
static class LocalFileIO implements FileIO {
@Override
public InputFile newInputFile(String path) {
return Files.localInput(path);
}
@Override
public OutputFile newOutputFile(String path) {
return Files.localOutput(path);
}
@Override
public void deleteFile(String path) {
if (!new File(path).delete()) {
throw new RuntimeIOException("Failed to delete file: " + path);
}
}
}
}
| 1,911 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestDeleteFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.ManifestEntry.Status;
import org.junit.Assert;
import org.junit.Test;
/**
 * Verifies that successive delete operations rewrite the manifest so that
 * removed files are marked DELETED while remaining files stay EXISTING.
 */
public class TestDeleteFiles extends TableTestBase {
  @Test
  public void testMultipleDeletes() {
    // seed the table with three data files
    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .commit();

    Assert.assertEquals("Metadata should be at version 1", 1L, (long) version());
    Snapshot initialSnapshot = readMetadata().currentSnapshot();
    validateSnapshot(null, initialSnapshot, FILE_A, FILE_B, FILE_C);

    // first delete removes FILE_A; FILE_B and FILE_C carry over as EXISTING
    table.newDelete()
        .deleteFile(FILE_A)
        .commit();

    Assert.assertEquals("Metadata should be at version 2", 2L, (long) version());
    Snapshot firstDelete = readMetadata().currentSnapshot();
    Assert.assertEquals("Should have 1 manifest", 1, firstDelete.manifests().size());
    validateManifestEntries(firstDelete.manifests().get(0),
        ids(firstDelete.snapshotId(), initialSnapshot.snapshotId(), initialSnapshot.snapshotId()),
        files(FILE_A, FILE_B, FILE_C),
        statuses(Status.DELETED, Status.EXISTING, Status.EXISTING));

    // second delete removes FILE_B; only FILE_C remains EXISTING
    table.newDelete()
        .deleteFile(FILE_B)
        .commit();

    Assert.assertEquals("Metadata should be at version 3", 3L, (long) version());
    Snapshot secondDelete = readMetadata().currentSnapshot();
    Assert.assertEquals("Should have 1 manifest", 1, secondDelete.manifests().size());
    validateManifestEntries(secondDelete.manifests().get(0),
        ids(secondDelete.snapshotId(), initialSnapshot.snapshotId()),
        files(FILE_B, FILE_C),
        statuses(Status.DELETED, Status.EXISTING));
  }
}
| 1,912 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestFastAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.List;
import java.util.Set;
public class TestFastAppend extends TableTestBase {
@Test
public void testEmptyTableAppend() {
// fast append applied against an empty table produces a valid first snapshot
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
// apply() builds the pending snapshot without committing it
Snapshot pending = table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.apply();
validateSnapshot(base.currentSnapshot(), pending, FILE_A, FILE_B);
}
@Test
public void testNonEmptyTableAppend() {
// seed the table so the fast append has an existing snapshot to build on
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have one existing manifest", 1, v2manifests.size());
// prepare a new append
Snapshot pending = table.newFastAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Assert.assertNotEquals("Snapshots should have unique IDs",
base.currentSnapshot().snapshotId(), pending.snapshotId());
validateSnapshot(base.currentSnapshot(), pending, FILE_C, FILE_D);
}
@Test
public void testNoMerge() {
// two separate appends leave two manifests: fast append never merges them
table.newAppend()
.appendFile(FILE_A)
.commit();
table.newFastAppend()
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v3manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have 2 existing manifests", 2, v3manifests.size());
// prepare a new append
Snapshot pending = table.newFastAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
// each snapshot (2 committed + 1 pending) must have a distinct ID
Set<Long> ids = Sets.newHashSet();
for (Snapshot snapshot : base.snapshots()) {
ids.add(snapshot.snapshotId());
}
ids.add(pending.snapshotId());
Assert.assertEquals("Snapshots should have 3 unique IDs", 3, ids.size());
validateSnapshot(base.currentSnapshot(), pending, FILE_C, FILE_D);
}
  @Test
  public void testRefreshBeforeApply() {
    // load a new copy of the table that will not be refreshed by the commit
    Table stale = load();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
    List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
    Assert.assertEquals("Should have 1 existing manifest", 1, v2manifests.size());

    // commit from the stale table
    AppendFiles append = stale.newFastAppend()
        .appendFile(FILE_D);

    Snapshot pending = append.apply();

    // table should have been refreshed before applying the changes
    // (pending builds on base's current snapshot, not the stale copy's state)
    validateSnapshot(base.currentSnapshot(), pending, FILE_D);
  }
@Test
public void testRefreshBeforeCommit() {
// commit from the stale table
AppendFiles append = table.newFastAppend()
.appendFile(FILE_D);
Snapshot pending = append.apply();
validateSnapshot(null, pending, FILE_D);
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have 1 existing manifest", 1, v2manifests.size());
append.commit();
TableMetadata committed = readMetadata();
// apply was called before the conflicting commit, but the commit was still consistent
validateSnapshot(base.currentSnapshot(), committed.currentSnapshot(), FILE_D);
List<ManifestFile> committedManifests = Lists.newArrayList(committed.currentSnapshot().manifests());
committedManifests.removeAll(base.currentSnapshot().manifests());
Assert.assertEquals("Should reused manifest created by apply",
pending.manifests().get(0), committedManifests.get(0));
}
  @Test
  public void testFailure() {
    // inject 5 failures
    TestTables.TestTableOperations ops = table.ops();
    ops.failCommits(5);

    AppendFiles append = table.newFastAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());

    // NOTE(review): message implies the default retry budget is 4 attempts, which 5
    // injected failures exhausts — confirm against the commit-retry configuration.
    AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
        CommitFailedException.class, "Injected failure", append::commit);

    // a failed commit must clean up the manifest it wrote
    Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
  }
  @Test
  public void testRecoveryWithManifestList() {
    // same scenario as testRecoveryWithoutManifestList, but with manifest lists enabled
    table.updateProperties().set(TableProperties.MANIFEST_LISTS_ENABLED, "true").commit();

    // inject 3 failures, the last try will succeed
    TestTables.TestTableOperations ops = table.ops();
    ops.failCommits(3);

    AppendFiles append = table.newFastAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());

    append.commit();

    TableMetadata metadata = readMetadata();
    validateSnapshot(null, metadata.currentSnapshot(), FILE_B);

    // retries must not rewrite the manifest produced by the first apply
    Assert.assertTrue("Should commit same new manifest", new File(newManifest.path()).exists());
    Assert.assertTrue("Should commit the same new manifest",
        metadata.currentSnapshot().manifests().contains(newManifest));
  }
  @Test
  public void testRecoveryWithoutManifestList() {
    // same scenario as testRecoveryWithManifestList, but with manifest lists disabled
    table.updateProperties().set(TableProperties.MANIFEST_LISTS_ENABLED, "false").commit();

    // inject 3 failures, the last try will succeed
    TestTables.TestTableOperations ops = table.ops();
    ops.failCommits(3);

    AppendFiles append = table.newFastAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());

    append.commit();

    TableMetadata metadata = readMetadata();
    validateSnapshot(null, metadata.currentSnapshot(), FILE_B);

    // retries must not rewrite the manifest produced by the first apply
    Assert.assertTrue("Should commit same new manifest", new File(newManifest.path()).exists());
    Assert.assertTrue("Should commit the same new manifest",
        metadata.currentSnapshot().manifests().contains(newManifest));
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.TypeUtil;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
public class TestCreateTransaction extends TableTestBase {
  @Test
  public void testCreateTransaction() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    // begin a create transaction: nothing may be written until commitTransaction()
    Transaction t = TestTables.beginCreate(tableDir, "test_create", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_create"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_create"));

    t.commitTransaction();

    // after commit, version-0 metadata exists with reassigned column IDs and no snapshots
    TableMetadata meta = TestTables.readMetadata("test_create");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_create"));
    Assert.assertEquals("Should have 0 manifest files",
        0, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
  }
  @Test
  public void testCreateAndAppendWithTransaction() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "test_append", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    // append via the transaction itself; still nothing on disk until commitTransaction()
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    Assert.assertNull("Appending in a transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    t.commitTransaction();

    // create + append land together as version 0 with a single snapshot
    TableMetadata meta = TestTables.readMetadata("test_append");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_append"));
    Assert.assertEquals("Should have 1 manifest file",
        1, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());

    validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
  }
  @Test
  public void testCreateAndAppendWithTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "test_append", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    // same as testCreateAndAppendWithTransaction, but appending through t.table()
    Assert.assertTrue("Should return a transaction table",
        t.table() instanceof BaseTransaction.TransactionTable);

    t.table().newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    Assert.assertNull("Appending in a transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    t.commitTransaction();

    TableMetadata meta = TestTables.readMetadata("test_append");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_append"));
    Assert.assertEquals("Should have 1 manifest file",
        1, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());

    validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
  }
  @Test
  public void testCreateAndUpdatePropertiesWithTransaction() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "test_properties", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_properties"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_properties"));

    // property update via the transaction; deferred until commitTransaction()
    t.updateProperties()
        .set("test-property", "test-value")
        .commit();

    Assert.assertNull("Adding properties in a transaction should not commit metadata",
        TestTables.readMetadata("test_properties"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_properties"));

    t.commitTransaction();

    TableMetadata meta = TestTables.readMetadata("test_properties");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_properties"));
    Assert.assertEquals("Should have 0 manifest files",
        0, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
    Assert.assertEquals("Should have one table property", 1, meta.properties().size());
    Assert.assertEquals("Should have correct table property value",
        "test-value", meta.properties().get("test-property"));
  }
  @Test
  public void testCreateAndUpdatePropertiesWithTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "test_properties", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_properties"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_properties"));

    // same as the transaction variant, but updating properties through t.table()
    Assert.assertTrue("Should return a transaction table",
        t.table() instanceof BaseTransaction.TransactionTable);

    t.table().updateProperties()
        .set("test-property", "test-value")
        .commit();

    Assert.assertNull("Adding properties in a transaction should not commit metadata",
        TestTables.readMetadata("test_properties"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_properties"));

    t.commitTransaction();

    TableMetadata meta = TestTables.readMetadata("test_properties");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_properties"));
    Assert.assertEquals("Should have 0 manifest files",
        0, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
    Assert.assertEquals("Should have one table property", 1, meta.properties().size());
    Assert.assertEquals("Should have correct table property value",
        "test-value", meta.properties().get("test-property"));
  }
  @Test
  public void testCreateDetectsUncommittedChange() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "uncommitted_change", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("uncommitted_change"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("uncommitted_change"));

    t.updateProperties().set("test-property", "test-value"); // not committed

    // starting another operation while one is pending must fail fast
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class,
        "Cannot create new DeleteFiles: last operation has not committed",
        t::newDelete);
  }
  @Test
  public void testCreateDetectsUncommittedChangeOnCommit() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "uncommitted_change", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("uncommitted_change"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("uncommitted_change"));

    t.updateProperties().set("test-property", "test-value"); // not committed

    // commitTransaction itself must also reject a pending, uncommitted operation
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
        t::commitTransaction);
  }
  @Test
  public void testCreateTransactionConflict() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    Transaction t = TestTables.beginCreate(tableDir, "test_conflict", SCHEMA, SPEC);

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_conflict"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_conflict"));

    // a concurrent create of the same table name wins the race
    Table conflict = TestTables.create(tableDir, "test_conflict", SCHEMA, unpartitioned());

    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), conflict.schema().asStruct());
    Assert.assertEquals("Table spec should match conflict table, not transaction table",
        unpartitioned(), conflict.spec());
    Assert.assertFalse("Table should not have any snapshots",
        conflict.snapshots().iterator().hasNext());

    // the losing transaction must fail on commit rather than clobber the table
    AssertHelpers.assertThrows("Transaction commit should fail",
        CommitFailedException.class, "Commit failed: table was updated", t::commitTransaction);
  }
private static Schema assignFreshIds(Schema schema) {
AtomicInteger lastColumnId = new AtomicInteger(0);
return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.Pair;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import java.util.Set;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestSchemaUpdate {
  // Schema exercised by most tests: primitives, a nested struct, a map with
  // struct keys and values, lists of structs, and a string-to-string map.
  private static final Schema SCHEMA = new Schema(
      required(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get()),
      optional(3, "preferences", Types.StructType.of(
          required(8, "feature1", Types.BooleanType.get()),
          optional(9, "feature2", Types.BooleanType.get())
      )),
      required(4, "locations", Types.MapType.ofRequired(10, 11,
          Types.StructType.of(
              required(20, "address", Types.StringType.get()),
              required(21, "city", Types.StringType.get()),
              required(22, "state", Types.StringType.get()),
              required(23, "zip", Types.IntegerType.get())
          ),
          Types.StructType.of(
              required(12, "lat", Types.FloatType.get()),
              required(13, "long", Types.FloatType.get())
          ))),
      optional(5, "points", Types.ListType.ofOptional(14,
          Types.StructType.of(
              required(15, "x", Types.LongType.get()),
              required(16, "y", Types.LongType.get())
          ))),
      required(6, "doubles", Types.ListType.ofRequired(17,
          Types.DoubleType.get()
      )),
      optional(7, "properties", Types.MapType.ofOptional(18, 19,
          Types.StringType.get(),
          Types.StringType.get()
      ))
  );

  // Highest field ID assigned in SCHEMA; newly added columns get IDs after this.
  private static final int SCHEMA_LAST_COLUMN_ID = 23;
@Test
public void testNoChanges() {
Schema identical = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).apply();
Assert.assertEquals("Should not include any changes", SCHEMA.asStruct(), identical.asStruct());
}
@Test
public void testDeleteFields() {
// use schema projection to test column deletes
Set<Integer> ALL_IDS = ImmutableSet.copyOf(TypeUtil.getProjectedIds(SCHEMA));
List<String> columns = Lists.newArrayList("id", "data", "preferences", "preferences.feature1",
"preferences.feature2", "locations", "locations.lat", "locations.long", "points",
"points.x", "points.y", "doubles", "properties");
for (String name : columns) {
Set<Integer> selected = Sets.newHashSet(ALL_IDS);
// remove the id and any nested fields from the projection
Types.NestedField nested = SCHEMA.findField(name);
selected.remove(nested.fieldId());
selected.removeAll(TypeUtil.getProjectedIds(nested.type()));
Schema del = new SchemaUpdate(SCHEMA, 19).deleteColumn(name).apply();
Assert.assertEquals("Should match projection with '" + name + "' removed",
TypeUtil.select(SCHEMA, selected).asStruct(), del.asStruct());
}
}
  @Test
  public void testUpdateTypes() {
    // expected result: id int->long, locations lat/long float->double; all else unchanged
    Types.StructType expected = Types.StructType.of(
        required(1, "id", Types.LongType.get()),
        optional(2, "data", Types.StringType.get()),
        optional(3, "preferences", Types.StructType.of(
            required(8, "feature1", Types.BooleanType.get()),
            optional(9, "feature2", Types.BooleanType.get())
        )),
        required(4, "locations", Types.MapType.ofRequired(10, 11,
            Types.StructType.of(
                required(20, "address", Types.StringType.get()),
                required(21, "city", Types.StringType.get()),
                required(22, "state", Types.StringType.get()),
                required(23, "zip", Types.IntegerType.get())
            ),
            Types.StructType.of(
                required(12, "lat", Types.DoubleType.get()),
                required(13, "long", Types.DoubleType.get())
            ))),
        optional(5, "points", Types.ListType.ofOptional(14,
            Types.StructType.of(
                required(15, "x", Types.LongType.get()),
                required(16, "y", Types.LongType.get())
            ))),
        required(6, "doubles", Types.ListType.ofRequired(17,
            Types.DoubleType.get()
        )),
        optional(7, "properties", Types.MapType.ofOptional(18, 19,
            Types.StringType.get(),
            Types.StringType.get()
        ))
    );

    // widen primitive types; nested fields are addressed by dotted paths
    Schema updated = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
        .updateColumn("id", Types.LongType.get())
        .updateColumn("locations.lat", Types.DoubleType.get())
        .updateColumn("locations.long", Types.DoubleType.get())
        .apply();

    Assert.assertEquals("Should convert types", expected, updated.asStruct());
  }
  @Test
  public void testUpdateFailure() {
    // the only legal type promotions: int->long, float->double, decimal precision widening
    Set<Pair<Type.PrimitiveType, Type.PrimitiveType>> allowedUpdates = Sets.newHashSet(
        Pair.of(Types.IntegerType.get(), Types.LongType.get()),
        Pair.of(Types.FloatType.get(), Types.DoubleType.get()),
        Pair.of(Types.DecimalType.of(9, 2), Types.DecimalType.of(18, 2))
    );

    List<Type.PrimitiveType> primitives = Lists.newArrayList(
        Types.BooleanType.get(), Types.IntegerType.get(), Types.LongType.get(),
        Types.FloatType.get(), Types.DoubleType.get(), Types.DateType.get(), Types.TimeType.get(),
        Types.TimestampType.withZone(), Types.TimestampType.withoutZone(),
        Types.StringType.get(), Types.UUIDType.get(), Types.BinaryType.get(),
        Types.FixedType.ofLength(3), Types.FixedType.ofLength(4),
        Types.DecimalType.of(9, 2), Types.DecimalType.of(9, 3),
        Types.DecimalType.of(18, 2)
    );

    // exhaustively check every (from, to) pair: allowed promotions succeed, all others throw
    for (Type.PrimitiveType fromType : primitives) {
      for (Type.PrimitiveType toType : primitives) {
        Schema fromSchema = new Schema(required(1, "col", fromType));

        if (fromType.equals(toType) ||
            allowedUpdates.contains(Pair.of(fromType, toType))) {
          Schema expected = new Schema(required(1, "col", toType));
          Schema result = new SchemaUpdate(fromSchema, 1).updateColumn("col", toType).apply();
          Assert.assertEquals("Should allow update", expected.asStruct(), result.asStruct());
          continue; // skip the failure assertion for legal updates
        }

        String typeChange = fromType.toString() + " -> " + toType.toString();
        AssertHelpers.assertThrows("Should reject update: " + typeChange,
            IllegalArgumentException.class, "change column type: col: " + typeChange,
            () -> new SchemaUpdate(fromSchema, 1).updateColumn("col", toType));
      }
    }
  }
  @Test
  public void testRename() {
    // renames must preserve field IDs and types; only names change
    Types.StructType expected = Types.StructType.of(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "json", Types.StringType.get()),
        optional(3, "options", Types.StructType.of(
            required(8, "feature1", Types.BooleanType.get()),
            optional(9, "newfeature", Types.BooleanType.get())
        )),
        required(4, "locations", Types.MapType.ofRequired(10, 11,
            Types.StructType.of(
                required(20, "address", Types.StringType.get()),
                required(21, "city", Types.StringType.get()),
                required(22, "state", Types.StringType.get()),
                required(23, "zip", Types.IntegerType.get())
            ),
            Types.StructType.of(
                required(12, "latitude", Types.FloatType.get()),
                required(13, "long", Types.FloatType.get())
            ))),
        optional(5, "points", Types.ListType.ofOptional(14,
            Types.StructType.of(
                required(15, "X", Types.LongType.get()),
                required(16, "y.y", Types.LongType.get())
            ))),
        required(6, "doubles", Types.ListType.ofRequired(17,
            Types.DoubleType.get()
        )),
        optional(7, "properties", Types.MapType.ofOptional(18, 19,
            Types.StringType.get(),
            Types.StringType.get()
        ))
    );

    Schema renamed = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
        .renameColumn("data", "json")
        .renameColumn("preferences", "options")
        .renameColumn("preferences.feature2", "newfeature") // inside a renamed column
        .renameColumn("locations.lat", "latitude")
        .renameColumn("points.x", "X")
        .renameColumn("points.y", "y.y") // has a '.' in the field name
        .apply();

    Assert.assertEquals("Should rename all fields", expected, renamed.asStruct());
  }
  @Test
  public void testAddFields() {
    // new fields are optional and receive IDs after SCHEMA_LAST_COLUMN_ID (24..27)
    Schema expected = new Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "data", Types.StringType.get()),
        optional(3, "preferences", Types.StructType.of(
            required(8, "feature1", Types.BooleanType.get()),
            optional(9, "feature2", Types.BooleanType.get())
        )),
        required(4, "locations", Types.MapType.ofRequired(10, 11,
            Types.StructType.of(
                required(20, "address", Types.StringType.get()),
                required(21, "city", Types.StringType.get()),
                required(22, "state", Types.StringType.get()),
                required(23, "zip", Types.IntegerType.get())
            ),
            Types.StructType.of(
                required(12, "lat", Types.FloatType.get()),
                required(13, "long", Types.FloatType.get()),
                optional(25, "alt", Types.FloatType.get())
            ))),
        optional(5, "points", Types.ListType.ofOptional(14,
            Types.StructType.of(
                required(15, "x", Types.LongType.get()),
                required(16, "y", Types.LongType.get()),
                optional(26, "z", Types.LongType.get()),
                optional(27, "t.t", Types.LongType.get())
            ))),
        required(6, "doubles", Types.ListType.ofRequired(17,
            Types.DoubleType.get()
        )),
        optional(7, "properties", Types.MapType.ofOptional(18, 19,
            Types.StringType.get(),
            Types.StringType.get()
        )),
        optional(24, "toplevel", Types.DecimalType.of(9, 2))
    );

    Schema added = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
        .addColumn("toplevel", Types.DecimalType.of(9, 2))
        .addColumn("locations", "alt", Types.FloatType.get()) // map of structs
        .addColumn("points", "z", Types.LongType.get()) // list of structs
        .addColumn("points", "t.t", Types.LongType.get()) // name with '.'
        .apply();

    Assert.assertEquals("Should match with added fields", expected.asStruct(), added.asStruct());
  }
  @Test
  public void testAddNestedStruct() {
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
    // the struct's own IDs deliberately collide with the schema; they must be reassigned
    Types.StructType struct = Types.StructType.of(
        required(1, "lat", Types.IntegerType.get()), // conflicts with id
        optional(2, "long", Types.IntegerType.get())
    );

    Schema expected = new Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "location", Types.StructType.of(
            required(3, "lat", Types.IntegerType.get()),
            optional(4, "long", Types.IntegerType.get())
        ))
    );

    Schema result = new SchemaUpdate(schema, 1)
        .addColumn("location", struct)
        .apply();

    Assert.assertEquals("Should add struct and reassign column IDs",
        expected.asStruct(), result.asStruct());
  }
  @Test
  public void testAddNestedMapOfStructs() {
    Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
    // IDs in the added map conflict with existing IDs; all must be reassigned on add
    Types.MapType map = Types.MapType.ofOptional(1, 2,
        Types.StructType.of(
            required(20, "address", Types.StringType.get()),
            required(21, "city", Types.StringType.get()),
            required(22, "state", Types.StringType.get()),
            required(23, "zip", Types.IntegerType.get())
        ),
        Types.StructType.of(
            required(9, "lat", Types.IntegerType.get()),
            optional(8, "long", Types.IntegerType.get())
        )
    );

    Schema expected = new Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "locations", Types.MapType.ofOptional(3, 4,
            Types.StructType.of(
                required(5, "address", Types.StringType.get()),
                required(6, "city", Types.StringType.get()),
                required(7, "state", Types.StringType.get()),
                required(8, "zip", Types.IntegerType.get())
            ),
            Types.StructType.of(
                required(9, "lat", Types.IntegerType.get()),
                optional(10, "long", Types.IntegerType.get())
            )
        ))
    );

    Schema result = new SchemaUpdate(schema, 1)
        .addColumn("locations", map)
        .apply();

    Assert.assertEquals("Should add map and reassign column IDs",
        expected.asStruct(), result.asStruct());
  }
@Test
public void testAddNestedListOfStructs() {
Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
Types.ListType list = Types.ListType.ofOptional(1,
Types.StructType.of(
required(9, "lat", Types.IntegerType.get()),
optional(8, "long", Types.IntegerType.get())
)
);
Schema expected = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "locations", Types.ListType.ofOptional(3,
Types.StructType.of(
required(4, "lat", Types.IntegerType.get()),
optional(5, "long", Types.IntegerType.get())
)
))
);
Schema result = new SchemaUpdate(schema, 1)
.addColumn("locations", list)
.apply();
Assert.assertEquals("Should add map and reassign column IDs",
expected.asStruct(), result.asStruct());
}
  @Test
  public void testMixedChanges() {
    // combines adds, renames, type updates, and deletes in one update pass
    Schema expected = new Schema(
        required(1, "id", Types.LongType.get()),
        optional(2, "json", Types.StringType.get()),
        optional(3, "options", Types.StructType.of(
            required(8, "feature1", Types.BooleanType.get()),
            optional(9, "newfeature", Types.BooleanType.get())
        )),
        required(4, "locations", Types.MapType.ofRequired(10, 11,
            Types.StructType.of(
                required(20, "address", Types.StringType.get()),
                required(21, "city", Types.StringType.get()),
                required(22, "state", Types.StringType.get()),
                required(23, "zip", Types.IntegerType.get())
            ),
            Types.StructType.of(
                required(12, "latitude", Types.DoubleType.get()),
                optional(25, "alt", Types.FloatType.get())
            ))),
        optional(5, "points", Types.ListType.ofOptional(14,
            Types.StructType.of(
                required(15, "X", Types.LongType.get()),
                required(16, "y.y", Types.LongType.get()),
                optional(26, "z", Types.LongType.get()),
                optional(27, "t.t", Types.LongType.get())
            ))),
        required(6, "doubles", Types.ListType.ofRequired(17,
            Types.DoubleType.get()
        )),
        optional(24, "toplevel", Types.DecimalType.of(9, 2))
    );

    Schema updated = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
        .addColumn("toplevel", Types.DecimalType.of(9, 2))
        .addColumn("locations", "alt", Types.FloatType.get()) // map of structs
        .addColumn("points", "z", Types.LongType.get()) // list of structs
        .addColumn("points", "t.t", Types.LongType.get()) // name with '.'
        .renameColumn("data", "json")
        .renameColumn("preferences", "options")
        .renameColumn("preferences.feature2", "newfeature") // inside a renamed column
        .renameColumn("locations.lat", "latitude")
        .renameColumn("points.x", "X")
        .renameColumn("points.y", "y.y") // has a '.' in the field name
        .updateColumn("id", Types.LongType.get())
        .updateColumn("locations.lat", Types.DoubleType.get()) // use the original name
        .deleteColumn("locations.long")
        .deleteColumn("properties")
        .apply();

    Assert.assertEquals("Should match with added fields", expected.asStruct(), updated.asStruct());
  }
  @Test
  public void testAmbiguousAdd() {
    // preferences.booleans could be top-level or a field of preferences
    AssertHelpers.assertThrows("Should reject ambiguous column name",
        IllegalArgumentException.class, "ambiguous name: preferences.booleans", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.addColumn("preferences.booleans", Types.BooleanType.get());
        }
    );
  }
  @Test
  public void testAddAlreadyExists() {
    // adding a nested field whose name already exists in the parent struct must fail
    AssertHelpers.assertThrows("Should reject column name that already exists",
        IllegalArgumentException.class, "already exists: preferences.feature1", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.addColumn("preferences", "feature1", Types.BooleanType.get());
        }
    );
    // adding a top-level column with an existing name must also fail
    AssertHelpers.assertThrows("Should reject column name that already exists",
        IllegalArgumentException.class, "already exists: preferences", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.addColumn("preferences", Types.BooleanType.get());
        }
    );
  }
  @Test
  public void testDeleteMissingColumn() {
    // deleting a column that does not exist must fail
    AssertHelpers.assertThrows("Should reject delete missing column",
        IllegalArgumentException.class, "missing column: col", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.deleteColumn("col");
        }
    );
  }
  @Test
  public void testAddDeleteConflict() {
    // deleting a column that was only just added in this update must fail
    AssertHelpers.assertThrows("Should reject add then delete",
        IllegalArgumentException.class, "missing column: col", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.addColumn("col", Types.IntegerType.get()).deleteColumn("col");
        }
    );
    // deleting a struct that has pending field additions must fail
    AssertHelpers.assertThrows("Should reject add then delete",
        IllegalArgumentException.class, "column that has additions: preferences", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.addColumn("preferences", "feature3", Types.IntegerType.get())
              .deleteColumn("preferences");
        }
    );
  }
  @Test
  public void testRenameMissingColumn() {
    // renaming a column that does not exist must fail
    AssertHelpers.assertThrows("Should reject rename missing column",
        IllegalArgumentException.class, "missing column: col", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.renameColumn("col", "fail");
        }
    );
  }
  @Test
  public void testRenameDeleteConflict() {
    // deleting by the old name after a rename must fail: the column has pending updates
    AssertHelpers.assertThrows("Should reject rename then delete",
        IllegalArgumentException.class, "column that has updates: id", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.renameColumn("id", "col").deleteColumn("id");
        }
    );
    // deleting by the new name must also fail: "col" does not exist yet in the source schema
    AssertHelpers.assertThrows("Should reject rename then delete",
        IllegalArgumentException.class, "missing column: col", () -> {
          UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
          update.renameColumn("id", "col").deleteColumn("col");
        }
    );
  }
@Test
public void testDeleteRenameConflict() {
  // renaming a column that is already scheduled for deletion is rejected
  AssertHelpers.assertThrows("Should reject delete then rename",
      IllegalArgumentException.class, "column that will be deleted: id", () -> {
        UpdateSchema pending = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        pending.deleteColumn("id").renameColumn("id", "identifier");
      }
  );
}
@Test
public void testUpdateMissingColumn() {
  // updating the type of a column that is not present in the schema must fail
  AssertHelpers.assertThrows("Should reject rename missing column",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).updateColumn("col", Types.DateType.get());
      }
  );
}
@Test
public void testUpdateDeleteConflict() {
  // deleting a column that has a pending type update is rejected
  AssertHelpers.assertThrows("Should reject update then delete",
      IllegalArgumentException.class, "column that has updates: id", () -> {
        UpdateSchema pending = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        pending.updateColumn("id", Types.LongType.get()).deleteColumn("id");
      }
  );
}
@Test
public void testDeleteUpdateConflict() {
  // updating a column that is already scheduled for deletion is rejected
  AssertHelpers.assertThrows("Should reject delete then update",
      IllegalArgumentException.class, "column that will be deleted: id", () -> {
        UpdateSchema pending = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        pending.deleteColumn("id").updateColumn("id", Types.LongType.get());
      }
  );
}
@Test
public void testDeleteMapKey() {
  // map keys are structural and cannot be removed from a map type
  AssertHelpers.assertThrows("Should reject delete map key",
      IllegalArgumentException.class, "Cannot delete map keys", () -> {
        UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        update.deleteColumn("locations.key").apply();
      }
  );
}
@Test
public void testAddFieldToMapKey() {
  // map keys are structural and cannot gain new sub-fields
  AssertHelpers.assertThrows("Should reject add sub-field to map key",
      IllegalArgumentException.class, "Cannot add fields to map keys", () -> {
        UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        update.addColumn("locations.key", "address_line_2", Types.StringType.get()).apply();
      }
  );
}
@Test
public void testAlterMapKey() {
  // fields nested inside a map key cannot be altered
  AssertHelpers.assertThrows("Should reject add sub-field to map key",
      IllegalArgumentException.class, "Cannot alter map keys", () -> {
        UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        update.updateColumn("locations.zip", Types.LongType.get()).apply();
      }
  );
}
@Test
public void testUpdateMapKey() {
  // build a schema with a map column so the key field can be addressed as "m.key"
  Schema mapSchema = new Schema(required(1, "m", Types.MapType.ofOptional(2, 3,
      Types.IntegerType.get(), Types.DoubleType.get())));
  AssertHelpers.assertThrows("Should reject update map key",
      IllegalArgumentException.class, "Cannot update map keys", () -> {
        UpdateSchema update = new SchemaUpdate(mapSchema, 3);
        update.updateColumn("m.key", Types.LongType.get()).apply();
      }
  );
}
}
| 1,915 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/AssertHelpers.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import org.junit.Assert;
import java.util.concurrent.Callable;
/**
 * Assertion helpers for tests that expect exceptions with specific messages.
 */
public class AssertHelpers {

  private AssertHelpers() {
    // static utility class; prevent instantiation
  }

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   * exception's message
   * @param callable A Callable that is expected to throw the exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Callable<?> callable) {
    try {
      callable.call();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   * exception's message
   * @param runnable A Runnable that is expected to throw the runtime exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Runnable runnable) {
    try {
      runnable.run();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  // verifies the caught exception's class and message; on failure, attaches the
  // original exception as suppressed so its stack trace is not lost
  private static void handleException(String message,
                                      Class<? extends Exception> expected,
                                      String containedInMessage,
                                      Exception actual) {
    try {
      Assert.assertEquals(message, expected, actual.getClass());
      // guard against exceptions with a null message, which would otherwise
      // turn the assertion into a NullPointerException
      String actualMessage = actual.getMessage();
      Assert.assertTrue(
          "Expected exception message (" + containedInMessage + ") missing: " + actualMessage,
          actualMessage != null && actualMessage.contains(containedInMessage)
      );
    } catch (AssertionError e) {
      e.addSuppressed(actual);
      throw e;
    }
  }
}
| 1,916 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/LocalTableOperations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import java.util.Map;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
/**
 * A minimal {@code TableOperations} for tests: only file IO and metadata file
 * location generation are supported; all table-metadata operations throw.
 */
class LocalTableOperations implements TableOperations {
  private final TemporaryFolder temp;
  private final FileIO fileIo = new TestTables.LocalFileIO();
  // caches generated locations so repeated lookups for a name return the same path
  private final Map<String, String> metadataFileLocations = Maps.newHashMap();

  LocalTableOperations(TemporaryFolder temp) {
    this.temp = temp;
  }

  @Override
  public TableMetadata current() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public TableMetadata refresh() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public void commit(TableMetadata base, TableMetadata metadata) {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public FileIO io() {
    return fileIo;
  }

  @Override
  public String metadataFileLocation(String fileName) {
    return metadataFileLocations.computeIfAbsent(fileName, this::newMetadataFile);
  }

  @Override
  public long newSnapshotId() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  // allocates a fresh file in the temp folder, wrapping IOException as RuntimeIOException
  private String newMetadataFile(String name) {
    try {
      return temp.newFile(name).getAbsolutePath();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }
}
| 1,917 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestReplaceTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for replace-table transactions started with {@code TestTables.beginReplace}:
 * replacing a table's schema, spec, or data, plus commit retry, conflict handling,
 * and detection of uncommitted pending operations.
 */
public class TestReplaceTransaction extends TableTestBase {
  @Test
  public void testReplaceTransaction() {
    // same field names as the table's schema, but with different field IDs
    Schema newSchema = new Schema(
        required(4, "id", Types.IntegerType.get()),
        required(5, "data", Types.StringType.get()));

    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();

    // add data so the replace has something to discard
    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    Transaction replace = TestTables.beginReplace(tableDir, "test", newSchema, unpartitioned());
    replace.commitTransaction();

    table.refresh();

    Assert.assertEquals("Version should be 2", 2L, (long) version());
    // a replace with no appended data leaves the table without a current snapshot
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    // field names match the pre-replace schema, so the resulting struct is identical
    Assert.assertEquals("Schema should match previous schema",
        schema.asStruct(), table.schema().asStruct());
    Assert.assertEquals("Partition spec should have no fields",
        0, table.spec().fields().size());
  }

  @Test
  public void testReplaceWithIncompatibleSchemaUpdate() {
    // "obj_id" is not in the current schema, so the replace assigns fresh IDs
    Schema newSchema = new Schema(
        required(4, "obj_id", Types.IntegerType.get()));

    Snapshot start = table.currentSnapshot();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    Transaction replace = TestTables.beginReplace(tableDir, "test", newSchema, unpartitioned());
    replace.commitTransaction();

    table.refresh();

    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    // IDs are reassigned starting from 1 because the column is new to the table
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        new Schema(required(1, "obj_id", Types.IntegerType.get())).asStruct(),
        table.schema().asStruct());
  }

  @Test
  public void testReplaceWithNewPartitionSpec() {
    PartitionSpec newSpec = PartitionSpec.unpartitioned();

    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    // replace only the partition spec; schema is unchanged
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), newSpec);
    replace.commitTransaction();

    table.refresh();

    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());
    Assert.assertEquals("Table should have new unpartitioned spec",
        0, table.spec().fields().size());
  }

  @Test
  public void testReplaceWithNewData() {
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    // append new files inside the replace transaction; they become the table's only data
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    replace.commitTransaction();

    table.refresh();

    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNotNull("Table should have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());

    // parent is null because the replace discarded the previous snapshot history
    validateSnapshot(null, table.currentSnapshot(), FILE_B, FILE_C, FILE_D);
  }

  @Test
  public void testReplaceDetectsUncommittedChangeOnCommit() {
    Assert.assertEquals("Version should be 0", 0L, (long) version());

    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    // start an append but do not commit it; the transaction must refuse to commit
    replace.newAppend() // not committed
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D);

    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
        replace::commitTransaction);

    Assert.assertEquals("Version should be 0", 0L, (long) version());
  }

  @Test
  public void testReplaceDetectsUncommittedChangeOnTableCommit() {
    Assert.assertEquals("Version should be 0", 0L, (long) version());

    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    // same as above, but the pending append is created through the transaction's table view
    replace.table().newAppend() // not committed
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D);

    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
        replace::commitTransaction);

    Assert.assertEquals("Version should be 0", 0L, (long) version());
  }

  @Test
  public void testReplaceTransactionRetry() {
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();

    // inject a single commit failure; the transaction should retry and succeed
    table.ops().failCommits(1);

    replace.commitTransaction();

    table.refresh();

    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNotNull("Table should have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());

    validateSnapshot(null, table.currentSnapshot(), FILE_B, FILE_C, FILE_D);
  }

  @Test
  public void testReplaceTransactionConflict() {
    Snapshot start = table.currentSnapshot();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    Assert.assertEquals("Version should be 1", 1L, (long) version());

    validateSnapshot(start, table.currentSnapshot(), FILE_A);

    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();

    // keep failing to trigger eventual transaction failure
    // (the transaction's own operations are reached through BaseTransaction.ops)
    ((TestTables.TestTableOperations) ((BaseTransaction) replace).ops).failCommits(100);

    AssertHelpers.assertThrows("Should reject commit when retries are exhausted",
        CommitFailedException.class, "Injected failure",
        replace::commitTransaction);

    // the failed transaction must leave the table untouched
    Assert.assertEquals("Version should be 1", 1L, (long) version());

    table.refresh();

    validateSnapshot(start, table.currentSnapshot(), FILE_A);
  }

  @Test
  public void testReplaceToCreateAndAppend() throws IOException {
    // NOTE(review): this local shadows the inherited tableDir field — presumably
    // intentional so the new table gets its own location; confirm against TableTestBase
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    // this table doesn't exist.
    Transaction replace = TestTables.beginReplace(tableDir, "test_append", SCHEMA, unpartitioned());

    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    Assert.assertTrue("Should return a transaction table",
        replace.table() instanceof BaseTransaction.TransactionTable);

    replace.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    Assert.assertNull("Appending in a transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));

    replace.commitTransaction();

    TableMetadata meta = TestTables.readMetadata("test_append");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_append"));
    Assert.assertEquals("Should have 1 manifest file",
        1, listManifestFiles(tableDir).size());

    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());

    validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
  }

  // mirrors ID reassignment done by table creation, for comparing expected schemas
  private static Schema assignFreshIds(Schema schema) {
    AtomicInteger lastColumnId = new AtomicInteger(0);
    return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
  }
}
| 1,918 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestOverwrite.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for overwrite commits: row-filter deletes, validation of appended files
 * against the overwrite filter, and manifest merging behavior.
 */
public class TestOverwrite extends TableTestBase {
  private static final Schema DATE_SCHEMA = new Schema(
      required(1, "id", Types.LongType.get()),
      optional(2, "data", Types.StringType.get()),
      required(3, "date", Types.StringType.get()));

  private static final PartitionSpec PARTITION_BY_DATE = PartitionSpec
      .builderFor(DATE_SCHEMA)
      .identity("date")
      .build();

  private static final String TABLE_NAME = "overwrite_table";

  // ids 0-4 in partition date=2018-06-08
  private static final DataFile FILE_0_TO_4 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-1.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-08")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(0L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(4L)) // upper bounds
      ))
      .build();

  // ids 5-9 in partition date=2018-06-09
  private static final DataFile FILE_5_TO_9 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-2.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-09")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(5L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(9L)) // upper bounds
      ))
      .build();

  // ids 10-14 in partition date=2018-06-09
  // fixed: this file previously duplicated FILE_5_TO_9's path and id bounds (5-9),
  // contradicting its name and the metrics-based validation tests below
  private static final DataFile FILE_10_TO_14 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-3.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-09")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(10L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(14L)) // upper bounds
      ))
      .build();

  /** Encodes a long as little-endian bytes, the binary form used for column bounds. */
  private static ByteBuffer longToBuffer(long value) {
    return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, value);
  }

  private Table table = null;

  @Before
  public void createTestTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());

    this.table = TestTables.create(tableDir, TABLE_NAME, DATE_SCHEMA, PARTITION_BY_DATE);

    // seed with one file per partition
    table.newAppend()
        .appendFile(FILE_0_TO_4)
        .appendFile(FILE_5_TO_9)
        .commit();
  }

  @Test
  public void testOverwriteWithoutAppend() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .commit();

    long overwriteId = table.currentSnapshot().snapshotId();

    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have one manifest",
        1, table.currentSnapshot().manifests().size());

    // the 2018-06-08 file is deleted; the other file is carried forward
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId, baseId),
        files(FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testOverwriteFailsDelete() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    // id < 9 matches only some of FILE_5_TO_9's rows (5-9), so the delete must fail
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 9)));

    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot delete file where some, but not all, rows match filter",
        overwrite::commit);

    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testOverwriteWithAppendOutsideOfDelete() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .commit();

    long overwriteId = table.currentSnapshot().snapshotId();

    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have 2 manifests",
        2, table.currentSnapshot().manifests().size());

    // manifest is not merged because it is less than the minimum
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId),
        files(FILE_10_TO_14),
        statuses(Status.ADDED));

    validateManifestEntries(table.currentSnapshot().manifests().get(1),
        ids(overwriteId, baseId),
        files(FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testOverwriteWithMergedAppendOutsideOfDelete() {
    // ensure the overwrite results in a merge
    table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();

    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .commit();

    long overwriteId = table.currentSnapshot().snapshotId();

    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have one merged manifest",
        1, table.currentSnapshot().manifests().size());

    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId, overwriteId, baseId),
        files(FILE_10_TO_14, FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.ADDED, Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testValidatedOverwriteWithAppendOutsideOfDelete() {
    // ensure the overwrite results in a merge
    table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();

    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    // the appended file's partition does not match the overwrite filter
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .validateAddedFiles();

    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot append file with rows that do not match filter",
        overwrite::commit);

    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testValidatedOverwriteWithAppendOutsideOfDeleteMetrics() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    // the partition matches, but id bounds 10-14 violate id < 10
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 10)))
        .addFile(FILE_10_TO_14) // in 2018-06-09 matches, but IDs are outside range
        .validateAddedFiles();

    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot append file with rows that do not match filter",
        overwrite::commit);

    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testValidatedOverwriteWithAppendSuccess() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();

    // fixed: this test previously asserted a ValidationException, but FILE_10_TO_14 is in
    // 2018-06-09 and its id bounds (10-14) all satisfy id < 20, so validation passes and
    // the commit must succeed
    table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 20)))
        .addFile(FILE_10_TO_14) // in 2018-06-09 matches and IDs are inside range
        .validateAddedFiles()
        .commit();

    Assert.assertNotEquals("Should create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }
}
| 1,919 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/util/TestBinPacking.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.collect.Lists;
import com.netflix.iceberg.util.BinPacking.ListPacker;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
public class TestBinPacking {
@Test
public void testBasicBinPacking() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 3));
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 5));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 6));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 8));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), pack(l(1, 2, 3, 4, 5), 9));
Assert.assertEquals("Should pack the first 4 values",
l(l(1, 2, 3, 4), l(5)), pack(l(1, 2, 3, 4, 5), 10));
Assert.assertEquals("Should pack the first 4 values",
l(l(1, 2, 3, 4), l(5)), pack(l(1, 2, 3, 4, 5), 14));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), pack(l(1, 2, 3, 4, 5), 15));
}
@Test
public void testReverseBinPackingSingleLookback() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 3, 1));
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 4, 1));
Assert.assertEquals("Should pack the second and third values",
l(l(1), l(2, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 5, 1));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 6, 1));
Assert.assertEquals("Should pack the first two pairs of values",
l(l(1, 2), l(3, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 7, 1));
Assert.assertEquals("Should pack the first two pairs of values",
l(l(1, 2), l(3, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 8, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 9, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 11, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2), l(3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 12, 1));
Assert.assertEquals("Should pack the last 4 values",
l(l(1), l(2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 14, 1));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 15, 1));
}
@Test
public void testReverseBinPackingUnlimitedLookback() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 3));
Assert.assertEquals("Should pack 1 with 3",
l(l(2), l(1, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 4));
Assert.assertEquals("Should pack 2,3 and 1,4",
l(l(2, 3), l(1, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 5));
Assert.assertEquals("Should pack 2,4 and 1,5",
l(l(3), l(2, 4), l(1, 5)), packEnd(l(1, 2, 3, 4, 5), 6));
Assert.assertEquals("Should pack 3,4 and 2,5",
l(l(1), l(3, 4), l(2, 5)), packEnd(l(1, 2, 3, 4, 5), 7));
Assert.assertEquals("Should pack 1,2,3 and 3,5",
l(l(1, 2, 4), l(3, 5)), packEnd(l(1, 2, 3, 4, 5), 8));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 9));
Assert.assertEquals("Should pack 2,3 and 1,4,5",
l(l(2, 3), l(1, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 10));
Assert.assertEquals("Should pack 1,3 and 2,4,5",
l(l(1, 3), l(2, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 11));
Assert.assertEquals("Should pack 1,2 and 3,4,5",
l(l(1, 2), l(3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 12));
Assert.assertEquals("Should pack 1,2 and 3,4,5",
l(l(2), l(1, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 13));
Assert.assertEquals("Should pack the last 4 values",
l(l(1), l(2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 14));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 15));
}
// Verifies how the look-back window bounds bin merging: with unlimited
// look-back every still-open bin can absorb later small items, while a
// smaller window closes older bins so later items must start or join
// newer bins. The traces below show the packer's open-bin state after
// each input item is consumed.
@Test
public void testBinPackingLookBack() {
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5, 1], [5]
// 4. [5, 1, 1], [5]
// 5. [5, 1, 1], [5], [5]
// 6. [5, 1, 1, 1], [5], [5]
Assert.assertEquals("Unlimited look-back: should merge ones into first bin",
l(l(5, 1, 1, 1), l(5), l(5)), pack(l(5, 1, 5, 1, 5, 1), 8));
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5, 1], [5]
// 4. [5, 1, 1], [5]
// 5. [5], [5] ([5, 1, 1] drops out of look-back)
// 6. [5, 1], [5]
Assert.assertEquals("2 bin look-back: should merge two ones into first bin",
l(l(5, 1, 1), l(5, 1), l(5)), pack(l(5, 1, 5, 1, 5, 1), 8, 2));
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5] ([5, 1] drops out of look-back)
// 4. [5, 1]
// 5. [5] ([5, 1] #2 drops out of look-back)
// 6. [5, 1]
Assert.assertEquals("1 bin look-back: should merge ones with fives",
l(l(5, 1), l(5, 1), l(5, 1)), pack(l(5, 1, 5, 1, 5, 1), 8, 1));
}
/** Packs front-to-back with an effectively unlimited look-back window. */
private List<List<Integer>> pack(List<Integer> items, long targetWeight) {
  int unlimitedLookback = Integer.MAX_VALUE;
  return pack(items, targetWeight, unlimitedLookback);
}
/** Packs items front-to-back, using each Integer's own value as its weight. */
private List<List<Integer>> pack(List<Integer> items, long targetWeight, int lookback) {
  return new ListPacker<Integer>(targetWeight, lookback).pack(items, Integer::longValue);
}
/** Packs back-to-front with an effectively unlimited look-back window. */
private List<List<Integer>> packEnd(List<Integer> items, long targetWeight) {
  int unlimitedLookback = Integer.MAX_VALUE;
  return packEnd(items, targetWeight, unlimitedLookback);
}
/** Packs items back-to-front, using each Integer's own value as its weight. */
private List<List<Integer>> packEnd(List<Integer> items, long targetWeight, int lookback) {
  return new ListPacker<Integer>(targetWeight, lookback).packEnd(items, Integer::longValue);
}
/** Shorthand for building a mutable list from varargs. */
private <T> List<T> l(T... items) {
  List<T> list = Lists.newArrayList();
  for (T item : items) {
    list.add(item);
  }
  return list;
}
}
| 1,920 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/AvroTestHelpers.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.CharSequenceWrapper;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
/**
 * Helpers for building Avro schemas annotated with Iceberg field IDs, and for
 * asserting that Avro records match an expected Iceberg type structure.
 */
class AvroTestHelpers {

  /** Returns an optional (null-union) Avro field carrying the given Iceberg field id. */
  static Schema.Field optionalField(int id, String name, Schema schema) {
    return addId(id, new Schema.Field(name, toOption(schema), null, JsonProperties.NULL_VALUE));
  }

  /** Returns a required Avro field carrying the given Iceberg field id. */
  static Schema.Field requiredField(int id, String name, Schema schema) {
    return addId(id, new Schema.Field(name, schema, null, null));
  }

  /** Builds an Avro record schema from the given fields. */
  static Schema record(String name, Schema.Field... fields) {
    return Schema.createRecord(name, null, null, false, Arrays.asList(fields));
  }

  /** Attaches an Iceberg field id property to an Avro field. */
  static Schema.Field addId(int id, Schema.Field field) {
    field.addProp(AvroSchemaUtil.FIELD_ID_PROP, id);
    return field;
  }

  /** Attaches an Iceberg list-element id property to an Avro array schema. */
  static Schema addElementId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.ELEMENT_ID_PROP, id);
    return schema;
  }

  /** Attaches an Iceberg map-key id property to an Avro map schema. */
  static Schema addKeyId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.KEY_ID_PROP, id);
    return schema;
  }

  /** Attaches an Iceberg map-value id property to an Avro map schema. */
  static Schema addValueId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.VALUE_ID_PROP, id);
    return schema;
  }

  /** Asserts that actual matches expected, field by field, per the struct type. */
  static void assertEquals(Types.StructType struct, Record expected, Record actual) {
    List<Types.NestedField> fields = struct.fields();
    for (int i = 0; i < fields.size(); i += 1) {
      Type fieldType = fields.get(i).type();
      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);
      assertEquals(fieldType, expectedValue, actualValue);
    }
  }

  /** Asserts element-wise equality of two lists per the list's element type. */
  static void assertEquals(Types.ListType list, List<?> expected, List<?> actual) {
    Type elementType = list.elementType();
    Assert.assertEquals("List size should match", expected.size(), actual.size());
    for (int i = 0; i < expected.size(); i += 1) {
      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);
      assertEquals(elementType, expectedValue, actualValue);
    }
  }

  /** Asserts key-wise equality of two maps per the map's value type. */
  static void assertEquals(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) {
    Type valueType = map.valueType();
    Assert.assertEquals("Map size should match", expected.size(), actual.size());
    for (Object expectedKey : expected.keySet()) {
      // look up by the expected key; a missing key surfaces as a null actual value
      Object expectedValue = expected.get(expectedKey);
      Object actualValue = actual.get(expectedKey);
      assertEquals(valueType, expectedValue, actualValue);
    }
  }

  private static void assertEquals(Type type, Object expected, Object actual) {
    if (expected == null && actual == null) {
      return;
    }

    // fail fast with a clear message when exactly one side is null, instead of
    // a misleading "should be a Record/List/Map" failure in the nested cases
    Assert.assertTrue("Expected and actual should both be null or both be non-null",
        expected != null && actual != null);

    switch (type.typeId()) {
      case BOOLEAN:
      case INTEGER:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case STRING:
      case DATE:
      case TIME:
      case TIMESTAMP:
      case UUID:
      case FIXED:
      case BINARY:
      case DECIMAL:
        Assert.assertEquals("Primitive value should be equal to expected", expected, actual);
        break;
      case STRUCT:
        Assert.assertTrue("Expected should be a Record", expected instanceof Record);
        Assert.assertTrue("Actual should be a Record", actual instanceof Record);
        assertEquals(type.asStructType(), (Record) expected, (Record) actual);
        break;
      case LIST:
        Assert.assertTrue("Expected should be a List", expected instanceof List);
        Assert.assertTrue("Actual should be a List", actual instanceof List);
        // use wildcard casts instead of raw types
        assertEquals(type.asListType(), (List<?>) expected, (List<?>) actual);
        break;
      case MAP:
        Assert.assertTrue("Expected should be a Map", expected instanceof Map);
        Assert.assertTrue("Actual should be a Map", actual instanceof Map);
        assertEquals(type.asMapType(), (Map<?, ?>) expected, (Map<?, ?>) actual);
        break;
      default:
        throw new IllegalArgumentException("Not a supported type: " + type);
    }
  }
}
| 1,921 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/RandomAvroData.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.util.Utf8;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.function.Supplier;
/**
 * Test helper that generates random Avro records matching an Iceberg schema.
 * Generation is driven by a seeded {@link Random}, so a given (schema, seed)
 * pair always produces the same records.
 */
public class RandomAvroData {
// Returns numRecords random records conforming to schema; deterministic for a given seed.
public static List<Record> generate(Schema schema, int numRecords, long seed) {
RandomDataGenerator generator = new RandomDataGenerator(schema, seed);
List<Record> records = Lists.newArrayListWithExpectedSize(numRecords);
for (int i = 0; i < numRecords; i += 1) {
records.add((Record) TypeUtil.visit(schema, generator));
}
return records;
}
// Schema visitor that materializes a random value for each visited Iceberg type.
private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
// Avro schema for each Iceberg type; needed to construct Records and Fixed values.
private final Map<Type, org.apache.avro.Schema> typeToSchema;
private final Random random;
private RandomDataGenerator(Schema schema, long seed) {
this.typeToSchema = AvroSchemaUtil.convertTypes(schema.asStruct(), "test");
this.random = new Random(seed);
}
@Override
public Record schema(Schema schema, Supplier<Object> structResult) {
return (Record) structResult.get();
}
@Override
public Record struct(Types.StructType struct, Iterable<Object> fieldResults) {
// field results arrive in schema order; copy them positionally into the record
Record rec = new Record(typeToSchema.get(struct));
List<Object> values = Lists.newArrayList(fieldResults);
for (int i = 0; i < values.size(); i += 1) {
rec.put(i, values.get(i));
}
return rec;
}
@Override
public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
// return null 5% of the time when the value is optional
if (field.isOptional() && random.nextInt(20) == 1) {
return null;
}
return fieldResult.get();
}
@Override
public Object list(Types.ListType list, Supplier<Object> elementResult) {
// lists have 0 to 19 elements
int numElements = random.nextInt(20);
List<Object> result = Lists.newArrayListWithExpectedSize(numElements);
for (int i = 0; i < numElements; i += 1) {
// return null 5% of the time when the value is optional
if (list.isElementOptional() && random.nextInt(20) == 1) {
result.add(null);
} else {
result.add(elementResult.get());
}
}
return result;
}
@Override
public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
// maps have 0 to 19 entries; insertion order is preserved
int numEntries = random.nextInt(20);
Map<Object, Object> result = Maps.newLinkedHashMap();
// string keys are converted to java.lang.String — presumably so the
// uniqueness check below uses String equality rather than Utf8; TODO confirm
Supplier<Object> keyFunc;
if (map.keyType() == Types.StringType.get()) {
keyFunc = () -> keyResult.get().toString();
} else {
keyFunc = keyResult;
}
Set<Object> keySet = Sets.newHashSet();
for (int i = 0; i < numEntries; i += 1) {
Object key = keyFunc.get();
// ensure no collisions
while (keySet.contains(key)) {
key = keyFunc.get();
}
keySet.add(key);
// return null 5% of the time when the value is optional
if (map.isValueOptional() && random.nextInt(20) == 1) {
result.put(key, null);
} else {
result.put(key, valueResult.get());
}
}
return result;
}
@Override
public Object primitive(Type.PrimitiveType primitive) {
Object result = generatePrimitive(primitive, random);
// For the primitives that Avro needs a different type than Spark, fix
// them here.
switch (primitive.typeId()) {
case FIXED:
// wrap the raw bytes in a GenericData.Fixed carrying the Avro fixed schema
return new GenericData.Fixed(typeToSchema.get(primitive),
(byte[]) result);
case BINARY:
return ByteBuffer.wrap((byte[]) result);
case UUID:
// derive a UUID deterministically from the random bytes
return UUID.nameUUIDFromBytes((byte[]) result);
default:
return result;
}
}
}
// Returns a random value for the primitive type. A 1-in-20 "choice" roll
// steers some types toward boundary values (MIN/MAX, zero, NaN, infinities)
// so edge cases appear regularly in generated data.
private static Object generatePrimitive(Type.PrimitiveType primitive,
Random random) {
int choice = random.nextInt(20);
switch (primitive.typeId()) {
case BOOLEAN:
return choice < 10;
case INTEGER:
switch (choice) {
case 1:
return Integer.MIN_VALUE;
case 2:
return Integer.MAX_VALUE;
case 3:
return 0;
default:
return random.nextInt();
}
case LONG:
switch (choice) {
case 1:
return Long.MIN_VALUE;
case 2:
return Long.MAX_VALUE;
case 3:
return 0L;
default:
return random.nextLong();
}
case FLOAT:
switch (choice) {
case 1:
return Float.MIN_VALUE;
case 2:
return -Float.MIN_VALUE;
case 3:
return Float.MAX_VALUE;
case 4:
return -Float.MAX_VALUE;
case 5:
return Float.NEGATIVE_INFINITY;
case 6:
return Float.POSITIVE_INFINITY;
case 7:
return 0.0F;
case 8:
return Float.NaN;
default:
return random.nextFloat();
}
case DOUBLE:
switch (choice) {
case 1:
return Double.MIN_VALUE;
case 2:
return -Double.MIN_VALUE;
case 3:
return Double.MAX_VALUE;
case 4:
return -Double.MAX_VALUE;
case 5:
return Double.NEGATIVE_INFINITY;
case 6:
return Double.POSITIVE_INFINITY;
case 7:
return 0.0D;
case 8:
return Double.NaN;
default:
return random.nextDouble();
}
case DATE:
// this will include negative values (dates before 1970-01-01)
return random.nextInt() % ABOUT_380_YEARS_IN_DAYS;
case TIME:
// mask to a non-negative long before taking time-of-day modulo
return (random.nextLong() & Integer.MAX_VALUE) % ONE_DAY_IN_MICROS;
case TIMESTAMP:
// may be negative (timestamps before the epoch)
return random.nextLong() % FIFTY_YEARS_IN_MICROS;
case STRING:
return randomString(random);
case UUID:
byte[] uuidBytes = new byte[16];
random.nextBytes(uuidBytes);
// this will hash the uuidBytes
return uuidBytes;
case FIXED:
byte[] fixed = new byte[((Types.FixedType) primitive).length()];
random.nextBytes(fixed);
return fixed;
case BINARY:
byte[] binary = new byte[random.nextInt(50)];
random.nextBytes(binary);
return binary;
case DECIMAL:
Types.DecimalType type = (Types.DecimalType) primitive;
BigInteger unscaled = randomUnscaled(type.precision(), random);
return new BigDecimal(unscaled, type.scale());
default:
throw new IllegalArgumentException(
"Cannot generate random value for unknown type: " + primitive);
}
}
// ~50 years in microseconds; averages the leap-year cycle (3 * 365 + 366) / 4 days
private static final long FIFTY_YEARS_IN_MICROS =
(50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4;
private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365;
private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L;
// alphabet used for random string content
private static final String CHARS =
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?";
// Returns a random Utf8 string of 0-49 chars drawn from CHARS (all single-byte in UTF-8).
private static Utf8 randomString(Random random) {
int length = random.nextInt(50);
byte[] buffer = new byte[length];
for (int i = 0; i < length; i += 1) {
buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length()));
}
return new Utf8(buffer);
}
private static final String DIGITS = "0123456789";
// Returns a random non-negative unscaled decimal value.
// Note: nextInt(precision) yields at most precision - 1 digits, so the full
// precision is never exercised.
private static BigInteger randomUnscaled(int precision, Random random) {
int length = random.nextInt(precision);
if (length == 0) {
return BigInteger.ZERO;
}
StringBuilder sb = new StringBuilder();
for (int i = 0; i < length; i += 1) {
sb.append(DIGITS.charAt(random.nextInt(DIGITS.length())));
}
return new BigInteger(sb.toString());
}
}
| 1,922 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestGenericAvro.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.FileAppender;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import java.io.File;
import java.io.IOException;
import java.util.List;
public class TestGenericAvro extends AvroDataTest {
protected void writeAndValidate(Schema schema) throws IOException {
List<Record> expected = RandomAvroData.generate(schema, 100, 0L);
File testFile = temp.newFile();
Assert.assertTrue("Delete should succeed", testFile.delete());
try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile))
.schema(schema)
.named("test")
.build()) {
for (Record rec : expected) {
writer.add(rec);
}
}
List<Record> rows;
try (AvroIterable<Record> reader = Avro.read(Files.localInput(testFile))
.project(schema)
.build()) {
rows = Lists.newArrayList(reader);
}
for (int i = 0; i < expected.size(); i += 1) {
AvroTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i));
}
}
}
| 1,923 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestSchemaConversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import static com.netflix.iceberg.avro.AvroTestHelpers.addElementId;
import static com.netflix.iceberg.avro.AvroTestHelpers.addKeyId;
import static com.netflix.iceberg.avro.AvroTestHelpers.addValueId;
import static com.netflix.iceberg.avro.AvroTestHelpers.optionalField;
import static com.netflix.iceberg.avro.AvroTestHelpers.record;
import static com.netflix.iceberg.avro.AvroTestHelpers.requiredField;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Verifies round-trip conversion between Iceberg types/schemas and Avro
 * schemas, including field IDs, logical types, and nested structures.
 */
public class TestSchemaConversions {
  @Test
  public void testPrimitiveTypes() {
    List<Type> primitives = Lists.newArrayList(
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(12),
        Types.BinaryType.get(),
        Types.DecimalType.of(9, 4)
    );

    // expected Avro schemas, in the same order as the Iceberg primitives above
    List<Schema> avroPrimitives = Lists.newArrayList(
        Schema.create(Schema.Type.BOOLEAN),
        Schema.create(Schema.Type.INT),
        Schema.create(Schema.Type.LONG),
        Schema.create(Schema.Type.FLOAT),
        Schema.create(Schema.Type.DOUBLE),
        LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT)),
        LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG)),
        addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), true),
        addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), false),
        Schema.create(Schema.Type.STRING),
        LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16)),
        Schema.createFixed("fixed_12", null, null, 12),
        Schema.create(Schema.Type.BYTES),
        LogicalTypes.decimal(9, 4).addToSchema(Schema.createFixed("decimal_9_4", null, null, 4))
    );

    // check conversion in both directions for each pair
    for (int i = 0; i < primitives.size(); i += 1) {
      Type type = primitives.get(i);
      Schema avro = avroPrimitives.get(i);
      Assert.assertEquals("Avro schema to primitive: " + avro,
          type, AvroSchemaUtil.convert(avro));
      Assert.assertEquals("Primitive to avro schema: " + type,
          avro, AvroSchemaUtil.convert(type));
    }
  }

  /** Tags a timestamp schema with the adjust-to-utc property used by Iceberg. */
  private Schema addAdjustToUtc(Schema schema, boolean adjustToUTC) {
    schema.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, adjustToUTC);
    return schema;
  }

  @Test
  public void testStructAndPrimitiveTypes() {
    Types.StructType struct = Types.StructType.of(
        optional(20, "bool", Types.BooleanType.get()),
        optional(21, "int", Types.IntegerType.get()),
        optional(22, "long", Types.LongType.get()),
        optional(23, "float", Types.FloatType.get()),
        optional(24, "double", Types.DoubleType.get()),
        optional(25, "date", Types.DateType.get()),
        optional(27, "time", Types.TimeType.get()),
        optional(28, "timestamptz", Types.TimestampType.withZone()),
        optional(29, "timestamp", Types.TimestampType.withoutZone()),
        optional(30, "string", Types.StringType.get()),
        optional(31, "uuid", Types.UUIDType.get()),
        optional(32, "fixed", Types.FixedType.ofLength(16)),
        optional(33, "binary", Types.BinaryType.get()),
        optional(34, "decimal", Types.DecimalType.of(14, 2))
    );

    Schema schema = record("primitives",
        optionalField(20, "bool", Schema.create(Schema.Type.BOOLEAN)),
        optionalField(21, "int", Schema.create(Schema.Type.INT)),
        optionalField(22, "long", Schema.create(Schema.Type.LONG)),
        optionalField(23, "float", Schema.create(Schema.Type.FLOAT)),
        optionalField(24, "double", Schema.create(Schema.Type.DOUBLE)),
        optionalField(25, "date", LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT))),
        optionalField(27, "time", LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG))),
        optionalField(28, "timestamptz", addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), true)),
        optionalField(29, "timestamp", addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), false)),
        optionalField(30, "string", Schema.create(Schema.Type.STRING)),
        optionalField(31, "uuid", LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16))),
        optionalField(32, "fixed", Schema.createFixed("fixed_16", null, null, 16)),
        optionalField(33, "binary", Schema.create(Schema.Type.BYTES)),
        optionalField(34, "decimal", LogicalTypes.decimal(14, 2).addToSchema(Schema.createFixed("decimal_14_2", null, null, 6)))
    );

    Assert.assertEquals("Test conversion from Avro schema",
        struct, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Test conversion to Avro schema",
        schema, AvroSchemaUtil.convert(struct, "primitives"));
  }

  @Test
  public void testList() {
    Type list = Types.ListType.ofRequired(34, Types.UUIDType.get());
    Schema schema = addElementId(34, SchemaBuilder.array().items(
        LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16))));

    Assert.assertEquals("Avro schema to list",
        list, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("List to Avro schema",
        schema, AvroSchemaUtil.convert(list));
  }

  @Test
  public void testListOfStructs() {
    Type list = Types.ListType.ofRequired(34, Types.StructType.of(
        required(35, "lat", Types.FloatType.get()),
        required(36, "long", Types.FloatType.get())
    ));
    Schema schema = addElementId(34, SchemaBuilder.array().items(
        record("r34",
            requiredField(35, "lat", Schema.create(Schema.Type.FLOAT)),
            requiredField(36, "long", Schema.create(Schema.Type.FLOAT)))
    ));

    Assert.assertEquals("Avro schema to list",
        list, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("List to Avro schema",
        schema, AvroSchemaUtil.convert(list));
  }

  @Test
  public void testMapOfLongToBytes() {
    // non-string keys cannot use Avro's native map and need the key/value
    // record representation produced by createMap
    Type map = Types.MapType.ofRequired(33, 34, Types.LongType.get(), Types.BinaryType.get());
    Schema schema = AvroSchemaUtil.createMap(
        33, Schema.create(Schema.Type.LONG),
        34, Schema.create(Schema.Type.BYTES));

    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  @Test
  public void testMapOfStringToBytes() {
    // string keys map onto Avro's native map type
    Type map = Types.MapType.ofRequired(33, 34, Types.StringType.get(), Types.BinaryType.get());
    Schema schema = addKeyId(33, addValueId(34, SchemaBuilder.map().values(
        Schema.create(Schema.Type.BYTES))));

    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  @Test
  public void testMapOfListToStructs() {
    Type map = Types.MapType.ofRequired(33, 34,
        Types.ListType.ofRequired(35, Types.IntegerType.get()),
        Types.StructType.of(
            required(36, "a", Types.IntegerType.get()),
            optional(37, "b", Types.IntegerType.get())
        ));
    Schema schema = AvroSchemaUtil.createMap(
        33, addElementId(35, Schema.createArray(Schema.create(Schema.Type.INT))),
        34, record("r34",
            requiredField(36, "a", Schema.create(Schema.Type.INT)),
            optionalField(37, "b", Schema.create(Schema.Type.INT))));

    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  @Test
  public void testMapOfStringToStructs() {
    Type map = Types.MapType.ofRequired(33, 34, Types.StringType.get(), Types.StructType.of(
        required(35, "a", Types.IntegerType.get()),
        optional(36, "b", Types.IntegerType.get())
    ));
    Schema schema = addKeyId(33, addValueId(34, SchemaBuilder.map().values(
        record("r34",
            requiredField(35, "a", Schema.create(Schema.Type.INT)),
            optionalField(36, "b", Schema.create(Schema.Type.INT))))));

    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  @Test
  public void testComplexSchema() {
    com.netflix.iceberg.Schema schema = new com.netflix.iceberg.Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "data", Types.StringType.get()),
        optional(
            3,
            "preferences",
            Types.StructType
                .of(required(8, "feature1", Types.BooleanType.get()), optional(9, "feature2", Types.BooleanType.get()))),
        required(
            4,
            "locations",
            Types.MapType.ofRequired(
                10,
                11,
                Types.StructType.of(
                    required(20, "address", Types.StringType.get()),
                    required(21, "city", Types.StringType.get()),
                    required(22, "state", Types.StringType.get()),
                    required(23, "zip", Types.IntegerType.get())
                ),
                Types.StructType.of(required(12, "lat", Types.FloatType.get()), required(13, "long", Types.FloatType.get()))
            )
        ),
        optional(
            5,
            "points",
            Types.ListType.ofOptional(
                14,
                Types.StructType.of(required(15, "x", Types.LongType.get()), required(16, "y", Types.LongType.get())))),
        required(6, "doubles", Types.ListType.ofRequired(17, Types.DoubleType.get())),
        optional(7, "properties", Types.MapType.ofOptional(18, 19, Types.StringType.get(), Types.StringType.get())));

    // previously the converted schema was discarded, so this test only
    // verified that conversion did not throw; assert the result as well
    String avroSchemaJson = AvroSchemaUtil.convert(schema, "newTableName").toString(true);
    Assert.assertNotNull("Converted Avro schema should render as JSON", avroSchemaJson);
  }
}
| 1,924 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestReadProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public abstract class TestReadProjection {
/**
 * Writes {@code record} using {@code writeSchema}, then reads it back with
 * {@code readSchema} applied as a projection. Format-specific subclasses
 * implement the actual round trip.
 *
 * @param desc short label for the round trip (used for temp files / messages)
 * @param writeSchema schema the record is written with
 * @param readSchema projection schema applied on read
 * @param record the record to round-trip
 * @return the record as read back under the projection
 * @throws IOException if the write or read fails
 */
protected abstract Record writeAndRead(String desc,
Schema writeSchema,
Schema readSchema,
Record record) throws IOException;
@Rule
public TemporaryFolder temp = new TemporaryFolder();
// Projecting with the write schema itself should return every column unchanged.
@Test
public void testFullProjection() throws Exception {
  Schema schema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );

  Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
  record.put("id", 34L);
  record.put("data", "test");

  Record projected = writeAndRead("full_projection", schema, schema, record);

  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

  // compare as CharSequence because the reader may return Utf8 rather than String
  int cmp = Comparators.charSequences()
      .compare("test", (CharSequence) projected.get("data"));
  // assertEquals reports both values on failure, unlike assertTrue(cmp == 0)
  Assert.assertEquals("Should contain the correct data value", 0, cmp);
}
// Projecting with the same columns in a different order should return values
// at the projection's positions, not the write schema's.
@Test
public void testReorderedFullProjection() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(1, "data", Types.StringType.get())
);
Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
record.put("id", 34L);
record.put("data", "test");
// same field ids as the write schema, but data first and id second
Schema reordered = new Schema(
Types.NestedField.optional(1, "data", Types.StringType.get()),
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("full_projection", schema, reordered, record);
// positional access: position 0 is data, position 1 is id
Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
}
// Columns in the projection that were not written (ids 2 and 3) should read
// back as null, while written columns are returned at the projection's positions.
@Test
public void testReorderedProjection() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(1, "data", Types.StringType.get())
);
Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
record.put("id", 34L);
record.put("data", "test");
// ids 2 and 3 do not exist in the write schema and must be read as null
Schema reordered = new Schema(
Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
Types.NestedField.optional(1, "data", Types.StringType.get()),
Types.NestedField.optional(3, "missing_2", Types.LongType.get())
);
Record projected = writeAndRead("full_projection", schema, reordered, record);
Assert.assertNull("Should contain the correct 0 value", projected.get(0));
Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
Assert.assertNull("Should contain the correct 2 value", projected.get(2));
}
// An empty projection should still produce a record per row, but the record
// must carry no values at all.
@Test
public void testEmptyProjection() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(1, "data", Types.StringType.get())
);
Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
record.put("id", 34L);
record.put("data", "test");
// schema.select() with no columns yields an empty projection schema
Record projected = writeAndRead("empty_projection", schema, schema.select(), record);
Assert.assertNotNull("Should read a non-null record", projected);
try {
projected.get(0);
Assert.fail("Should not retrieve value with ordinal 0");
} catch (ArrayIndexOutOfBoundsException e) {
// this is expected because there are no values
}
}
// Projecting a single column should return that column and null for the other.
@Test
public void testBasicProjection() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );

  Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  record.put("id", 34L);
  record.put("data", "test");

  // project only the id column
  Schema idOnly = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get())
  );

  Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record);
  Assert.assertNull("Should not project data", projected.get("data"));
  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

  // project only the data column
  Schema dataOnly = new Schema(
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );

  projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record);
  Assert.assertNull("Should not project id", projected.get("id"));
  // compare as CharSequence because the reader may return Utf8 rather than String
  int cmp = Comparators.charSequences()
      .compare("test", (CharSequence) projected.get("data"));
  // assertEquals reports both values on failure, unlike assertTrue(cmp == 0)
  Assert.assertEquals("Should contain the correct data value", 0, cmp);
}
// A renamed column in the projection should be matched by field id, so the
// written value comes back under the new name.
@Test
public void testRename() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );

  Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  record.put("id", 34L);
  record.put("data", "test");

  // field id 1 is the same, but the projection calls it "renamed"
  Schema readSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "renamed", Types.StringType.get())
  );

  Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);

  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
  // compare as CharSequence because the reader may return Utf8 rather than String
  int cmp = Comparators.charSequences()
      .compare("test", (CharSequence) projected.get("renamed"));
  // assertEquals reports both values on failure, unlike assertTrue(cmp == 0)
  Assert.assertEquals("Should contain the correct data/renamed value", 0, cmp);
}
@Test
public void testNestedStructProjection() throws Exception {
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(3, "location", Types.StructType.of(
Types.NestedField.required(1, "lat", Types.FloatType.get()),
Types.NestedField.required(2, "long", Types.FloatType.get())
))
);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
Record location = new Record(
AvroSchemaUtil.fromOption(record.getSchema().getField("location").schema()));
location.put("lat", 52.995143f);
location.put("long", -1.539054f);
record.put("location", location);
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Record projectedLocation = (Record) projected.get("location");
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project location", projectedLocation);
Schema latOnly = new Schema(
Types.NestedField.optional(3, "location", Types.StructType.of(
Types.NestedField.required(1, "lat", Types.FloatType.get())
))
);
projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
projectedLocation = (Record) projected.get("location");
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project location", projected.get("location"));
Assert.assertNull("Should not project longitude", projectedLocation.get("long"));
Assert.assertEquals("Should project latitude",
52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
Schema longOnly = new Schema(
Types.NestedField.optional(3, "location", Types.StructType.of(
Types.NestedField.required(2, "long", Types.FloatType.get())
))
);
projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
projectedLocation = (Record) projected.get("location");
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project location", projected.get("location"));
Assert.assertNull("Should not project latitutde", projectedLocation.get("lat"));
Assert.assertEquals("Should project longitude",
-1.539054f, (float) projectedLocation.get("long"), 0.000001f);
Schema locationOnly = writeSchema.select("location");
projected = writeAndRead("location_only", writeSchema, locationOnly, record);
projectedLocation = (Record) projected.get("location");
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project location", projected.get("location"));
Assert.assertEquals("Should project latitude",
52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
Assert.assertEquals("Should project longitude",
-1.539054f, (float) projectedLocation.get("long"), 0.000001f);
}
  @Test
  public void testMapProjection() throws IOException {
    // Verifies map projection: selecting by id drops the map, while selecting
    // the map's key, the map's value, or the map field itself all project the
    // entire map (a map cannot be projected key-only or value-only).
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "properties",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get()))
    );

    Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B");

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("properties", properties);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    // id-only projection drops the map entirely
    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project properties map", projected.get("properties"));

    // selecting only the key still projects the whole map
    Schema keyOnly = writeSchema.select("properties.key");
    projected = writeAndRead("key_only", writeSchema, keyOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    // selecting only the value still projects the whole map
    Schema valueOnly = writeSchema.select("properties.value");
    projected = writeAndRead("value_only", writeSchema, valueOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    Schema mapOnly = writeSchema.select("properties");
    projected = writeAndRead("map_only", writeSchema, mapOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));
  }
private Map<String, ?> toStringMap(Map<?, ?> map) {
Map<String, Object> stringMap = Maps.newHashMap();
for (Map.Entry<?, ?> entry : map.entrySet()) {
if (entry.getValue() instanceof CharSequence) {
stringMap.put(entry.getKey().toString(), entry.getValue().toString());
} else {
stringMap.put(entry.getKey().toString(), entry.getValue());
}
}
return stringMap;
}
@Test
public void testMapOfStructsProjection() throws IOException {
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
Types.StringType.get(),
Types.StructType.of(
Types.NestedField.required(1, "lat", Types.FloatType.get()),
Types.NestedField.required(2, "long", Types.FloatType.get())
)
))
);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
Record l1 = new Record(AvroSchemaUtil.fromOption(
AvroSchemaUtil.fromOption(record.getSchema().getField("locations").schema())
.getValueType()));
l1.put("lat", 53.992811f);
l1.put("long", -1.542616f);
Record l2 = new Record(l1.getSchema());
l2.put("lat", 52.995143f);
l2.put("long", -1.539054f);
record.put("locations", ImmutableMap.of("L1", l1, "L2", l2));
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project locations map", projected.get("locations"));
projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project locations map",
record.get("locations"), toStringMap((Map) projected.get("locations")));
projected = writeAndRead("lat_only",
writeSchema, writeSchema.select("locations.lat"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Map<String, ?> locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
Record projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertEquals("L1 should contain lat",
53.992811f, (float) projectedL1.get("lat"), 0.000001);
Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
Record projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertEquals("L2 should contain lat",
52.995143f, (float) projectedL2.get("lat"), 0.000001);
Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
projected = writeAndRead("long_only",
writeSchema, writeSchema.select("locations.long"), record);
Assert.assertNull("Should not project id", projected.get("id"));
locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
Assert.assertEquals("L1 should contain long",
-1.542616f, (float) projectedL1.get("long"), 0.000001);
projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
Assert.assertEquals("L2 should contain long",
-1.539054f, (float) projectedL2.get("long"), 0.000001);
Schema latitiudeRenamed = new Schema(
Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
Types.StringType.get(),
Types.StructType.of(
Types.NestedField.required(1, "latitude", Types.FloatType.get())
)
))
);
projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
Assert.assertNull("Should not project id", projected.get("id"));
locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertEquals("L1 should contain latitude",
53.992811f, (float) projectedL1.get("latitude"), 0.000001);
Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertEquals("L2 should contain latitude",
52.995143f, (float) projectedL2.get("latitude"), 0.000001);
Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
}
@Test
public void testListProjection() throws IOException {
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(10, "values",
Types.ListType.ofOptional(11, Types.LongType.get()))
);
List<Long> values = ImmutableList.of(56L, 57L, 58L);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
record.put("values", values);
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project values list", projected.get("values"));
Schema elementOnly = writeSchema.select("values.element");
projected = writeAndRead("element_only", writeSchema, elementOnly, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project entire list", values, projected.get("values"));
Schema listOnly = writeSchema.select("values");
projected = writeAndRead("list_only", writeSchema, listOnly, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project entire list", values, projected.get("values"));
}
@Test
@SuppressWarnings("unchecked")
public void testListOfStructsProjection() throws IOException {
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(22, "points",
Types.ListType.ofOptional(21, Types.StructType.of(
Types.NestedField.required(19, "x", Types.IntegerType.get()),
Types.NestedField.optional(18, "y", Types.IntegerType.get())
))
)
);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
Record p1 = new Record(AvroSchemaUtil.fromOption(
AvroSchemaUtil.fromOption(record.getSchema().getField("points").schema())
.getElementType()));
p1.put("x", 1);
p1.put("y", 2);
Record p2 = new Record(p1.getSchema());
p2.put("x", 3);
p2.put("y", null);
record.put("points", ImmutableList.of(p1, p2));
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project points list", projected.get("points"));
projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project points list",
record.get("points"), projected.get("points"));
projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
List<Record> points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
Record projectedP1 = points.get(0);
Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x"));
Assert.assertNull("Should not project y", projectedP1.get("y"));
Record projectedP2 = points.get(1);
Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x"));
Assert.assertNull("Should not project y", projectedP2.get("y"));
projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
projectedP1 = points.get(0);
Assert.assertNull("Should not project x", projectedP1.get("x"));
Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y"));
projectedP2 = points.get(1);
Assert.assertNull("Should not project x", projectedP2.get("x"));
Assert.assertEquals("Should project null y", null, projectedP2.get("y"));
Schema yRenamed = new Schema(
Types.NestedField.optional(22, "points",
Types.ListType.ofOptional(21, Types.StructType.of(
Types.NestedField.optional(18, "z", Types.IntegerType.get())
))
)
);
projected = writeAndRead("y_renamed", writeSchema, yRenamed, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
projectedP1 = points.get(0);
Assert.assertNull("Should not project x", projectedP1.get("x"));
Assert.assertNull("Should not project y", projectedP1.get("y"));
Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z"));
projectedP2 = points.get(1);
Assert.assertNull("Should not project x", projectedP2.get("x"));
Assert.assertNull("Should not project y", projectedP2.get("y"));
Assert.assertEquals("Should project null z", null, projectedP2.get("z"));
}
}
| 1,925 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/AvroDataTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class for Avro read/write round-trip tests. Subclasses implement
 * {@link #writeAndValidate(Schema)} to exercise a specific reader/writer
 * pairing; the tests here supply schemas that cover all supported primitive
 * types and their nesting inside lists, maps, and structs.
 */
public abstract class AvroDataTest {
  /**
   * Writes and reads data for the given schema, validating the round trip.
   *
   * @param schema the schema to write and read with
   * @throws IOException if writing or reading fails
   */
  protected abstract void writeAndValidate(Schema schema) throws IOException;

  // one field of each supported primitive type, mixing required and optional
  private static final StructType SUPPORTED_PRIMITIVES = StructType.of(
      required(100, "id", LongType.get()),
      optional(101, "data", Types.StringType.get()),
      required(102, "b", Types.BooleanType.get()),
      optional(103, "i", Types.IntegerType.get()),
      required(104, "l", LongType.get()),
      optional(105, "f", Types.FloatType.get()),
      required(106, "d", Types.DoubleType.get()),
      optional(107, "date", Types.DateType.get()),
      required(108, "ts", Types.TimestampType.withZone()),
      required(110, "s", Types.StringType.get()),
      required(111, "uuid", Types.UUIDType.get()),
      required(112, "fixed", Types.FixedType.ofLength(7)),
      optional(113, "bytes", Types.BinaryType.get()),
      required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
      required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
      required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // maximum precision
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  @Test
  public void testSimpleStruct() throws IOException {
    // all supported primitives as top-level columns
    writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
  }

  @Test
  public void testArray() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));
    writeAndValidate(schema);
  }

  @Test
  public void testArrayOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));
    writeAndValidate(schema);
  }

  @Test
  public void testMap() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            Types.StringType.get())));
    writeAndValidate(schema);
  }

  @Test
  public void testNumericMapKey() throws IOException {
    // non-string keys cannot use Avro's native map type
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.LongType.get(),
            Types.StringType.get())));
    writeAndValidate(schema);
  }

  @Test
  public void testComplexMapKey() throws IOException {
    // struct keys cannot use Avro's native map type
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StructType.of(
                required(4, "i", Types.IntegerType.get()),
                optional(5, "s", Types.StringType.get())),
            Types.StringType.get())));
    writeAndValidate(schema);
  }

  @Test
  public void testMapOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            SUPPORTED_PRIMITIVES)));
    writeAndValidate(schema);
  }

  @Test
  public void testMixedTypes() throws IOException {
    // deeply nested combinations: lists of maps, maps of lists, lists of
    // lists, maps of maps, and a list of structs containing all of the above
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "list_of_maps",
            ListType.ofOptional(2, MapType.ofOptional(3, 4,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))),
        optional(5, "map_of_lists",
            MapType.ofOptional(6, 7,
                Types.StringType.get(),
                ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
        required(9, "list_of_lists",
            ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
        required(12, "map_of_maps",
            MapType.ofOptional(13, 14,
                Types.StringType.get(),
                MapType.ofOptional(15, 16,
                    Types.StringType.get(),
                    SUPPORTED_PRIMITIVES))),
        required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
            Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
            Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))
        )))
    );
    writeAndValidate(schema);
  }
}
| 1,926 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestAvroReadProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Iterables;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.io.FileAppender;
import org.apache.avro.generic.GenericData;
import java.io.File;
import java.io.IOException;
public class TestAvroReadProjection extends TestReadProjection {
protected GenericData.Record writeAndRead(String desc,
Schema writeSchema,
Schema readSchema,
GenericData.Record record)
throws IOException {
File file = temp.newFile(desc + ".avro");
file.delete();
try (FileAppender<GenericData.Record> appender = Avro.write(Files.localOutput(file))
.schema(writeSchema)
.build()) {
appender.add(record);
}
Iterable<GenericData.Record> records = Avro.read(Files.localInput(file))
.project(readSchema)
.build();
return Iterables.getOnlyElement(records);
}
}
| 1,927 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/hadoop/HadoopTableTestBase.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.TestTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class providing a temporary Hadoop table, shared schemas, partition
 * spec, and data-file fixtures for HadoopTables tests, plus helpers for
 * inspecting the table's on-disk metadata files.
 */
public class HadoopTableTestBase {
  // Schema passed to create tables
  static final Schema SCHEMA = new Schema(
      required(3, "id", Types.IntegerType.get()),
      required(4, "data", Types.StringType.get())
  );

  // This is the actual schema for the table, with column IDs reassigned
  static final Schema TABLE_SCHEMA = new Schema(
      required(1, "id", Types.IntegerType.get()),
      required(2, "data", Types.StringType.get())
  );

  // TABLE_SCHEMA after adding optional column "n" (expected post-update schema)
  static final Schema UPDATED_SCHEMA = new Schema(
      required(1, "id", Types.IntegerType.get()),
      required(2, "data", Types.StringType.get()),
      optional(3, "n", Types.IntegerType.get())
  );

  // Partition spec used to create tables
  static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
      .bucket("data", 16)
      .build();

  static final HadoopTables TABLES = new HadoopTables(new Configuration());

  static final DataFile FILE_A = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=0") // easy way to set partition data for now
      .withRecordCount(2) // needs at least one record or else metrics will filter it out
      .build();
  static final DataFile FILE_B = DataFiles.builder(SPEC)
      .withPath("/path/to/data-b.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=1") // easy way to set partition data for now
      .withRecordCount(2) // needs at least one record or else metrics will filter it out
      .build();
  // NOTE(review): FILE_C and FILE_D reuse the data-a.parquet path — looks like
  // a copy/paste from FILE_A; confirm whether distinct paths were intended
  static final DataFile FILE_C = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=2") // easy way to set partition data for now
      .withRecordCount(2) // needs at least one record or else metrics will filter it out
      .build();
  static final DataFile FILE_D = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=3") // easy way to set partition data for now
      .withRecordCount(2) // needs at least one record or else metrics will filter it out
      .build();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // populated by setupTable() before each test
  File tableDir = null;
  String tableLocation = null;
  File metadataDir = null;
  File versionHintFile = null;
  Table table = null;

  /** Creates a fresh table in a temporary directory before each test. */
  @Before
  public void setupTable() throws Exception {
    this.tableDir = temp.newFolder();
    tableDir.delete(); // created by table create
    this.tableLocation = tableDir.toURI().toString();
    this.metadataDir = new File(tableDir, "metadata");
    this.versionHintFile = new File(metadataDir, "version-hint.text");
    this.table = TABLES.create(SCHEMA, SPEC, tableLocation);
  }

  /** Returns the Avro manifest files in the metadata folder, excluding manifest lists. */
  List<File> listManifestFiles() {
    return Lists.newArrayList(metadataDir.listFiles((dir, name) ->
        !name.startsWith("snap") && Files.getFileExtension(name).equalsIgnoreCase("avro")));
  }

  /** Returns the metadata file for version i (e.g. v1.metadata.json). */
  File version(int i) {
    return new File(metadataDir, "v" + i + getFileExtension(new Configuration()));
  }

  /** Parses and returns the table metadata stored for the given version. */
  TableMetadata readMetadataVersion(int version) {
    return TableMetadataParser.read(new TestTables.TestTableOperations("table", tableDir),
        localInput(version(version)));
  }

  /** Reads the current version number from the version hint file. */
  int readVersionHint() throws IOException {
    return Integer.parseInt(Files.readFirstLine(versionHintFile, Charsets.UTF_8));
  }

  /** Overwrites the version hint file, simulating a stale or rolled-back hint. */
  void replaceVersionHint(int version) throws IOException {
    // remove the checksum that will no longer match
    new File(metadataDir, ".version-hint.text.crc").delete();
    Files.write(String.valueOf(version), versionHintFile, Charsets.UTF_8);
  }
}
| 1,928 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/hadoop/TestHadoopCommits.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.google.common.collect.Lists;
import com.netflix.iceberg.AssertHelpers;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.UpdateSchema;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.List;
public class TestHadoopCommits extends HadoopTableTestBase {
  @Test
  public void testCreateTable() throws Exception {
    // expected spec is built against the table schema with reassigned ids
    PartitionSpec expectedSpec = PartitionSpec.builderFor(TABLE_SCHEMA)
        .bucket("data", 16)
        .build();

    Assert.assertEquals("Table schema should match schema with reassigned ids",
        TABLE_SCHEMA.asStruct(), table.schema().asStruct());
    Assert.assertEquals("Table partition spec should match with reassigned ids",
        expectedSpec, table.spec());

    // a newly created table has no data files, so planning yields no tasks
    List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
    Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());

    // verify the on-disk layout: metadata dir, v1 metadata, and version hint
    Assert.assertTrue("Table location should exist",
        tableDir.exists());
    Assert.assertTrue("Should create metadata folder",
        metadataDir.exists() && metadataDir.isDirectory());
    Assert.assertTrue("Should create v1 metadata",
        version(1).exists() && version(1).isFile());
    Assert.assertFalse("Should not create v2 or newer verions",
        version(2).exists());
    Assert.assertTrue("Should create version hint file",
        versionHintFile.exists());
    Assert.assertEquals("Should write the current version to the hint file",
        1, readVersionHint());

    List<File> manifests = listManifestFiles();
    Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
  }
  @Test
  public void testSchemaUpdate() throws Exception {
    Assert.assertTrue("Should create v1 metadata",
        version(1).exists() && version(1).isFile());
    Assert.assertFalse("Should not create v2 or newer versions",
        version(2).exists());

    // committing a schema change should write a new metadata version (v2)
    table.updateSchema()
        .addColumn("n", Types.IntegerType.get())
        .commit();

    Assert.assertTrue("Should create v2 for the update",
        version(2).exists() && version(2).isFile());
    Assert.assertEquals("Should write the current version to the hint file",
        2, readVersionHint());

    Assert.assertEquals("Table schema should match schema with reassigned ids",
        UPDATED_SCHEMA.asStruct(), table.schema().asStruct());

    // schema-only changes add no data files or manifests
    List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
    Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());

    List<File> manifests = listManifestFiles();
    Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
  }
@Test
public void testFailedCommit() throws Exception {
// apply the change to metadata without committing
UpdateSchema update = table.updateSchema().addColumn("n", Types.IntegerType.get());
update.apply();
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
version(2).createNewFile();
AssertHelpers.assertThrows("Should fail to commit change based on v1 when v2 exists",
CommitFailedException.class, "Version 2 already exists", update::commit);
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
}
  @Test
  public void testStaleMetadata() throws Exception {
    // open a second handle to the same table location
    Table tableCopy = TABLES.load(tableLocation);

    Assert.assertTrue("Should create v1 metadata",
        version(1).exists() && version(1).isFile());
    Assert.assertFalse("Should not create v2 or newer versions",
        version(2).exists());

    // prepare changes on the copy without committing
    UpdateSchema updateCopy = tableCopy.updateSchema()
        .addColumn("m", Types.IntegerType.get());
    updateCopy.apply();

    // commit a different change through the primary handle, advancing to v2
    table.updateSchema()
        .addColumn("n", Types.IntegerType.get())
        .commit();

    Assert.assertTrue("Should create v2 for the update",
        version(2).exists() && version(2).isFile());
    Assert.assertNotEquals("Unmodified copy should be out of date after update",
        table.schema().asStruct(), tableCopy.schema().asStruct());

    // update the table
    tableCopy.refresh();

    Assert.assertEquals("Copy should be back in sync",
        table.schema().asStruct(), tableCopy.schema().asStruct());

    // the pending update was built against v1 metadata and must be rejected
    AssertHelpers.assertThrows("Should fail with stale base metadata",
        CommitFailedException.class, "based on stale table metadata", updateCopy::commit);

    List<File> manifests = listManifestFiles();
    Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
  }
@Test
public void testStaleVersionHint() throws Exception {
  // Load a second handle that will go stale once the primary handle commits.
  Table stale = TABLES.load(tableLocation);

  Assert.assertTrue("Should create v1 metadata",
      version(1).exists() && version(1).isFile());
  Assert.assertFalse("Should not create v2 or newer versions",
      version(2).exists());

  table.updateSchema()
      .addColumn("n", Types.IntegerType.get())
      .commit();

  Assert.assertTrue("Should create v2 for the update",
      version(2).exists() && version(2).isFile());
  Assert.assertEquals("Should write the current version to the hint file",
      2, readVersionHint());

  Assert.assertNotEquals("Stable table schema should not match",
      UPDATED_SCHEMA.asStruct(), stale.schema().asStruct());

  // roll the version hint back to 1; loading should still find the newest metadata
  replaceVersionHint(1);

  Table reloaded = TABLES.load(tableLocation);
  Assert.assertEquals("Updated schema for newly loaded table should match",
      UPDATED_SCHEMA.asStruct(), reloaded.schema().asStruct());

  stale.refresh();
  // FIX: assert against the refreshed stale handle, not reloaded again; the original
  // assertion re-checked reloaded.schema() and never verified the refresh() call above.
  Assert.assertEquals("Refreshed schema for stale table should match",
      UPDATED_SCHEMA.asStruct(), stale.schema().asStruct());
}
@Test
public void testFastAppend() throws Exception {
  // first append: fast appends write a new manifest instead of merging existing ones
  table.newFastAppend()
      .appendFile(FILE_A)
      .commit();

  Assert.assertTrue("Should create v2 for the update",
      version(2).exists() && version(2).isFile());
  Assert.assertEquals("Should write the current version to the hint file",
      2, readVersionHint());

  List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
  Assert.assertEquals("Should scan 1 file", 1, tasks.size());

  List<File> manifests = listManifestFiles();
  Assert.assertEquals("Should contain only one Avro manifest file", 1, manifests.size());

  // second append
  table.newFastAppend()
      .appendFile(FILE_B)
      .commit();

  Assert.assertTrue("Should create v3 for the update",
      version(3).exists() && version(3).isFile());
  Assert.assertEquals("Should write the current version to the hint file",
      3, readVersionHint());

  tasks = Lists.newArrayList(table.newScan().planFiles());
  Assert.assertEquals("Should scan 2 files", 2, tasks.size());

  // each fast append leaves its own manifest; nothing is merged
  Assert.assertEquals("Should contain 2 Avro manifest files",
      2, listManifestFiles().size());

  TableMetadata metadata = readMetadataVersion(3);
  Assert.assertEquals("Current snapshot should contain 2 manifests",
      2, metadata.currentSnapshot().manifests().size());
}
@Test
public void testMergeAppend() throws Exception {
  // NOTE(review): invoking another @Test method directly couples these tests; kept because
  // the suite relies on the table state (versions v2 and v3) it produces.
  testFastAppend(); // create 2 compatible manifest files that will be merged

  // merge all manifests for this test
  table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

  // third append: a regular append, which merges manifests when over the threshold
  table.newAppend()
      .appendFile(FILE_C)
      .commit();

  List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
  Assert.assertEquals("Should scan 3 files", 3, tasks.size());

  // the two originals plus the merged manifest remain on disk
  Assert.assertEquals("Should contain 3 Avro manifest files",
      3, listManifestFiles().size());

  // v4 is the property update, so the append is v5
  TableMetadata metadata = readMetadataVersion(5);
  Assert.assertEquals("Current snapshot should contain 1 merged manifest",
      1, metadata.currentSnapshot().manifests().size());
}
}
| 1,929 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestEntry.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import java.util.Collection;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * A single entry in a manifest file: a data file plus its status and the id of the snapshot
 * that produced the entry. Implements {@link IndexedRecord} so it can be written and read
 * directly with Avro.
 */
class ManifestEntry implements IndexedRecord, SpecificData.SchemaConstructable {
  enum Status {
    EXISTING(0),
    ADDED(1),
    DELETED(2);

    // Lookup table from status id to Status, indexed by id. The reference is final and the
    // size is derived from values() so adding a new status cannot silently overflow it.
    public static final Status[] values = new Status[Status.values().length];
    static {
      for (Status status : Status.values()) {
        values[status.id] = status;
      }
    }

    private final int id;

    Status(int id) {
      this.id = id;
    }

    public int id() {
      return id;
    }

    public static Status fromId(int id) {
      return values[id];
    }
  }

  private final org.apache.avro.Schema schema;
  private Status status = Status.EXISTING;
  private long snapshotId = 0L;
  private DataFile file = null;

  // used by Avro via SchemaConstructable when reading
  public ManifestEntry(org.apache.avro.Schema schema) {
    this.schema = schema;
  }

  ManifestEntry(StructType partitionType) {
    this.schema = AvroSchemaUtil.convert(getSchema(partitionType), "manifest_entry");
  }

  private ManifestEntry(ManifestEntry toCopy) {
    this.schema = toCopy.schema;
    this.status = toCopy.status;
    this.snapshotId = toCopy.snapshotId;
    this.file = toCopy.file().copy();
  }

  // the wrap* methods reuse this instance to avoid allocating one entry per file

  ManifestEntry wrapExisting(long snapshotId, DataFile file) {
    this.status = Status.EXISTING;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  ManifestEntry wrapAppend(long snapshotId, DataFile file) {
    this.status = Status.ADDED;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  ManifestEntry wrapDelete(long snapshotId, DataFile file) {
    this.status = Status.DELETED;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  /**
   * @return the status of the file, whether EXISTING, ADDED, or DELETED
   */
  public Status status() {
    return status;
  }

  /**
   * @return id of the snapshot in which the file was added to the table
   */
  public long snapshotId() {
    return snapshotId;
  }

  /**
   * @return a file
   */
  public DataFile file() {
    return file;
  }

  /** @return a deep copy of this entry; the wrapped file is copied as well */
  public ManifestEntry copy() {
    return new ManifestEntry(this);
  }

  @Override
  public void put(int i, Object v) {
    switch (i) {
      case 0:
        this.status = Status.fromId((Integer) v);
        return;
      case 1:
        this.snapshotId = (Long) v;
        return;
      case 2:
        this.file = (DataFile) v;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public Object get(int i) {
    switch (i) {
      case 0:
        return status.id();
      case 1:
        return snapshotId;
      case 2:
        return file;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + i);
    }
  }

  @Override
  public org.apache.avro.Schema getSchema() {
    return schema;
  }

  /** Returns the manifest entry schema with only the given data-file columns projected. */
  static Schema projectSchema(StructType partitionType, Collection<String> columns) {
    return wrapFileSchema(
        new Schema(DataFile.getType(partitionType).fields()).select(columns).asStruct());
  }

  /** Returns the full manifest entry schema for the given partition type. */
  static Schema getSchema(StructType partitionType) {
    return wrapFileSchema(DataFile.getType(partitionType));
  }

  private static Schema wrapFileSchema(StructType fileStruct) {
    // ids for top-level columns are assigned from 1000
    return new Schema(
        required(0, "status", IntegerType.get()),
        required(1, "snapshot_id", LongType.get()),
        required(2, "data_file", fileStruct));
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("status", status)
        .add("snapshot_id", snapshotId)
        .add("file", file)
        .toString();
  }
}
| 1,930 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FilteredManifest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveMetricsEvaluator;
import com.netflix.iceberg.expressions.Projections;
import java.util.Collection;
import java.util.Iterator;
/**
 * A view of a manifest that applies partition and row filters and a column projection when
 * iterating over its entries. Instances are immutable; each filter/select call returns a new
 * {@code FilteredManifest} sharing the underlying {@link ManifestReader}.
 */
public class FilteredManifest implements Filterable<FilteredManifest> {
  private final ManifestReader reader;
  // inclusive filter over each file's partition tuple
  private final Expression partFilter;
  // filter over row values, evaluated against each file's column metrics
  private final Expression rowFilter;
  // columns to project when reading manifest entries
  private final Collection<String> columns;

  // lazy state
  private Evaluator lazyEvaluator = null;
  private InclusiveMetricsEvaluator lazyMetricsEvaluator = null;

  FilteredManifest(ManifestReader reader, Expression partFilter, Expression rowFilter,
                   Collection<String> columns) {
    Preconditions.checkNotNull(reader, "ManifestReader cannot be null");
    this.reader = reader;
    this.partFilter = partFilter;
    this.rowFilter = rowFilter;
    this.columns = columns;
  }

  @Override
  public FilteredManifest select(Collection<String> columns) {
    return new FilteredManifest(reader, partFilter, rowFilter, columns);
  }

  @Override
  public FilteredManifest filterPartitions(Expression expr) {
    // partition filters are combined; the row filter is left unchanged
    return new FilteredManifest(reader,
        Expressions.and(partFilter, expr),
        rowFilter,
        columns);
  }

  @Override
  public FilteredManifest filterRows(Expression expr) {
    // a row filter is also projected to an inclusive partition filter so that partition
    // pruning can eliminate files before metrics are consulted
    Expression projected = Projections.inclusive(reader.spec()).project(expr);
    return new FilteredManifest(reader,
        Expressions.and(partFilter, projected),
        Expressions.and(rowFilter, expr),
        columns);
  }

  // NOTE(review): in the three methods below, filtering is applied only when BOTH the row
  // filter AND the partition filter are non-trivial (&&). If only one of them is set, the
  // entries are returned without evaluator filtering here. Confirm the && is intentional
  // and not meant to be ||.

  /** Returns all entries, including deleted ones, that may match the filters. */
  Iterable<ManifestEntry> allEntries() {
    if (rowFilter != null && rowFilter != Expressions.alwaysTrue() &&
        partFilter != null && partFilter != Expressions.alwaysTrue()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterables.filter(reader.entries(columns),
          entry -> (entry != null &&
              evaluator.eval(entry.file().partition()) &&
              metricsEvaluator.eval(entry.file())));
    } else {
      return reader.entries(columns);
    }
  }

  /** Returns entries that may match the filters, excluding DELETED entries. */
  Iterable<ManifestEntry> liveEntries() {
    if (rowFilter != null && rowFilter != Expressions.alwaysTrue() &&
        partFilter != null && partFilter != Expressions.alwaysTrue()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterables.filter(reader.entries(columns),
          entry -> (entry != null &&
              entry.status() != Status.DELETED &&
              evaluator.eval(entry.file().partition()) &&
              metricsEvaluator.eval(entry.file())));
    } else {
      return Iterables.filter(reader.entries(columns),
          entry -> entry != null && entry.status() != Status.DELETED);
    }
  }

  @Override
  public Iterator<DataFile> iterator() {
    // files are copied so callers can hold them after the underlying reader advances
    if (rowFilter != null && rowFilter != Expressions.alwaysTrue() &&
        partFilter != null && partFilter != Expressions.alwaysTrue()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterators.transform(
          Iterators.filter(reader.iterator(partFilter, columns),
              input -> (input != null &&
                  evaluator.eval(input.partition()) &&
                  metricsEvaluator.eval(input))),
          DataFile::copy);
    } else {
      // the partition filter is still pushed down to the reader in this branch
      return Iterators.transform(reader.iterator(partFilter, columns), DataFile::copy);
    }
  }

  // lazily built; safe because this class is used from a single thread per scan
  private Evaluator evaluator() {
    if (lazyEvaluator == null) {
      this.lazyEvaluator = new Evaluator(reader.spec().partitionType(), partFilter);
    }
    return lazyEvaluator;
  }

  private InclusiveMetricsEvaluator metricsEvaluator() {
    if (lazyMetricsEvaluator == null) {
      this.lazyMetricsEvaluator = new InclusiveMetricsEvaluator(reader.spec().schema(), rowFilter);
    }
    return lazyMetricsEvaluator;
  }
}
| 1,931 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseMetastoreTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configuration;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
 * Base {@link Tables} implementation for metastore-backed tables.
 * <p>
 * Subclasses provide the metadata access layer by implementing
 * {@link #newTableOps(Configuration, String, String)}.
 */
public abstract class BaseMetastoreTables implements Tables {
  private final Configuration conf;

  // protected: this class is abstract, so the constructor is only callable by subclasses
  protected BaseMetastoreTables(Configuration conf) {
    this.conf = conf;
  }

  /** Returns a {@link BaseMetastoreTableOperations} for the given database and table. */
  protected abstract BaseMetastoreTableOperations newTableOps(Configuration conf,
                                                              String database, String table);

  /**
   * Loads an existing table.
   *
   * @throws NoSuchTableException if the table has no current metadata
   */
  public Table load(String database, String table) {
    TableOperations ops = newTableOps(conf, database, table);
    if (ops.current() == null) {
      throw new NoSuchTableException("Table does not exist: " + database + "." + table);
    }

    return new BaseTable(ops, database + "." + table);
  }

  /** Creates an unpartitioned table with the given schema. */
  public Table create(Schema schema, String database, String table) {
    return create(schema, PartitionSpec.unpartitioned(), database, table);
  }

  /** Creates a table with the given schema and partition spec and no extra properties. */
  public Table create(Schema schema, PartitionSpec spec, String database, String table) {
    return create(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Creates a table, committing its first metadata version.
   *
   * @throws AlreadyExistsException if the table already has metadata
   */
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
                      String database, String table) {
    TableOperations ops = newTableOps(conf, database, table);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
    }

    String location = defaultWarehouseLocation(conf, database, table);
    TableMetadata metadata = newTableMetadata(ops, schema, spec, location, properties);
    ops.commit(null, metadata);

    return new BaseTable(ops, database + "." + table);
  }

  /** Starts a create-table transaction with no extra properties. */
  public Transaction beginCreate(Schema schema, PartitionSpec spec, String database, String table) {
    return beginCreate(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Starts a create-table transaction; nothing is visible until the transaction commits.
   *
   * @throws AlreadyExistsException if the table already has metadata
   */
  public Transaction beginCreate(Schema schema, PartitionSpec spec, Map<String, String> properties,
                                 String database, String table) {
    TableOperations ops = newTableOps(conf, database, table);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
    }

    String location = defaultWarehouseLocation(conf, database, table);
    TableMetadata metadata = newTableMetadata(ops, schema, spec, location, properties);

    return BaseTransaction.createTableTransaction(ops, metadata);
  }

  /** Starts a replace-table transaction with no extra properties. */
  public Transaction beginReplace(Schema schema, PartitionSpec spec,
                                  String database, String table) {
    return beginReplace(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Starts a replace-table transaction. If the table does not exist, this falls back to a
   * create-table transaction instead of failing.
   */
  public Transaction beginReplace(Schema schema, PartitionSpec spec, Map<String, String> properties,
                                  String database, String table) {
    TableOperations ops = newTableOps(conf, database, table);
    TableMetadata current = ops.current();

    TableMetadata metadata;
    if (current != null) {
      metadata = current.buildReplacement(schema, spec, properties);
      return BaseTransaction.replaceTableTransaction(ops, metadata);
    } else {
      String location = defaultWarehouseLocation(conf, database, table);
      metadata = newTableMetadata(ops, schema, spec, location, properties);
      return BaseTransaction.createTableTransaction(ops, metadata);
    }
  }

  /**
   * Returns the default location for a table, derived from the Hive warehouse directory.
   * Subclasses may override to use a different layout.
   */
  protected String defaultWarehouseLocation(Configuration conf,
                                            String database, String table) {
    String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
    Preconditions.checkNotNull(warehouseLocation,
        "Warehouse location is not set: hive.metastore.warehouse.dir=null");
    return String.format("%s/%s.db/%s", warehouseLocation, database, table);
  }
}
| 1,932 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SchemaUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * Schema evolution API implementation.
 * <p>
 * Pending changes are collected in three structures — {@code deletes}, {@code updates}
 * (renames and type promotions, merged per field id), and {@code adds} (new columns grouped
 * by parent struct id) — and applied to the base schema in a single visitor pass by
 * {@link #apply()}. {@link #commit()} writes the result through {@link TableOperations}.
 */
class SchemaUpdate implements UpdateSchema {
  // sentinel parent id for columns added at the top level of the schema
  private static final int TABLE_ROOT_ID = -1;

  private final TableOperations ops;
  private final TableMetadata base;
  private final Schema schema;
  // field ids to remove
  private final List<Integer> deletes = Lists.newArrayList();
  // pending rename/type changes keyed by field id
  private final Map<Integer, Types.NestedField> updates = Maps.newHashMap();
  // new columns keyed by the id of the struct they are added to
  private final Multimap<Integer, Types.NestedField> adds =
      Multimaps.newListMultimap(Maps.newHashMap(), Lists::newArrayList);
  // highest column id assigned so far; incremented for each new column
  private int lastColumnId;

  SchemaUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
    this.schema = base.schema();
    this.lastColumnId = base.lastColumnId();
  }

  /**
   * For testing only.
   */
  SchemaUpdate(Schema schema, int lastColumnId) {
    this.ops = null;
    this.base = null;
    this.schema = schema;
    this.lastColumnId = lastColumnId;
  }

  @Override
  public UpdateSchema addColumn(String name, Type type) {
    // a dot would be ambiguous with the parent.name form; require the 3-arg overload
    Preconditions.checkArgument(!name.contains("."),
        "Cannot add column with ambiguous name: %s, use addColumn(parent, name, type)", name);
    return addColumn(null, name, type);
  }

  @Override
  public UpdateSchema addColumn(String parent, String name, Type type) {
    int parentId = TABLE_ROOT_ID;
    if (parent != null) {
      Types.NestedField parentField = schema.findField(parent);
      Preconditions.checkArgument(parentField != null, "Cannot find parent struct: %s", parent);
      Type parentType = parentField.type();
      if (parentType.isNestedType()) {
        Type.NestedType nested = parentType.asNestedType();
        if (nested.isMapType()) {
          // fields are added to the map value type
          parentField = nested.asMapType().fields().get(1);
        } else if (nested.isListType()) {
          // fields are added to the element type
          parentField = nested.asListType().fields().get(0);
        }
      }
      // after resolving through map/list, the target must be a struct
      Preconditions.checkArgument(
          parentField.type().isNestedType() && parentField.type().asNestedType().isStructType(),
          "Cannot add to non-struct column: %s: %s", parent, parentField.type());
      parentId = parentField.fieldId();
      Preconditions.checkArgument(!deletes.contains(parentId),
          "Cannot add to a column that will be deleted: %s", parent);
      Preconditions.checkArgument(schema.findField(parent + "." + name) == null,
          "Cannot add column, name already exists: " + parent + "." + name);
    } else {
      Preconditions.checkArgument(schema.findField(name) == null,
          "Cannot add column, name already exists: " + name);
    }

    // assign new IDs in order
    int newId = assignNewColumnId();

    // added columns are always optional so existing data remains readable;
    // nested ids are assigned depth-first after the column's own id
    adds.put(parentId, Types.NestedField.optional(newId, name,
        TypeUtil.assignFreshIds(type, this::assignNewColumnId)));

    return this;
  }

  @Override
  public UpdateSchema deleteColumn(String name) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot delete missing column: %s", name);
    // a column cannot both be deleted and have pending adds or updates
    Preconditions.checkArgument(!adds.containsKey(field.fieldId()),
        "Cannot delete a column that has additions: %s", name);
    Preconditions.checkArgument(!updates.containsKey(field.fieldId()),
        "Cannot delete a column that has updates: %s", name);

    deletes.add(field.fieldId());

    return this;
  }

  @Override
  public UpdateSchema renameColumn(String name, String newName) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot rename missing column: %s", name);
    Preconditions.checkArgument(!deletes.contains(field.fieldId()),
        "Cannot rename a column that will be deleted: %s", field.name());

    // merge with an update, if present
    int fieldId = field.fieldId();
    Types.NestedField update = updates.get(fieldId);
    // the required/optional flag on these entries is ignored when changes are applied;
    // the original field's optionality is preserved by ApplyChanges.struct
    if (update != null) {
      updates.put(fieldId, Types.NestedField.required(fieldId, newName, update.type()));
    } else {
      updates.put(fieldId, Types.NestedField.required(fieldId, newName, field.type()));
    }

    return this;
  }

  @Override
  public UpdateSchema updateColumn(String name, Type.PrimitiveType newType) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot update missing column: %s", name);
    Preconditions.checkArgument(!deletes.contains(field.fieldId()),
        "Cannot update a column that will be deleted: %s", field.name());
    // only safe widening promotions (e.g. int -> long) are allowed
    Preconditions.checkArgument(TypeUtil.isPromotionAllowed(field.type(), newType),
        "Cannot change column type: %s: %s -> %s", name, field.type(), newType);

    // merge with a rename, if present
    int fieldId = field.fieldId();
    Types.NestedField rename = updates.get(fieldId);
    if (rename != null) {
      updates.put(fieldId, Types.NestedField.required(fieldId, rename.name(), newType));
    } else {
      updates.put(fieldId, Types.NestedField.required(fieldId, field.name(), newType));
    }

    return this;
  }

  /**
   * Apply the pending changes to the original schema and returns the result.
   * <p>
   * This does not result in a permanent update.
   *
   * @return the result Schema when all pending updates are applied
   */
  @Override
  public Schema apply() {
    return applyChanges(schema, deletes, updates, adds);
  }

  @Override
  public void commit() {
    TableMetadata update = base.updateSchema(apply(), lastColumnId);
    ops.commit(base, update);
  }

  // returns the next unused column id and records it as used
  private int assignNewColumnId() {
    int next = lastColumnId + 1;
    this.lastColumnId = next;
    return next;
  }

  private static Schema applyChanges(Schema schema, List<Integer> deletes,
                                     Map<Integer, Types.NestedField> updates,
                                     Multimap<Integer, Types.NestedField> adds) {
    Types.StructType struct = TypeUtil
        .visit(schema, new ApplyChanges(deletes, updates, adds))
        .asNestedType().asStructType();
    return new Schema(struct.fields());
  }

  /**
   * Schema visitor that rebuilds the schema bottom-up, returning null for deleted fields and
   * substituting renamed/promoted fields and appended columns as it unwinds.
   */
  private static class ApplyChanges extends TypeUtil.SchemaVisitor<Type> {
    private final List<Integer> deletes;
    private final Map<Integer, Types.NestedField> updates;
    private final Multimap<Integer, Types.NestedField> adds;

    private ApplyChanges(List<Integer> deletes,
                         Map<Integer, Types.NestedField> updates,
                         Multimap<Integer, Types.NestedField> adds) {
      this.deletes = deletes;
      this.updates = updates;
      this.adds = adds;
    }

    @Override
    public Type schema(Schema schema, Type structResult) {
      // append any top-level additions to the root struct
      Collection<Types.NestedField> newColumns = adds.get(TABLE_ROOT_ID);
      if (newColumns != null) {
        return addFields(structResult.asNestedType().asStructType(), newColumns);
      }

      return structResult;
    }

    @Override
    public Type struct(Types.StructType struct, List<Type> fieldResults) {
      // the original struct is returned unchanged unless a field was deleted (null result),
      // renamed, or had its type changed; optionality always comes from the original field
      boolean hasChange = false;
      List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fieldResults.size());
      for (int i = 0; i < fieldResults.size(); i += 1) {
        Type resultType = fieldResults.get(i);
        if (resultType == null) {
          // deleted field: drop it from the rebuilt struct
          hasChange = true;
          continue;
        }

        Types.NestedField field = struct.fields().get(i);
        String name = field.name();
        Types.NestedField update = updates.get(field.fieldId());
        if (update != null && update.name() != null) {
          name = update.name();
        }

        if (!name.equals(field.name()) || field.type() != resultType) {
          hasChange = true;
          if (field.isOptional()) {
            newFields.add(Types.NestedField.optional(field.fieldId(), name, resultType));
          } else {
            newFields.add(Types.NestedField.required(field.fieldId(), name, resultType));
          }
        } else {
          newFields.add(field);
        }
      }

      if (hasChange) {
        // TODO: What happens if there are no fields left?
        return Types.StructType.of(newFields);
      }

      return struct;
    }

    @Override
    public Type field(Types.NestedField field, Type fieldResult) {
      // the API validates deletes, updates, and additions don't conflict
      int fieldId = field.fieldId();
      if (deletes.contains(fieldId)) {
        return null;
      }

      Types.NestedField update = updates.get(field.fieldId());
      if (update != null && update.type() != field.type()) {
        // rename is handled in struct
        return update.type();
      }

      Collection<Types.NestedField> newFields = adds.get(fieldId);
      if (newFields != null && !newFields.isEmpty()) {
        return addFields(fieldResult.asNestedType().asStructType(), newFields);
      }

      return fieldResult;
    }

    @Override
    public Type list(Types.ListType list, Type result) {
      // use field to apply updates
      Type elementResult = field(list.fields().get(0), result);
      if (elementResult == null) {
        throw new IllegalArgumentException("Cannot delete element type from list: " + list);
      }

      if (list.elementType() == elementResult) {
        return list;
      }

      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementResult);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementResult);
      }
    }

    @Override
    public Type map(Types.MapType map, Type kResult, Type vResult) {
      // if any updates are intended for the key, throw an exception
      int keyId = map.fields().get(0).fieldId();
      if (deletes.contains(keyId)) {
        throw new IllegalArgumentException("Cannot delete map keys: " + map);
      } else if (updates.containsKey(keyId)) {
        throw new IllegalArgumentException("Cannot update map keys: " + map);
      } else if (adds.containsKey(keyId)) {
        throw new IllegalArgumentException("Cannot add fields to map keys: " + map);
      } else if (!map.keyType().equals(kResult)) {
        throw new IllegalArgumentException("Cannot alter map keys: " + map);
      }

      // use field to apply updates to the value
      Type valueResult = field(map.fields().get(1), vResult);
      if (valueResult == null) {
        throw new IllegalArgumentException("Cannot delete value type from map: " + map);
      }

      if (map.valueType() == valueResult) {
        return map;
      }

      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueResult);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueResult);
      }
    }

    @Override
    public Type primitive(Type.PrimitiveType primitive) {
      return primitive;
    }
  }

  // returns a copy of the struct with the new fields appended at the end
  private static Types.StructType addFields(Types.StructType struct,
                                            Collection<Types.NestedField> adds) {
    List<Types.NestedField> newFields = Lists.newArrayList(struct.fields());
    newFields.addAll(adds);
    return Types.StructType.of(newFields);
  }
}
| 1,933 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SchemaParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutionException;
public class SchemaParser {
private static final String TYPE = "type";
private static final String STRUCT = "struct";
private static final String LIST = "list";
private static final String MAP = "map";
private static final String FIELDS = "fields";
private static final String ELEMENT = "element";
private static final String KEY = "key";
private static final String VALUE = "value";
private static final String NAME = "name";
private static final String ID = "id";
private static final String ELEMENT_ID = "element-id";
private static final String KEY_ID = "key-id";
private static final String VALUE_ID = "value-id";
private static final String REQUIRED = "required";
private static final String ELEMENT_REQUIRED = "element-required";
private static final String VALUE_REQUIRED = "value-required";
/**
 * Writes a struct type as {"type": "struct", "fields": [...]}.
 * <p>
 * The write order of keys defines the serialized metadata format; do not reorder.
 */
static void toJson(Types.StructType struct, JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField(TYPE, STRUCT);
  generator.writeArrayFieldStart(FIELDS);
  for (Types.NestedField field : struct.fields()) {
    generator.writeStartObject();
    generator.writeNumberField(ID, field.fieldId());
    generator.writeStringField(NAME, field.name());
    generator.writeBooleanField(REQUIRED, field.isRequired());
    generator.writeFieldName(TYPE);
    // recurse for the field's type, which may itself be nested
    toJson(field.type(), generator);
    generator.writeEndObject();
  }
  generator.writeEndArray();
  generator.writeEndObject();
}
/**
 * Writes a list type as {"type": "list", "element-id": ..., "element": ...,
 * "element-required": ...}. Key order defines the serialized format; do not reorder.
 */
static void toJson(Types.ListType list, JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField(TYPE, LIST);
  generator.writeNumberField(ELEMENT_ID, list.elementId());
  generator.writeFieldName(ELEMENT);
  toJson(list.elementType(), generator);
  // serialized as "required", the inverse of the in-memory optional flag
  generator.writeBooleanField(ELEMENT_REQUIRED, !list.isElementOptional());
  generator.writeEndObject();
}
/**
 * Writes a map type as {"type": "map", "key-id": ..., "key": ..., "value-id": ...,
 * "value": ..., "value-required": ...}. Key order defines the serialized format.
 * Map keys are always required, so only the value carries a required flag.
 */
static void toJson(Types.MapType map, JsonGenerator generator) throws IOException {
  generator.writeStartObject();
  generator.writeStringField(TYPE, MAP);
  generator.writeNumberField(KEY_ID, map.keyId());
  generator.writeFieldName(KEY);
  toJson(map.keyType(), generator);
  generator.writeNumberField(VALUE_ID, map.valueId());
  generator.writeFieldName(VALUE);
  toJson(map.valueType(), generator);
  // serialized as "required", the inverse of the in-memory optional flag
  generator.writeBooleanField(VALUE_REQUIRED, !map.isValueOptional());
  generator.writeEndObject();
}
// Primitives are serialized as a bare JSON string, e.g. "int" or "decimal(9,2)".
static void toJson(Type.PrimitiveType primitive, JsonGenerator generator) throws IOException {
  generator.writeString(primitive.toString());
}
/**
 * Serializes any type by routing it to the writer for its specific category:
 * primitives as strings, struct/list/map as JSON objects.
 *
 * @throws IllegalArgumentException if the type is not a known category
 */
static void toJson(Type type, JsonGenerator generator) throws IOException {
  if (type.isPrimitiveType()) {
    toJson(type.asPrimitiveType(), generator);
  } else {
    Type.NestedType nested = type.asNestedType();
    if (nested.isStructType()) {
      toJson(nested.asStructType(), generator);
    } else if (nested.isListType()) {
      toJson(nested.asListType(), generator);
    } else if (nested.isMapType()) {
      toJson(nested.asMapType(), generator);
    } else {
      throw new IllegalArgumentException("Cannot write unknown type: " + type);
    }
  }
}
/** Writes a schema to the generator; a schema is serialized as its root struct type. */
public static void toJson(Schema schema, JsonGenerator generator) throws IOException {
  toJson(schema.asStruct(), generator);
}
/** Returns the schema serialized as compact (non-pretty) JSON. */
public static String toJson(Schema schema) {
  return toJson(schema, false);
}
/**
 * Returns the schema serialized as JSON, optionally pretty-printed.
 *
 * @throws RuntimeIOException if serialization fails
 */
public static String toJson(Schema schema, boolean pretty) {
  try {
    StringWriter writer = new StringWriter();
    // FIX: close the generator (try-with-resources) so its buffers are flushed and
    // released; the original only flushed and leaked the generator. Closing also closes
    // the StringWriter, which is a no-op.
    try (JsonGenerator generator = JsonUtil.factory().createGenerator(writer)) {
      if (pretty) {
        generator.useDefaultPrettyPrinter();
      }
      toJson(schema.asStruct(), generator);
      generator.flush();
    }
    return writer.toString();
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
/**
 * Parses a type from its JSON form: primitives are bare strings, nested types are objects
 * whose "type" key selects struct, list, or map.
 *
 * @throws IllegalArgumentException if the node is neither a primitive string nor a known
 *         nested-type object
 */
private static Type typeFromJson(JsonNode json) {
  if (json.isTextual()) {
    return Types.fromPrimitiveString(json.asText());
  }

  if (json.isObject()) {
    switch (json.get(TYPE).asText()) {
      case STRUCT:
        return structFromJson(json);
      case LIST:
        return listFromJson(json);
      case MAP:
        return mapFromJson(json);
      default:
        break; // fall through to the error below
    }
  }

  throw new IllegalArgumentException("Cannot parse type from json: " + json);
}
/**
 * Parses a struct type from {"type": "struct", "fields": [...]}; each field object carries
 * an id, name, type, and required flag.
 */
private static Types.StructType structFromJson(JsonNode json) {
  JsonNode fieldArray = json.get(FIELDS);
  Preconditions.checkArgument(fieldArray.isArray(),
      "Cannot parse struct fields from non-array: %s", fieldArray);

  List<Types.NestedField> fields = Lists.newArrayListWithExpectedSize(fieldArray.size());
  // JsonNode is Iterable over its elements, visiting the array entries in order
  for (JsonNode field : fieldArray) {
    Preconditions.checkArgument(field.isObject(),
        "Cannot parse struct field from non-object: %s", field);

    int id = JsonUtil.getInt(ID, field);
    String name = JsonUtil.getString(NAME, field);
    Type type = typeFromJson(field.get(TYPE));

    fields.add(JsonUtil.getBool(REQUIRED, field)
        ? Types.NestedField.required(id, name, type)
        : Types.NestedField.optional(id, name, type));
  }

  return Types.StructType.of(fields);
}
/**
 * Parses a list type from a JSON object with "element-id", "element", and
 * "element-required" fields.
 *
 * @param json the JSON object describing the list
 * @return the parsed list type
 */
private static Types.ListType listFromJson(JsonNode json) {
  int elementId = JsonUtil.getInt(ELEMENT_ID, json);
  Type elementType = typeFromJson(json.get(ELEMENT));
  // the required flag selects between the two list factories
  return JsonUtil.getBool(ELEMENT_REQUIRED, json)
      ? Types.ListType.ofRequired(elementId, elementType)
      : Types.ListType.ofOptional(elementId, elementType);
}
/**
 * Parses a map type from a JSON object with key/value ids and types and a
 * "value-required" flag (keys are always required).
 *
 * @param json the JSON object describing the map
 * @return the parsed map type
 */
private static Types.MapType mapFromJson(JsonNode json) {
  int keyId = JsonUtil.getInt(KEY_ID, json);
  Type keyType = typeFromJson(json.get(KEY));

  int valueId = JsonUtil.getInt(VALUE_ID, json);
  Type valueType = typeFromJson(json.get(VALUE));

  // only the value's required flag is configurable
  return JsonUtil.getBool(VALUE_REQUIRED, json)
      ? Types.MapType.ofRequired(keyId, valueId, keyType, valueType)
      : Types.MapType.ofOptional(keyId, valueId, keyType, valueType);
}
/**
 * Parses a {@link Schema} from its JSON representation.
 *
 * @param json a JsonNode holding a serialized schema; its root must be a struct type
 * @return the parsed schema
 * @throws IllegalArgumentException if the root type is not a struct
 */
public static Schema fromJson(JsonNode json) {
  Type type = typeFromJson(json);
  Preconditions.checkArgument(type.isNestedType() && type.asNestedType().isStructType(),
      "Cannot create schema, not a struct type: %s", type);
  return new Schema(type.asNestedType().asStructType().fields());
}
// Cache of parsed schemas keyed by their JSON string. Weak values let entries be
// collected once the schema is no longer referenced elsewhere.
// fix: declared final — the cache reference is never reassigned and a mutable
// static field invites accidental replacement.
private static final Cache<String, Schema> SCHEMA_CACHE = CacheBuilder.newBuilder()
    .weakValues()
    .build();
/**
 * Parses a {@link Schema} from a JSON string, memoizing results in {@code SCHEMA_CACHE}.
 *
 * @param json a JSON string holding a serialized schema
 * @return the parsed schema
 * @throws RuntimeIOException if the string cannot be parsed as JSON
 */
public static Schema fromJson(String json) {
  try {
    // the loader runs only on a cache miss; hits skip JSON parsing entirely
    return SCHEMA_CACHE.get(json,
        () -> fromJson(JsonUtil.mapper().readValue(json, JsonNode.class)));
  } catch (ExecutionException e) {
    // unwrap the loader's exception so callers see the original cause
    if (e.getCause() instanceof IOException) {
      throw new RuntimeIOException(
          (IOException) e.getCause(), "Failed to parse schema: %s", json);
    } else {
      throw new RuntimeException("Failed to parse schema: " + json, e.getCause());
    }
  }
}
}
| 1,934 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ScanSummary.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.And;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expression.Operation;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.expressions.NamedReference;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.Pair;
import java.io.IOException;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
/**
 * Utility for summarizing the partitions matched by a {@link TableScan} as a map of
 * partition key to per-partition metrics.
 */
public class ScanSummary {
  // static utility entry point; not meant to be instantiated
  private ScanSummary() {
  }

  // manifest-entry columns needed to compute partition metrics
  private static final List<String> SCAN_SUMMARY_COLUMNS = ImmutableList.of(
      "partition", "record_count", "file_size_in_bytes");

  /**
   * Create a scan summary builder for a table scan.
   *
   * @param scan a TableScan
   * @return a scan summary builder
   */
  public static ScanSummary.Builder of(TableScan scan) {
    return new Builder(scan);
  }
/**
 * Builds a partition-level summary of a table scan, optionally restricted by a
 * snapshot-timestamp range and limited to a maximum number of partitions.
 */
public static class Builder {
  // filter references on these column names are interpreted as snapshot timestamps
  private static final Set<String> TIMESTAMP_NAMES = Sets.newHashSet(
      "dateCreated", "lastUpdated");
  private final TableScan scan;
  private final Table table;
  private final TableOperations ops;
  // snapshot id -> commit timestamp for every snapshot currently known to the table
  private final Map<Long, Long> snapshotTimestamps;
  private int limit = Integer.MAX_VALUE;
  private boolean throwIfLimited = false;
  private List<UnboundPredicate<Long>> timeFilters = Lists.newArrayList();

  public Builder(TableScan scan) {
    this.scan = scan;
    this.table = scan.table();
    this.ops = ((HasTableOperations) table).operations();
    ImmutableMap.Builder<Long, Long> builder = ImmutableMap.builder();
    for (Snapshot snap : table.snapshots()) {
      builder.put(snap.snapshotId(), snap.timestampMillis());
    }
    this.snapshotTimestamps = builder.build();
  }

  private void addTimestampFilter(UnboundPredicate<Long> filter) {
    throwIfLimited(); // ensure all partitions can be returned
    timeFilters.add(filter);
  }

  public Builder after(String timestamp) {
    Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
    // timestamp literal values appear to be microseconds; divided to millis — TODO confirm
    return after(tsLiteral.value() / 1000);
  }

  public Builder after(long timestampMillis) {
    addTimestampFilter(Expressions.greaterThanOrEqual("timestamp_ms", timestampMillis));
    return this;
  }

  public Builder before(String timestamp) {
    Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
    return before(tsLiteral.value() / 1000);
  }

  public Builder before(long timestampMillis) {
    addTimestampFilter(Expressions.lessThanOrEqual("timestamp_ms", timestampMillis));
    return this;
  }

  /** Fail instead of truncating when the partition limit is exceeded. */
  public Builder throwIfLimited() {
    this.throwIfLimited = true;
    return this;
  }

  /** Caps the number of partitions returned by {@link #build()}. */
  public Builder limit(int numPartitions) {
    this.limit = numPartitions;
    return this;
  }

  // walks an expression tree, moving timestamp predicates into timeFilters and
  // collecting all other predicates into the given list
  private void removeTimeFilters(List<Expression> expressions, Expression expression) {
    if (expression.op() == Operation.AND) {
      And and = (And) expression;
      removeTimeFilters(expressions, and.left());
      removeTimeFilters(expressions, and.right());
      return;
    } else if (expression instanceof UnboundPredicate) {
      UnboundPredicate pred = (UnboundPredicate) expression;
      NamedReference ref = (NamedReference) pred.ref();
      Literal<?> lit = pred.literal();
      if (TIMESTAMP_NAMES.contains(ref.name())) {
        // rewrite the predicate against the synthetic "timestamp_ms" column
        Literal<Long> tsLiteral = lit.to(Types.TimestampType.withoutZone());
        long millis = toMillis(tsLiteral.value());
        addTimestampFilter(Expressions.predicate(pred.op(), "timestamp_ms", millis));
        return;
      }
    }

    expressions.add(expression);
  }

  /**
   * Summarizes a table scan as a map of partition key to metrics for that partition.
   *
   * @return a map from partition key to metrics for that partition.
   */
  public Map<String, PartitionMetrics> build() {
    if (table.currentSnapshot() == null) {
      return ImmutableMap.of(); // no snapshots, so there are no partitions
    }

    TopN<String, PartitionMetrics> topN = new TopN<>(
        limit, throwIfLimited, Comparators.charSequences());

    // split timestamp predicates out of the scan filter; the remainder filters rows
    List<Expression> filters = Lists.newArrayList();
    removeTimeFilters(filters, Expressions.rewriteNot(scan.filter()));
    Expression rowFilter = joinFilters(filters);

    Iterable<ManifestFile> manifests = table.currentSnapshot().manifests();

    boolean filterByTimestamp = !timeFilters.isEmpty();
    Set<Long> snapshotsInTimeRange = Sets.newHashSet();
    if (filterByTimestamp) {
      Pair<Long, Long> range = timestampRange(timeFilters);
      long minTimestamp = range.first();
      long maxTimestamp = range.second();

      Snapshot oldestSnapshot = table.currentSnapshot();
      for (Map.Entry<Long, Long> entry : snapshotTimestamps.entrySet()) {
        long snapshotId = entry.getKey();
        long timestamp = entry.getValue();
        if (timestamp < oldestSnapshot.timestampMillis()) {
          oldestSnapshot = ops.current().snapshot(snapshotId);
        }
        if (timestamp >= minTimestamp && timestamp <= maxTimestamp) {
          snapshotsInTimeRange.add(snapshotId);
        }
      }

      // if oldest known snapshot is in the range, then there may be an expired snapshot that has
      // been removed that matched the range. because the timestamp of that snapshot is unknown,
      // it can't be included in the results and the results are not reliable.
      if (snapshotsInTimeRange.contains(oldestSnapshot.snapshotId()) &&
          minTimestamp < oldestSnapshot.timestampMillis()) {
        throw new IllegalArgumentException(
            "Cannot satisfy time filters: time range may include expired snapshots");
      }

      // filter down to the set of manifest files that were added after the start of the
      // time range. manifests after the end of the time range must be included because
      // compaction may create a manifest after the time range that includes files added in the
      // range.
      manifests = Iterables.filter(manifests, manifest -> {
        if (manifest.snapshotId() == null) {
          return true; // can't tell when the manifest was written, so it may contain matches
        }
        Long timestamp = snapshotTimestamps.get(manifest.snapshotId());
        // if the timestamp is null, then its snapshot has expired. the check for the oldest
        // snapshot ensures that all expired snapshots are not in the time range.
        return timestamp != null && timestamp >= minTimestamp;
      });
    }

    try (CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, manifests)
        .filterData(rowFilter)
        .ignoreDeleted()
        .select(SCAN_SUMMARY_COLUMNS)
        .entries()) {
      PartitionSpec spec = table.spec();
      for (ManifestEntry entry : entries) {
        Long timestamp = snapshotTimestamps.get(entry.snapshotId());

        // if filtering, skip timestamps that are outside the range
        if (filterByTimestamp && !snapshotsInTimeRange.contains(entry.snapshotId())) {
          continue;
        }

        // accumulate this entry's metrics under its partition key
        String partition = spec.partitionToPath(entry.file().partition());
        topN.update(partition, metrics -> (metrics == null ? new PartitionMetrics() : metrics)
            .updateFromFile(entry.file(), timestamp));
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }

    return topN.get();
  }
}
/**
 * Aggregated metrics for a single partition: number of files, total records, total
 * size in bytes, and the newest known snapshot timestamp among the counted files.
 */
public static class PartitionMetrics {
  private int fileCount = 0;
  private long recordCount = 0L;
  private long totalSize = 0L;
  private Long dataTimestampMillis = null;

  /** Returns the number of files counted for this partition. */
  public int fileCount() {
    return fileCount;
  }

  /** Returns the total record count across counted files. */
  public long recordCount() {
    return recordCount;
  }

  /** Returns the combined size, in bytes, of counted files. */
  public long totalSize() {
    return totalSize;
  }

  /** Returns the newest snapshot timestamp of the counted files, or null if unknown. */
  public Long dataTimestampMillis() {
    return dataTimestampMillis;
  }

  private PartitionMetrics updateFromFile(DataFile file, Long timestampMillis) {
    this.fileCount += 1;
    this.recordCount += file.recordCount();
    this.totalSize += file.fileSizeInBytes();
    // keep the most recent non-null timestamp seen so far
    if (timestampMillis != null) {
      if (dataTimestampMillis == null || dataTimestampMillis < timestampMillis) {
        this.dataTimestampMillis = timestampMillis;
      }
    }
    return this;
  }

  @Override
  public String toString() {
    String dataTimestamp = (dataTimestampMillis == null)
        ? null
        : new Date(dataTimestampMillis).toString();
    return String.format("PartitionMetrics(fileCount=%s, recordCount=%s, totalSize=%s, dataTimestamp=%s)",
        fileCount, recordCount, totalSize, dataTimestamp);
  }
}
/**
 * Keeps at most {@code maxSize} entries, retaining those with the smallest keys in
 * the given comparator's order. Once an entry is evicted, its key (the "cut") and
 * anything sorting at or after it are ignored on later updates.
 */
private static class TopN<K, V> {
  private final int maxSize;
  private final boolean throwIfLimited;
  private final TreeMap<K, V> map;
  private final Comparator<? super K> keyComparator;
  private K cut = null;

  TopN(int maxSize, boolean throwIfLimited, Comparator<? super K> keyComparator) {
    this.maxSize = maxSize;
    this.throwIfLimited = throwIfLimited;
    this.map = new TreeMap<>(keyComparator);
    this.keyComparator = keyComparator;
  }

  public void update(K key, Function<V, V> updateFunc) {
    // keys at or past the cut were already evicted; ignore them
    if (cut != null && keyComparator.compare(cut, key) <= 0) {
      return;
    }

    // apply the update function to the current value (null when absent) and store it
    map.put(key, updateFunc.apply(map.get(key)));

    // evict the largest keys until the size constraint holds, remembering the cut
    while (map.size() > maxSize) {
      if (throwIfLimited) {
        throw new IllegalStateException(
            String.format("Too many matching keys: more than %d", maxSize));
      }
      this.cut = map.lastKey();
      map.remove(cut);
    }
  }

  /** Returns an immutable snapshot of the retained entries, in key order. */
  public Map<K, V> get() {
    return ImmutableMap.copyOf(map);
  }
}
/**
 * Combines a list of filter expressions with AND; an empty list yields alwaysTrue().
 *
 * @param expressions the expressions to combine
 * @return the conjunction of all expressions
 */
static Expression joinFilters(List<Expression> expressions) {
  Expression combined = Expressions.alwaysTrue();
  for (Expression expr : expressions) {
    combined = Expressions.and(combined, expr);
  }
  return combined;
}
/**
 * Normalizes a timestamp to milliseconds, inferring the unit from its magnitude:
 * values below 1e10 are treated as seconds, below 1e13 as milliseconds, and
 * anything larger as microseconds.
 *
 * @param timestamp a timestamp in seconds, milliseconds, or microseconds
 * @return the timestamp in milliseconds
 */
static long toMillis(long timestamp) {
  if (timestamp < 10000000000L) {
    return timestamp * 1000; // seconds range
  }
  if (timestamp < 10000000000000L) {
    return timestamp; // already milliseconds
  }
  return timestamp / 1000; // microseconds
}
/**
 * Computes the inclusive [min, max] millisecond range implied by a list of timestamp
 * predicates, intersecting all of them.
 *
 * @param timeFilters timestamp predicates on "timestamp_ms"
 * @return a pair of (min, max), both inclusive
 * @throws IllegalArgumentException if the intersection of the filters is empty
 * @throws UnsupportedOperationException if a predicate operation is not LT/LT_EQ/GT/GT_EQ/EQ
 */
static Pair<Long, Long> timestampRange(List<UnboundPredicate<Long>> timeFilters) {
  // evaluation is inclusive
  long minTimestamp = Long.MIN_VALUE;
  long maxTimestamp = Long.MAX_VALUE;

  for (UnboundPredicate<Long> pred : timeFilters) {
    long value = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // strict less-than becomes an inclusive bound at value - 1
        if (value - 1 < maxTimestamp) {
          maxTimestamp = value - 1;
        }
        break;
      case LT_EQ:
        if (value < maxTimestamp) {
          maxTimestamp = value;
        }
        break;
      case GT:
        // strict greater-than becomes an inclusive bound at value + 1
        if (value + 1 > minTimestamp) {
          minTimestamp = value + 1;
        }
        break;
      case GT_EQ:
        if (value > minTimestamp) {
          minTimestamp = value;
        }
        break;
      case EQ:
        // equality tightens both ends toward the value
        if (value < maxTimestamp) {
          maxTimestamp = value;
        }
        if (value > minTimestamp) {
          minTimestamp = value;
        }
        break;
      default:
        throw new UnsupportedOperationException(
            "Cannot filter timestamps using predicate: " + pred);
    }
  }

  if (maxTimestamp < minTimestamp) {
    throw new IllegalArgumentException(
        "No timestamps can match filters: " + Joiner.on(", ").join(timeFilters));
  }

  return Pair.of(minTimestamp, maxTimestamp);
}
}
| 1,935 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseMetastoreTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.hadoop.HadoopFileIO;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.Tasks;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.UUID;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.TableMetadataParser.read;
import static com.netflix.iceberg.hadoop.HadoopInputFile.fromLocation;
/**
 * Base {@link TableOperations} for tables whose current metadata location is tracked by a
 * metastore; subclasses supply the metastore-specific commit and refresh behavior.
 */
public abstract class BaseMetastoreTableOperations implements TableOperations {
  private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreTableOperations.class);

  // metastore table property keys used to identify Iceberg tables and their metadata
  public static final String TABLE_TYPE_PROP = "table_type";
  public static final String ICEBERG_TABLE_TYPE_VALUE = "iceberg";
  public static final String METADATA_LOCATION_PROP = "metadata_location";
  public static final String PREVIOUS_METADATA_LOCATION_PROP = "previous_metadata_location";

  private static final String METADATA_FOLDER_NAME = "metadata";
  private static final String DATA_FOLDER_NAME = "data";
  private static final String HIVE_LOCATION_FOLDER_NAME = "empty";

  private final Configuration conf;
  private final FileIO fileIo;

  // state cached from the last refresh; shouldRefresh forces a reload on the next access
  private TableMetadata currentMetadata = null;
  private String currentMetadataLocation = null;
  private boolean shouldRefresh = true;
  private String baseLocation = null;
  private int version = -1;

  protected BaseMetastoreTableOperations(Configuration conf) {
    this.conf = conf;
    this.fileIo = new HadoopFileIO(conf);
  }
/**
 * Returns the current table metadata, refreshing it first if a refresh was requested.
 */
@Override
public TableMetadata current() {
  if (shouldRefresh) {
    return refresh();
  }
  return currentMetadata;
}

/** Returns the location of the metadata file loaded by the last refresh, or null. */
public String currentMetadataLocation() {
  return currentMetadataLocation;
}

/** Returns the version parsed from the current metadata file name, or -1 if unknown. */
public int currentVersion() {
  return version;
}

/** Marks the cached metadata stale so the next {@link #current()} call reloads it. */
protected void requestRefresh() {
  this.shouldRefresh = true;
}

/**
 * Returns the location registered with Hive for this table; points at an "empty"
 * folder so engines reading the Hive location directly find no data files.
 */
public String hiveTableLocation() {
  return String.format("%s/%s", baseLocation, HIVE_LOCATION_FOLDER_NAME);
}
/**
 * Writes the given table metadata to a new, uniquely named metadata file.
 *
 * @param metadata the metadata to persist
 * @param version the version number to embed in the file name
 * @return the location of the newly written metadata file
 */
protected String writeNewMetadata(TableMetadata metadata, int version) {
  // first write for a new table: take the base location from the metadata itself
  if (baseLocation == null) {
    baseLocation = metadata.location();
  }

  String newTableMetadataFilePath = newTableMetadataFilePath(baseLocation, version);
  OutputFile newMetadataLocation = fileIo.newOutputFile(newTableMetadataFilePath);

  // write the new metadata
  TableMetadataParser.write(metadata, newMetadataLocation);

  return newTableMetadataFilePath;
}

/** Refreshes cached metadata from the given location with the default of 20 retries. */
protected void refreshFromMetadataLocation(String newLocation) {
  refreshFromMetadataLocation(newLocation, 20);
}
/**
 * Reloads cached table metadata from the given location if it differs from the one
 * already loaded, retrying reads with exponential backoff.
 *
 * @param newLocation the metadata file location reported by the metastore
 * @param numRetries how many times to retry reading the metadata file
 */
protected void refreshFromMetadataLocation(String newLocation, int numRetries) {
  // use null-safe equality check because new tables have a null metadata location
  if (!Objects.equal(currentMetadataLocation, newLocation)) {
    LOG.info("Refreshing table metadata from new version: " + newLocation);

    Tasks.foreach(newLocation)
        .retry(numRetries).exponentialBackoff(100, 5000, 600000, 4.0 /* 100, 400, 1600, ... */ )
        .suppressFailureWhenFinished()
        .run(location -> {
          // update all cached state together so it stays mutually consistent
          this.currentMetadata = read(this, fromLocation(location, conf));
          this.currentMetadataLocation = location;
          this.baseLocation = currentMetadata.location();
          this.version = parseVersion(location);
        });
  }
  this.shouldRefresh = false;
}
/** Returns the location for a named file under this table's metadata folder. */
@Override
public String metadataFileLocation(String fileName) {
  return String.format("%s/%s/%s", baseLocation, METADATA_FOLDER_NAME, fileName);
}

/** Returns the {@link FileIO} used to read and write table files. */
@Override
public FileIO io() {
  return fileIo;
}

/**
 * Returns a new snapshot id based on the current wall-clock time in millis.
 * NOTE(review): two commits within the same millisecond would collide — confirm
 * this is acceptable for all metastore implementations.
 */
@Override
public long newSnapshotId() {
  return System.currentTimeMillis();
}

// builds a metadata file path of the form <base>/metadata/00042-<uuid><ext>
private String newTableMetadataFilePath(String baseLocation, int newVersion) {
  return String.format("%s/%s/%05d-%s%s",
      baseLocation,
      METADATA_FOLDER_NAME,
      newVersion,
      UUID.randomUUID(),
      getFileExtension(this.conf));
}
/**
 * Extracts the numeric version prefix from a metadata file location, e.g.
 * ".../metadata/00005-&lt;uuid&gt;.metadata.json" yields 5.
 *
 * @param metadataLocation a fully-qualified metadata file location
 * @return the parsed version, or -1 if it cannot be determined
 */
private static int parseVersion(String metadataLocation) {
  int versionStart = metadataLocation.lastIndexOf('/') + 1; // if '/' isn't found, this will be 0
  int versionEnd = metadataLocation.indexOf('-', versionStart);
  // fix: a file name with no '-' made indexOf return -1 and substring throw
  // StringIndexOutOfBoundsException, which the catch below did not cover
  if (versionEnd < 0) {
    LOG.warn("Unable to parse version from metadata location: " + metadataLocation);
    return -1;
  }
  try {
    // parseInt avoids the needless boxing of Integer.valueOf
    return Integer.parseInt(metadataLocation.substring(versionStart, versionEnd));
  } catch (NumberFormatException e) {
    LOG.warn("Unable to parse version from metadata location: " + metadataLocation);
    return -1;
  }
}
/**
 * Returns the Hadoop FileSystem for a path, wrapping IOException as RuntimeIOException.
 * NOTE(review): this helper appears unused within this class — confirm before removing.
 */
private static FileSystem getFS(Path path, Configuration conf) {
  try {
    return path.getFileSystem(conf);
  } catch (IOException e) {
    throw new RuntimeIOException(e);
  }
}
}
| 1,936 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/StreamingDelete.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.expressions.Expression;
/**
* {@link DeleteFiles Delete} implementation that avoids loading full manifests in memory.
* <p>
* This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
*/
class StreamingDelete extends MergingSnapshotUpdate implements DeleteFiles {
  StreamingDelete(TableOperations ops) {
    super(ops);
  }

  /** Marks the file at the given path for deletion; delegates to {@code delete}. */
  @Override
  public StreamingDelete deleteFile(CharSequence path) {
    delete(path);
    return this;
  }

  /** Deletes files matching the given row filter; delegates to {@code deleteByRowFilter}. */
  @Override
  public StreamingDelete deleteFromRowFilter(Expression expr) {
    deleteByRowFilter(expr);
    return this;
  }
}
| 1,937 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/RollbackToSnapshot.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.exceptions.ValidationException;
/**
 * {@link Rollback} implementation that resets the table's current snapshot to an
 * earlier one, selected by id or by timestamp. Operates on the table state captured
 * at construction time and does not refresh or retry on commit.
 */
class RollbackToSnapshot implements Rollback {
  private final TableOperations ops;
  // table state captured at construction; commits are validated against this base
  private TableMetadata base = null;
  private Long targetSnapshotId = null;

  RollbackToSnapshot(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current(); // do not retry
  }

  /**
   * Selects the snapshot to roll back to by id.
   *
   * @throws IllegalArgumentException if the id is not a known snapshot
   */
  @Override
  public Rollback toSnapshotId(long snapshotId) {
    Preconditions.checkArgument(base.snapshot(snapshotId) != null,
        "Cannot roll back to unknown snapshot id: %s", snapshotId);

    this.targetSnapshotId = snapshotId;

    return this;
  }

  /**
   * Selects the latest snapshot strictly older than the given timestamp.
   *
   * @throws IllegalArgumentException if no snapshot is older than the timestamp
   */
  @Override
  public Rollback toSnapshotAtTime(long timestampMillis) {
    long snapshotId = 0;
    long snapshotTimestamp = 0;
    // find the latest snapshot by timestamp older than timestampMillis
    for (Snapshot snapshot : base.snapshots()) {
      if (snapshot.timestampMillis() < timestampMillis &&
          snapshot.timestampMillis() > snapshotTimestamp) {
        snapshotId = snapshot.snapshotId();
        snapshotTimestamp = snapshot.timestampMillis();
      }
    }

    // if no snapshot matched, snapshotId stays 0 and this check fails
    Preconditions.checkArgument(base.snapshot(snapshotId) != null,
        "Cannot roll back, no valid snapshot older than: %s", timestampMillis);

    this.targetSnapshotId = snapshotId;

    return this;
  }

  /**
   * Returns the snapshot that will become current.
   *
   * @throws com.netflix.iceberg.exceptions.ValidationException if no target was selected
   */
  @Override
  public Snapshot apply() {
    ValidationException.check(targetSnapshotId != null,
        "Cannot roll back to unknown version: call toSnapshotId or toSnapshotAtTime");

    return base.snapshot(targetSnapshotId);
  }

  @Override
  public void commit() {
    // rollback does not refresh or retry. it only operates on the state of the table when rollback
    // was called to create the transaction.
    ops.commit(base, base.rollbackTo(apply()));
  }
}
| 1,938 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseSnapshot.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
/**
 * {@link Snapshot} implementation backed by either an eager list of manifests or a
 * manifest-list file that is read lazily on first access. Added and deleted files are
 * also computed lazily by scanning the snapshot's manifests.
 */
class BaseSnapshot implements Snapshot {
  private final TableOperations ops;
  private final long snapshotId;
  private final Long parentId;
  private final long timestampMillis;
  // file listing this snapshot's manifests; null when manifests were given directly
  private final InputFile manifestList;

  // lazily initialized
  private List<ManifestFile> manifests = null;
  private List<DataFile> adds = null;
  private List<DataFile> deletes = null;

  /**
   * For testing only.
   */
  BaseSnapshot(TableOperations ops,
               long snapshotId,
               String... manifestFiles) {
    this(ops, snapshotId, null, System.currentTimeMillis(),
        Lists.transform(Arrays.asList(manifestFiles),
            path -> new GenericManifestFile(ops.io().newInputFile(path), 0)));
  }

  BaseSnapshot(TableOperations ops,
               long snapshotId,
               Long parentId,
               long timestampMillis,
               InputFile manifestList) {
    this.ops = ops;
    this.snapshotId = snapshotId;
    this.parentId = parentId;
    this.timestampMillis = timestampMillis;
    this.manifestList = manifestList;
  }

  BaseSnapshot(TableOperations ops,
               long snapshotId,
               Long parentId,
               long timestampMillis,
               List<ManifestFile> manifests) {
    // delegate with a null manifest list, then pin the manifests eagerly
    this(ops, snapshotId, parentId, timestampMillis, (InputFile) null);
    this.manifests = manifests;
  }

  @Override
  public long snapshotId() {
    return snapshotId;
  }

  @Override
  public Long parentId() {
    return parentId;
  }

  @Override
  public long timestampMillis() {
    return timestampMillis;
  }

  @Override
  public List<ManifestFile> manifests() {
    if (manifests == null) {
      // if manifests isn't set, then the snapshotFile is set and should be read to get the list
      try (CloseableIterable<ManifestFile> files = Avro.read(manifestList)
          .rename("manifest_file", GenericManifestFile.class.getName())
          .rename("partitions", GenericPartitionFieldSummary.class.getName())
          .rename("r508", GenericPartitionFieldSummary.class.getName())
          .project(ManifestFile.schema())
          .reuseContainers(false)
          .build()) {
        this.manifests = Lists.newLinkedList(files);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Cannot read snapshot file: %s", manifestList.location());
      }
    }

    return manifests;
  }

  @Override
  public List<DataFile> addedFiles() {
    if (adds == null) {
      cacheChanges();
    }
    return adds;
  }

  @Override
  public List<DataFile> deletedFiles() {
    if (deletes == null) {
      cacheChanges();
    }
    return deletes;
  }

  @Override
  public String manifestListLocation() {
    return manifestList != null ? manifestList.location() : null;
  }

  // scans all manifests once and caches both added and deleted files for this snapshot
  private void cacheChanges() {
    List<DataFile> adds = Lists.newArrayList();
    List<DataFile> deletes = Lists.newArrayList();

    // accumulate adds and deletes from all manifests.
    // because manifests can be reused in newer snapshots, filter the changes by snapshot id.
    for (String manifest : Iterables.transform(manifests(), ManifestFile::path)) {
      try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest))) {
        for (ManifestEntry add : reader.addedFiles()) {
          if (add.snapshotId() == snapshotId) {
            // copy so the cached file is independent of the reader's reused container
            adds.add(add.file().copy());
          }
        }
        for (ManifestEntry delete : reader.deletedFiles()) {
          if (delete.snapshotId() == snapshotId) {
            deletes.add(delete.file().copy());
          }
        }
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to close reader while caching changes");
      }
    }

    this.adds = adds;
    this.deletes = deletes;
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("id", snapshotId)
        .add("timestamp_ms", timestampMillis)
        .add("manifests", manifests())
        .toString();
  }
}
| 1,939 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/RemoveSnapshots.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.util.Tasks;
import com.netflix.iceberg.util.ThreadPools;
import io.netty.util.internal.ConcurrentSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * {@link ExpireSnapshots} implementation that removes snapshots by id or by age and
 * then deletes manifests and data files that are no longer reachable from any
 * remaining snapshot. Cleanup is best-effort: read and delete failures are logged
 * and suppressed to delete as much as possible.
 */
class RemoveSnapshots implements ExpireSnapshots {
  private static final Logger LOG = LoggerFactory.getLogger(RemoveSnapshots.class);

  // default delete implementation removes files through the table's FileIO
  private final Consumer<String> defaultDelete = new Consumer<String>() {
    @Override
    public void accept(String file) {
      ops.io().deleteFile(file);
    }
  };

  private final TableOperations ops;
  private final Set<Long> idsToRemove = Sets.newHashSet();
  private TableMetadata base;
  private Long expireOlderThan = null;
  private Consumer<String> deleteFunc = defaultDelete;

  RemoveSnapshots(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  @Override
  public ExpireSnapshots expireSnapshotId(long snapshotId) {
    LOG.info("Expiring snapshot with id: {}", snapshotId);
    idsToRemove.add(snapshotId);
    return this;
  }

  @Override
  public ExpireSnapshots expireOlderThan(long timestampMillis) {
    LOG.info("Expiring snapshots older than: {} ({})", new Date(timestampMillis), timestampMillis);
    this.expireOlderThan = timestampMillis;
    return this;
  }

  @Override
  public ExpireSnapshots deleteWith(Consumer<String> deleteFunc) {
    this.deleteFunc = deleteFunc;
    return this;
  }

  /** Returns the snapshots that would be removed, without committing anything. */
  @Override
  public List<Snapshot> apply() {
    TableMetadata updated = internalApply();
    List<Snapshot> removed = Lists.newArrayList(base.snapshots());
    removed.removeAll(updated.snapshots());

    return removed;
  }

  // refreshes the base metadata and applies the id/age expiration criteria
  private TableMetadata internalApply() {
    this.base = ops.refresh();

    return base.removeSnapshotsIf(snapshot -> (
        idsToRemove.contains(snapshot.snapshotId()) ||
        (expireOlderThan != null && snapshot.timestampMillis() < expireOlderThan)
    ));
  }

  @Override
  public void commit() {
    Tasks.foreach(ops)
        .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
        .exponentialBackoff(
            base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
            2.0 /* exponential */ )
        .onlyRetryOn(CommitFailedException.class)
        .run(item -> {
          TableMetadata updated = internalApply();
          // only commit the updated metadata if at least one snapshot was removed
          if (updated.snapshots().size() != base.snapshots().size()) {
            ops.commit(base, updated);
          }
        });

    LOG.info("Committed snapshot changes; cleaning up expired manifests and data files.");

    // clean up the expired snapshots:
    // 1. Get a list of the snapshots that were removed
    // 2. Delete any data files that were deleted by those snapshots and are not in the table
    // 3. Delete any manifests that are no longer used by current snapshots
    // Reads and deletes are done using Tasks.foreach(...).suppressFailureWhenFinished to complete
    // as much of the delete work as possible and avoid orphaned data or manifest files.

    TableMetadata current = ops.refresh();
    Set<Long> currentIds = Sets.newHashSet();
    Set<ManifestFile> currentManifests = Sets.newHashSet();
    for (Snapshot snapshot : current.snapshots()) {
      currentIds.add(snapshot.snapshotId());
      currentManifests.addAll(snapshot.manifests());
    }

    Set<ManifestFile> allManifests = Sets.newHashSet(currentManifests);
    Set<String> manifestsToDelete = Sets.newHashSet();
    for (Snapshot snapshot : base.snapshots()) {
      long snapshotId = snapshot.snapshotId();
      if (!currentIds.contains(snapshotId)) {
        // the snapshot was removed, find any manifests that are no longer needed
        LOG.info("Removing snapshot: {}", snapshot);
        for (ManifestFile manifest : snapshot.manifests()) {
          if (!currentManifests.contains(manifest)) {
            manifestsToDelete.add(manifest.path());
            allManifests.add(manifest);
          }
        }
      }
    }

    // fix: use the JDK's concurrent set instead of Netty's internal, deprecated
    // io.netty.util.internal.ConcurrentSet
    Set<String> filesToDelete = ConcurrentHashMap.newKeySet();
    Tasks.foreach(allManifests)
        .noRetry().suppressFailureWhenFinished()
        .executeWith(ThreadPools.getWorkerPool())
        .onFailure((item, exc) ->
            LOG.warn("Failed to get deleted files: this may cause orphaned data files", exc)
        ).run(manifest -> {
          if (manifest.deletedFilesCount() != null && manifest.deletedFilesCount() == 0) {
            return;
          }

          // the manifest has deletes, scan it to find files to delete
          try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
            for (ManifestEntry entry : reader.entries()) {
              // if the snapshot ID of the DELETE entry is no longer valid, the data can be deleted
              if (entry.status() == ManifestEntry.Status.DELETED &&
                  !currentIds.contains(entry.snapshotId())) {
                // use toString to ensure the path will not change (Utf8 is reused)
                filesToDelete.add(entry.file().path().toString());
              }
            }
          } catch (IOException e) {
            throw new RuntimeIOException(e, "Failed to read manifest file: " + manifest.path());
          }
        });

    LOG.warn("Manifests to delete: {}", Joiner.on(", ").join(manifestsToDelete));

    Tasks.foreach(filesToDelete)
        .noRetry().suppressFailureWhenFinished()
        .onFailure((file, exc) -> LOG.warn("Delete failed for data file: " + file, exc))
        .run(file -> deleteFunc.accept(file));

    Tasks.foreach(manifestsToDelete)
        .noRetry().suppressFailureWhenFinished()
        .onFailure((manifest, exc) -> LOG.warn("Delete failed for manifest: " + manifest, exc))
        .run(deleteFunc::accept);
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import java.io.Serializable;
import java.util.Arrays;
/**
 * Generic struct of partition values, used for data file partition tuples.
 * <p>
 * Implements {@link IndexedRecord} so it can be written to and read from Avro manifest
 * files, and {@link StructLike} so it can be used by expression evaluators. Instances
 * are {@link Serializable}; the Avro {@link Schema} is kept as a String because Schema
 * itself is not Serializable, and {@link Utf8} values are converted to String on set
 * for the same reason.
 */
class PartitionData
    implements IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
  static Schema getSchema(Types.StructType partitionType) {
    return AvroSchemaUtil.convert(partitionType, PartitionData.class.getName());
  }

  private final Types.StructType partitionType;
  private final int size;
  private final Object[] data;
  // serializable stand-in for the Avro schema; parsed lazily in getSchema()
  private final String stringSchema;
  private transient Schema schema = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public PartitionData(Schema schema) {
    this.partitionType = AvroSchemaUtil.convert(schema).asNestedType().asStructType();
    this.size = partitionType.fields().size();
    this.data = new Object[size];
    this.stringSchema = schema.toString();
    this.schema = schema;
  }

  /**
   * Creates empty partition data for the given partition type.
   *
   * @throws IllegalArgumentException if any partition field is not a primitive type
   */
  PartitionData(Types.StructType partitionType) {
    for (Types.NestedField field : partitionType.fields()) {
      Preconditions.checkArgument(field.type().isPrimitiveType(),
          "Partitions cannot contain nested types: " + field.type());
    }
    this.partitionType = partitionType;
    this.size = partitionType.fields().size();
    this.data = new Object[size];
    this.schema = getSchema(partitionType);
    this.stringSchema = schema.toString();
  }

  /**
   * Copy constructor
   */
  private PartitionData(PartitionData toCopy) {
    this.partitionType = toCopy.partitionType;
    this.size = toCopy.size;
    // copy the value array so the new instance is independent of the original
    this.data = Arrays.copyOf(toCopy.data, toCopy.data.length);
    this.stringSchema = toCopy.stringSchema;
    this.schema = toCopy.schema;
  }

  public Types.StructType getPartitionType() {
    return partitionType;
  }

  public Schema getSchema() {
    if (schema == null) {
      // after deserialization the transient schema is null; reparse from the string form
      this.schema = new Schema.Parser().parse(stringSchema);
    }
    return schema;
  }

  /** Returns the type of the partition field at the given position. */
  public Type getType(int pos) {
    return partitionType.fields().get(pos).type();
  }

  /** Resets all partition values to null so the instance can be reused. */
  public void clear() {
    Arrays.fill(data, null);
  }

  @Override
  public int size() {
    return size;
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> T get(int pos, Class<T> javaClass) {
    Object v = get(pos);
    if (v == null || javaClass.isInstance(v)) {
      return javaClass.cast(v);
    }
    throw new IllegalArgumentException(String.format(
        "Wrong class, %s, for object: %s",
        javaClass.getName(), String.valueOf(v)));
  }

  @Override
  public <T> void set(int pos, T value) {
    if (value instanceof Utf8) {
      // Utf8 is not Serializable
      data[pos] = value.toString();
    } else {
      data[pos] = value;
    }
  }

  @Override
  public void put(int i, Object v) {
    set(i, v);
  }

  @Override
  public Object get(int i) {
    // tolerate reads past the end: projected readers may ask for positions not present
    if (i >= data.length) {
      return null;
    }
    return data[i];
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("PartitionData{");
    for (int i = 0; i < data.length; i += 1) {
      if (i > 0) {
        sb.append(", ");
      }
      sb.append(partitionType.fields().get(i).name())
          .append("=")
          .append(data[i]);
    }
    sb.append("}");
    return sb.toString();
  }

  public PartitionData copy() {
    return new PartitionData(this);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    PartitionData that = (PartitionData) o;
    return partitionType.equals(that.partitionType) && Arrays.equals(data, that.data);
  }

  @Override
  public int hashCode() {
    // hash the contents of the data array, not the array reference, so that hashCode is
    // consistent with equals (which uses Arrays.equals). Passing the array directly to
    // Objects.hashCode(Object...) would use the array's identity hash and break the
    // equals/hashCode contract for equal instances backed by different arrays.
    return Objects.hashCode(partitionType, Arrays.hashCode(data));
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.expressions.StrictMetricsEvaluator;
import java.util.List;
/**
 * {@link OverwriteFiles} implementation that deletes files matching a row filter and
 * appends replacement files in a single snapshot update.
 */
public class OverwriteData extends MergingSnapshotUpdate implements OverwriteFiles {
  // when true, apply() verifies that every added file contains only rows matching the filter
  private boolean validateAddedFiles = false;

  protected OverwriteData(TableOperations ops) {
    super(ops);
  }

  @Override
  public OverwriteFiles overwriteByRowFilter(Expression expr) {
    deleteByRowFilter(expr);
    return this;
  }

  @Override
  public OverwriteFiles addFile(DataFile file) {
    add(file);
    return this;
  }

  @Override
  public OverwriteFiles validateAddedFiles() {
    this.validateAddedFiles = true;
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    if (validateAddedFiles) {
      checkAddedFilesMatchFilter(base);
    }
    return super.apply(base);
  }

  /**
   * Verifies that every added file contains only rows matching the overwrite filter.
   *
   * @throws ValidationException if any added file may contain non-matching rows
   */
  private void checkAddedFilesMatchFilter(TableMetadata base) {
    PartitionSpec spec = writeSpec();
    Expression filter = rowFilter();
    Evaluator inclusiveEval =
        new Evaluator(spec.partitionType(), Projections.inclusive(spec).project(filter));
    Evaluator strictEval =
        new Evaluator(spec.partitionType(), Projections.strict(spec).project(filter));
    StrictMetricsEvaluator metricsEval = new StrictMetricsEvaluator(base.schema(), filter);

    for (DataFile file : addedFiles()) {
      // the real test is that the strict or metrics test matches the file, indicating that all
      // records in the file match the filter. inclusive is used to avoid testing the metrics,
      // which is more complicated
      boolean allRowsMatch = inclusiveEval.eval(file.partition()) &&
          (strictEval.eval(file.partition()) || metricsEval.eval(file));
      ValidationException.check(allRowsMatch,
          "Cannot append file with rows that do not match filter: %s: %s",
          filter, file.path());
    }
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
 * Generic {@link DataFile} implementation backing rows in Avro manifest files.
 * <p>
 * Implements {@link IndexedRecord} so it can be read and written with Avro, and
 * {@link StructLike} so it can be used by expression evaluators. When read with a
 * projected Avro schema, incoming field ordinals are remapped to the full schema's
 * ordinals through {@code fromProjectionPos}; the ordinals in {@link #put(int, Object)}
 * and {@link #get(int)} follow the field order of {@code DataFile.getType}.
 */
class GenericDataFile
    implements DataFile, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
  private static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of();
  // shared immutable partition tuple used for unpartitioned files; copy() returns itself
  private static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) {
    @Override
    public PartitionData copy() {
      return this; // this does not change
    }
  };

  // maps ordinals of a projected schema to ordinals of the full schema; null when not projected
  private int[] fromProjectionPos;
  private Types.StructType partitionType;

  private String filePath = null;
  private FileFormat format = null;
  private PartitionData partitionData = null;
  private Long recordCount = null;
  private long fileSizeInBytes = -1L;
  private long blockSizeInBytes = -1L;

  // optional fields
  private Integer fileOrdinal = null; // boxed for nullability
  private List<Integer> sortColumns = null;
  private Map<Integer, Long> columnSizes = null;
  private Map<Integer, Long> valueCounts = null;
  private Map<Integer, Long> nullValueCounts = null;
  private Map<Integer, ByteBuffer> lowerBounds = null;
  private Map<Integer, ByteBuffer> upperBounds = null;

  // cached schema
  private transient org.apache.avro.Schema avroSchema = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   * Builds the projection-to-full-schema ordinal mapping by matching field ids.
   */
  public GenericDataFile(org.apache.avro.Schema avroSchema) {
    this.avroSchema = avroSchema;

    Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType();

    // partition type may be null if the field was not projected
    Type partType = schema.fieldType("partition");
    if (partType != null) {
      this.partitionType = partType.asNestedType().asStructType();
    } else {
      this.partitionType = EMPTY_STRUCT_TYPE;
    }

    List<Types.NestedField> fields = schema.fields();
    List<Types.NestedField> allFields = DataFile.getType(partitionType).fields();
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
        }
      }

      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }

    this.partitionData = new PartitionData(partitionType);
  }

  /** Creates an unpartitioned data file with no metrics. */
  GenericDataFile(String filePath, FileFormat format, long recordCount,
                  long fileSizeInBytes, long blockSizeInBytes) {
    this.filePath = filePath;
    this.format = format;
    this.partitionData = EMPTY_PARTITION_DATA;
    this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
    this.recordCount = recordCount;
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = null;
    this.valueCounts = null;
    this.nullValueCounts = null;
    this.lowerBounds = null;
    this.upperBounds = null;
    this.fromProjectionPos = null;
  }

  /** Creates a partitioned data file with no metrics. */
  GenericDataFile(String filePath, FileFormat format, PartitionData partition,
                  long recordCount, long fileSizeInBytes, long blockSizeInBytes) {
    this.filePath = filePath;
    this.format = format;
    this.partitionData = partition;
    this.partitionType = partition.getPartitionType();
    this.recordCount = recordCount;
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = null;
    this.valueCounts = null;
    this.nullValueCounts = null;
    this.lowerBounds = null;
    this.upperBounds = null;
    this.fromProjectionPos = null;
  }

  /**
   * Creates a data file with column-level metrics; record count comes from the metrics.
   */
  GenericDataFile(String filePath, FileFormat format, PartitionData partition,
                  long fileSizeInBytes, long blockSizeInBytes, Metrics metrics) {
    this.filePath = filePath;
    this.format = format;

    // this constructor is used by DataFiles.Builder, which passes null for unpartitioned data
    if (partition == null) {
      this.partitionData = EMPTY_PARTITION_DATA;
      this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
    } else {
      this.partitionData = partition;
      this.partitionType = partition.getPartitionType();
    }

    // this will throw NPE if metrics.recordCount is null
    this.recordCount = metrics.recordCount();
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = metrics.columnSizes();
    this.valueCounts = metrics.valueCounts();
    this.nullValueCounts = metrics.nullValueCounts();
    // wrap bound maps so their ByteBuffer values survive Java serialization
    this.lowerBounds = SerializableByteBufferMap.wrap(metrics.lowerBounds());
    this.upperBounds = SerializableByteBufferMap.wrap(metrics.upperBounds());
    this.fromProjectionPos = null;
  }

  /**
   * Copy constructor.
   *
   * @param toCopy a generic data file to copy.
   */
  private GenericDataFile(GenericDataFile toCopy) {
    this.filePath = toCopy.filePath;
    this.format = toCopy.format;
    this.partitionData = toCopy.partitionData.copy();
    this.partitionType = toCopy.partitionType;
    this.recordCount = toCopy.recordCount;
    this.fileSizeInBytes = toCopy.fileSizeInBytes;
    this.blockSizeInBytes = toCopy.blockSizeInBytes;
    this.fileOrdinal = toCopy.fileOrdinal;
    this.sortColumns = toCopy.sortColumns;
    // TODO: support lazy conversion to/from map
    this.columnSizes = toCopy.columnSizes;
    this.valueCounts = toCopy.valueCounts;
    this.nullValueCounts = toCopy.nullValueCounts;
    this.lowerBounds = toCopy.lowerBounds;
    this.upperBounds = toCopy.upperBounds;
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }

  /**
   * Constructor for Java serialization.
   */
  GenericDataFile() {
  }

  @Override
  public CharSequence path() {
    return filePath;
  }

  @Override
  public FileFormat format() {
    return format;
  }

  @Override
  public StructLike partition() {
    return partitionData;
  }

  @Override
  public long recordCount() {
    return recordCount;
  }

  @Override
  public long fileSizeInBytes() {
    return fileSizeInBytes;
  }

  @Override
  public long blockSizeInBytes() {
    return blockSizeInBytes;
  }

  @Override
  public Integer fileOrdinal() {
    return fileOrdinal;
  }

  @Override
  public List<Integer> sortColumns() {
    return sortColumns;
  }

  @Override
  public Map<Integer, Long> columnSizes() {
    return columnSizes;
  }

  @Override
  public Map<Integer, Long> valueCounts() {
    return valueCounts;
  }

  @Override
  public Map<Integer, Long> nullValueCounts() {
    return nullValueCounts;
  }

  @Override
  public Map<Integer, ByteBuffer> lowerBounds() {
    return lowerBounds;
  }

  @Override
  public Map<Integer, ByteBuffer> upperBounds() {
    return upperBounds;
  }

  @Override
  public org.apache.avro.Schema getSchema() {
    // lazily rebuild the Avro schema; it is transient and lost on serialization
    if (avroSchema == null) {
      this.avroSchema = getAvroSchema(partitionType);
    }
    return avroSchema;
  }

  /**
   * Avro write path: stores a value by field ordinal. Projected ordinals are remapped
   * to full-schema ordinals first; unknown ordinals are ignored for forward
   * compatibility with newer format versions.
   */
  @Override
  @SuppressWarnings("unchecked")
  public void put(int i, Object v) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        // always coerce to String for Serializable
        this.filePath = v.toString();
        return;
      case 1:
        this.format = FileFormat.valueOf(v.toString());
        return;
      case 2:
        this.partitionData = (PartitionData) v;
        return;
      case 3:
        this.recordCount = (Long) v;
        return;
      case 4:
        this.fileSizeInBytes = (Long) v;
        return;
      case 5:
        this.blockSizeInBytes = (Long) v;
        return;
      case 6:
        this.fileOrdinal = (Integer) v;
        return;
      case 7:
        this.sortColumns = (List<Integer>) v;
        return;
      case 8:
        this.columnSizes = (Map<Integer, Long>) v;
        return;
      case 9:
        this.valueCounts = (Map<Integer, Long>) v;
        return;
      case 10:
        this.nullValueCounts = (Map<Integer, Long>) v;
        return;
      case 11:
        this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
        return;
      case 12:
        this.upperBounds= SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  /**
   * Avro read path: returns a value by field ordinal, remapping projected ordinals
   * to full-schema ordinals first.
   */
  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return filePath;
      case 1:
        return format != null ? format.toString() : null;
      case 2:
        return partitionData;
      case 3:
        return recordCount;
      case 4:
        return fileSizeInBytes;
      case 5:
        return blockSizeInBytes;
      case 6:
        return fileOrdinal;
      case 7:
        return sortColumns;
      case 8:
        return columnSizes;
      case 9:
        return valueCounts;
      case 10:
        return nullValueCounts;
      case 11:
        return lowerBounds;
      case 12:
        return upperBounds;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }

  // builds the full Avro schema for a data file with the given partition type
  private static org.apache.avro.Schema getAvroSchema(Types.StructType partitionType) {
    Types.StructType type = DataFile.getType(partitionType);
    return AvroSchemaUtil.convert(type, ImmutableMap.of(
        type, GenericDataFile.class.getName(),
        partitionType, PartitionData.class.getName()));
  }

  @Override
  public int size() {
    // number of fields in the full data file struct (ordinals 0-12 above)
    return 13;
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }

  @Override
  public <T> void set(int pos, T value) {
    put(pos, value);
  }

  @Override
  public DataFile copy() {
    return new GenericDataFile(this);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("file_path", filePath)
        .add("file_format", format)
        .add("partition", partitionData)
        .add("record_count", recordCount)
        .add("file_size_in_bytes", fileSizeInBytes)
        .add("block_size_in_bytes", blockSizeInBytes)
        .add("column_sizes", columnSizes)
        .add("value_counts", valueCounts)
        .add("null_value_counts", nullValueCounts)
        .add("lower_bounds", lowerBounds)
        .add("upper_bounds", upperBounds)
        .toString();
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.TypeUtil;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
/**
* Metadata for a table.
*/
public class TableMetadata {
static final int TABLE_FORMAT_VERSION = 1;
static final int INITIAL_SPEC_ID = 0;
public static TableMetadata newTableMetadata(TableOperations ops,
Schema schema,
PartitionSpec spec,
String location) {
return newTableMetadata(ops, schema, spec, location, ImmutableMap.of());
}
public static TableMetadata newTableMetadata(TableOperations ops,
Schema schema,
PartitionSpec spec,
String location,
Map<String, String> properties) {
// reassign all column ids to ensure consistency
AtomicInteger lastColumnId = new AtomicInteger(0);
Schema freshSchema = TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
// rebuild the partition spec using the new column ids
PartitionSpec.Builder specBuilder = PartitionSpec.builderFor(freshSchema)
.withSpecId(INITIAL_SPEC_ID);
for (PartitionField field : spec.fields()) {
// look up the name of the source field in the old schema to get the new schema's id
String sourceName = schema.findColumnName(field.sourceId());
specBuilder.add(
freshSchema.findField(sourceName).fieldId(),
field.name(),
field.transform().toString());
}
PartitionSpec freshSpec = specBuilder.build();
return new TableMetadata(ops, null, location,
System.currentTimeMillis(),
lastColumnId.get(), freshSchema, INITIAL_SPEC_ID, ImmutableList.of(freshSpec),
ImmutableMap.copyOf(properties), -1, ImmutableList.of(), ImmutableList.of());
}
public static class SnapshotLogEntry {
private final long timestampMillis;
private final long snapshotId;
SnapshotLogEntry(long timestampMillis, long snapshotId) {
this.timestampMillis = timestampMillis;
this.snapshotId = snapshotId;
}
public long timestampMillis() {
return timestampMillis;
}
public long snapshotId() {
return snapshotId;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
SnapshotLogEntry that = (SnapshotLogEntry) other;
return timestampMillis == that.timestampMillis && snapshotId == that.snapshotId;
}
@Override
public int hashCode() {
return Objects.hashCode(timestampMillis, snapshotId);
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("timestampMillis", timestampMillis)
.add("snapshotId", snapshotId)
.toString();
}
}
private final TableOperations ops;
private final InputFile file;
// stored metadata
private final String location;
private final long lastUpdatedMillis;
private final int lastColumnId;
private final Schema schema;
private final int defaultSpecId;
private final List<PartitionSpec> specs;
private final Map<String, String> properties;
private final long currentSnapshotId;
private final List<Snapshot> snapshots;
private final Map<Long, Snapshot> snapshotsById;
private final Map<Integer, PartitionSpec> specsById;
private final List<SnapshotLogEntry> snapshotLog;
TableMetadata(TableOperations ops,
InputFile file,
String location,
long lastUpdatedMillis,
int lastColumnId,
Schema schema,
int defaultSpecId,
List<PartitionSpec> specs,
Map<String, String> properties,
long currentSnapshotId,
List<Snapshot> snapshots,
List<SnapshotLogEntry> snapshotLog) {
this.ops = ops;
this.file = file;
this.location = location;
this.lastUpdatedMillis = lastUpdatedMillis;
this.lastColumnId = lastColumnId;
this.schema = schema;
this.specs = specs;
this.defaultSpecId = defaultSpecId;
this.properties = properties;
this.currentSnapshotId = currentSnapshotId;
this.snapshots = snapshots;
this.snapshotLog = snapshotLog;
this.snapshotsById = indexSnapshots(snapshots);
this.specsById = indexSpecs(specs);
SnapshotLogEntry last = null;
for (SnapshotLogEntry logEntry : snapshotLog) {
if (last != null) {
Preconditions.checkArgument(
(logEntry.timestampMillis() - last.timestampMillis()) >= 0,
"[BUG] Expected sorted snapshot log entries.");
}
last = logEntry;
}
Preconditions.checkArgument(
currentSnapshotId < 0 || snapshotsById.containsKey(currentSnapshotId),
"Invalid table metadata: Cannot find current version");
}
public InputFile file() {
return file;
}
public long lastUpdatedMillis() {
return lastUpdatedMillis;
}
public int lastColumnId() {
return lastColumnId;
}
public Schema schema() {
return schema;
}
public PartitionSpec spec() {
return specsById.get(defaultSpecId);
}
public int defaultSpecId() {
return defaultSpecId;
}
public PartitionSpec spec(int id) {
return specsById.get(id);
}
public List<PartitionSpec> specs() {
return specs;
}
public String location() {
return location;
}
public Map<String, String> properties() {
return properties;
}
public boolean propertyAsBoolean(String property, boolean defaultValue) {
String value = properties.get(property);
if (value != null) {
return Boolean.parseBoolean(properties.get(property));
}
return defaultValue;
}
public int propertyAsInt(String property, int defaultValue) {
String value = properties.get(property);
if (value != null) {
return Integer.parseInt(properties.get(property));
}
return defaultValue;
}
public long propertyAsLong(String property, long defaultValue) {
String value = properties.get(property);
if (value != null) {
return Long.parseLong(properties.get(property));
}
return defaultValue;
}
public Snapshot snapshot(long snapshotId) {
return snapshotsById.get(snapshotId);
}
public Snapshot currentSnapshot() {
return snapshotsById.get(currentSnapshotId);
}
public List<Snapshot> snapshots() {
return snapshots;
}
public List<SnapshotLogEntry> snapshotLog() {
return snapshotLog;
}
public TableMetadata updateTableLocation(String newLocation) {
return new TableMetadata(ops, null, newLocation,
System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
currentSnapshotId, snapshots, snapshotLog);
}
public TableMetadata updateSchema(Schema schema, int lastColumnId) {
PartitionSpec.checkCompatibility(spec(), schema);
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
currentSnapshotId, snapshots, snapshotLog);
}
public TableMetadata updatePartitionSpec(PartitionSpec partitionSpec) {
PartitionSpec.checkCompatibility(partitionSpec, schema);
// if the spec already exists, use the same ID. otherwise, use 1 more than the highest ID.
int newDefaultSpecId = INITIAL_SPEC_ID;
for (PartitionSpec spec : specs) {
if (partitionSpec.compatibleWith(spec)) {
newDefaultSpecId = spec.specId();
break;
} else if (newDefaultSpecId <= spec.specId()) {
newDefaultSpecId = spec.specId() + 1;
}
}
Preconditions.checkArgument(defaultSpecId != newDefaultSpecId,
"Cannot set default partition spec to the current default");
ImmutableList.Builder<PartitionSpec> builder = ImmutableList.<PartitionSpec>builder()
.addAll(specs);
if (!specsById.containsKey(newDefaultSpecId)) {
// get a fresh spec to ensure the spec ID is set to the new default
builder.add(freshSpec(newDefaultSpecId, schema, partitionSpec));
}
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId, schema, newDefaultSpecId,
builder.build(), properties,
currentSnapshotId, snapshots, snapshotLog);
}
public TableMetadata replaceCurrentSnapshot(Snapshot snapshot) {
List<Snapshot> newSnapshots = ImmutableList.<Snapshot>builder()
.addAll(snapshots)
.add(snapshot)
.build();
List<SnapshotLogEntry> newSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
.addAll(snapshotLog)
.add(new SnapshotLogEntry(snapshot.timestampMillis(), snapshot.snapshotId()))
.build();
return new TableMetadata(ops, null, location,
snapshot.timestampMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
snapshot.snapshotId(), newSnapshots, newSnapshotLog);
}
public TableMetadata removeSnapshotsIf(Predicate<Snapshot> removeIf) {
List<Snapshot> filtered = Lists.newArrayListWithExpectedSize(snapshots.size());
for (Snapshot snapshot : snapshots) {
// keep the current snapshot and any snapshots that do not match the removeIf condition
if (snapshot.snapshotId() == currentSnapshotId || !removeIf.test(snapshot)) {
filtered.add(snapshot);
}
}
// update the snapshot log
Set<Long> validIds = Sets.newHashSet(Iterables.transform(filtered, Snapshot::snapshotId));
List<SnapshotLogEntry> newSnapshotLog = Lists.newArrayList();
for (SnapshotLogEntry logEntry : snapshotLog) {
if (validIds.contains(logEntry.snapshotId())) {
// copy the log entries that are still valid
newSnapshotLog.add(logEntry);
} else {
// any invalid entry causes the history before it to be removed. otherwise, there could be
// history gaps that cause time-travel queries to produce incorrect results. for example,
// if history is [(t1, s1), (t2, s2), (t3, s3)] and s2 is removed, the history cannot be
// [(t1, s1), (t3, s3)] because it appears that s3 was current during the time between t2
// and t3 when in fact s2 was the current snapshot.
newSnapshotLog.clear();
}
}
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
currentSnapshotId, filtered, ImmutableList.copyOf(newSnapshotLog));
}
public TableMetadata rollbackTo(Snapshot snapshot) {
ValidationException.check(snapshotsById.containsKey(snapshot.snapshotId()),
"Cannot set current snapshot to unknown: %s", snapshot.snapshotId());
long nowMillis = System.currentTimeMillis();
List<SnapshotLogEntry> newSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
.addAll(snapshotLog)
.add(new SnapshotLogEntry(nowMillis, snapshot.snapshotId()))
.build();
return new TableMetadata(ops, null, location,
nowMillis, lastColumnId, schema, defaultSpecId, specs, properties,
snapshot.snapshotId(), snapshots, newSnapshotLog);
}
public TableMetadata replaceProperties(Map<String, String> newProperties) {
ValidationException.check(newProperties != null, "Cannot set properties to null");
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, newProperties,
currentSnapshotId, snapshots, snapshotLog);
}
public TableMetadata removeSnapshotLogEntries(Set<Long> snapshotIds) {
List<SnapshotLogEntry> newSnapshotLog = Lists.newArrayList();
for (SnapshotLogEntry logEntry : snapshotLog) {
if (!snapshotIds.contains(logEntry.snapshotId())) {
// copy the log entries that are still valid
newSnapshotLog.add(logEntry);
}
}
ValidationException.check(currentSnapshotId < 0 || // not set
Iterables.getLast(newSnapshotLog).snapshotId() == currentSnapshotId,
"Cannot set invalid snapshot log: latest entry is not the current snapshot");
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
currentSnapshotId, snapshots, newSnapshotLog);
}
public TableMetadata buildReplacement(Schema schema, PartitionSpec partitionSpec,
Map<String, String> properties) {
AtomicInteger lastColumnId = new AtomicInteger(0);
Schema freshSchema = TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
int nextSpecId = TableMetadata.INITIAL_SPEC_ID;
for (Integer specId : specsById.keySet()) {
if (nextSpecId <= specId) {
nextSpecId = specId + 1;
}
}
// rebuild the partition spec using the new column ids
PartitionSpec freshSpec = freshSpec(nextSpecId, freshSchema, partitionSpec);
// if the spec already exists, use the same ID. otherwise, use 1 more than the highest ID.
int specId = nextSpecId;
for (PartitionSpec spec : specs) {
if (freshSpec.compatibleWith(spec)) {
specId = spec.specId();
break;
}
}
ImmutableList.Builder<PartitionSpec> builder = ImmutableList.<PartitionSpec>builder()
.addAll(specs);
if (!specsById.containsKey(specId)) {
builder.add(freshSpec);
}
Map<String, String> newProperties = Maps.newHashMap();
newProperties.putAll(this.properties);
newProperties.putAll(properties);
return new TableMetadata(ops, null, location,
System.currentTimeMillis(), lastColumnId.get(), freshSchema,
specId, builder.build(), ImmutableMap.copyOf(newProperties),
-1, snapshots, ImmutableList.of());
}
/**
 * Rebuilds a partition spec against a new schema.
 * <p>
 * Each partition field's source column is looked up by name in the old schema and remapped
 * to the corresponding field id in the new schema; the field name and transform are kept.
 */
private static PartitionSpec freshSpec(int specId, Schema schema, PartitionSpec partitionSpec) {
  PartitionSpec.Builder builder = PartitionSpec.builderFor(schema)
      .withSpecId(specId);

  for (PartitionField field : partitionSpec.fields()) {
    // look up the name of the source field in the old schema to get the new schema's id
    String sourceName = partitionSpec.schema().findColumnName(field.sourceId());
    int newSourceId = schema.findField(sourceName).fieldId();
    builder.add(newSourceId, field.name(), field.transform().toString());
  }

  return builder.build();
}
/** Builds an immutable lookup map from snapshot id to {@link Snapshot}. */
private static Map<Long, Snapshot> indexSnapshots(List<Snapshot> snapshots) {
  ImmutableMap.Builder<Long, Snapshot> byId = ImmutableMap.builder();
  for (Snapshot snapshot : snapshots) {
    byId.put(snapshot.snapshotId(), snapshot);
  }
  return byId.build();
}
/** Builds an immutable lookup map from spec id to {@link PartitionSpec}. */
private static Map<Integer, PartitionSpec> indexSpecs(List<PartitionSpec> specs) {
  ImmutableMap.Builder<Integer, PartitionSpec> bySpecId = ImmutableMap.builder();
  for (PartitionSpec spec : specs) {
    bySpecId.put(spec.specId(), spec);
  }
  return bySpecId.build();
}
}
| 1,944 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/MergeAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
/**
 * {@link AppendFiles} implementation that merges manifests to produce a minimal number of
 * manifest files.
 * <p>
 * This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
 */
class MergeAppend extends MergingSnapshotUpdate implements AppendFiles {
  MergeAppend(TableOperations tableOps) {
    super(tableOps);
  }

  @Override
  public MergeAppend appendFile(DataFile dataFile) {
    // record the file as an addition in the pending snapshot update
    add(dataFile);
    return this;
  }
}
| 1,945 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * Well-known Iceberg table property keys and their default values.
 * <p>
 * This is a constants holder and is not meant to be instantiated.
 */
public class TableProperties {

  private TableProperties() {
    // prevent instantiation of this constants class
  }

  // commit retry behavior
  public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
  public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;
  public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
  public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;
  public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
  public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute
  public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
  public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes

  // manifest merging on commit
  public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
  public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB
  public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
  public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;

  // data file write formats and per-format tuning
  public static final String DEFAULT_FILE_FORMAT = "write.format.default";
  public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";
  public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
  public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB
  public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
  public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB
  public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
  public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB
  public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
  public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";
  public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
  public static final String AVRO_COMPRESSION_DEFAULT = "gzip";

  // read-side split planning
  public static final String SPLIT_SIZE = "read.split.target-size";
  public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB
  public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
  public static final int SPLIT_LOOKBACK_DEFAULT = 10;

  // object-store layout for data files
  public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
  public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;
  public static final String OBJECT_STORE_PATH = "write.object-storage.path";

  // This only applies to files written after this property is set. Files previously written aren't relocated to
  // reflect this parameter.
  // If not set, defaults to a "data" folder underneath the root path of the table.
  public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";

  // whether snapshots reference manifests through a manifest list file
  public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
  public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
}
| 1,946 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/HasTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * Used to expose a table's TableOperations.
 * <p>
 * Implemented by table instances whose underlying {@link TableOperations} needs to be
 * accessible to internal callers.
 */
public interface HasTableOperations {
  /**
   * Returns the {@link TableOperations} backing this table.
   *
   * @return this table's operations implementation
   */
  TableOperations operations();
}
| 1,947 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SnapshotParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.util.JsonUtil;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
/**
 * JSON serialization and deserialization for {@link Snapshot} instances.
 * <p>
 * A snapshot is written as an object with its id, optional parent id, timestamp, and either
 * a manifest-list location or an embedded array of manifest paths. This is a static utility
 * class; a private constructor prevents instantiation.
 */
public class SnapshotParser {

  private SnapshotParser() {
    // static utility class; prevent instantiation
  }

  private static final String SNAPSHOT_ID = "snapshot-id";
  private static final String PARENT_SNAPSHOT_ID = "parent-snapshot-id";
  private static final String TIMESTAMP_MS = "timestamp-ms";
  private static final String MANIFESTS = "manifests";
  private static final String MANIFEST_LIST = "manifest-list";

  /**
   * Writes a snapshot as a JSON object to the given generator.
   *
   * @param snapshot the snapshot to serialize
   * @param generator the generator to write JSON to; not closed by this method
   * @throws IOException if the generator fails to write
   */
  static void toJson(Snapshot snapshot, JsonGenerator generator)
      throws IOException {
    generator.writeStartObject();
    generator.writeNumberField(SNAPSHOT_ID, snapshot.snapshotId());
    if (snapshot.parentId() != null) {
      generator.writeNumberField(PARENT_SNAPSHOT_ID, snapshot.parentId());
    }
    generator.writeNumberField(TIMESTAMP_MS, snapshot.timestampMillis());

    String manifestList = snapshot.manifestListLocation();
    if (manifestList != null) {
      // write just the location. manifests should not be embedded in JSON along with a list
      generator.writeStringField(MANIFEST_LIST, manifestList);
    } else {
      // embed the manifest list in the JSON
      generator.writeArrayFieldStart(MANIFESTS);
      for (ManifestFile file : snapshot.manifests()) {
        generator.writeString(file.path());
      }
      generator.writeEndArray();
    }

    generator.writeEndObject();
  }

  /**
   * Serializes a snapshot to a pretty-printed JSON string.
   *
   * @throws RuntimeIOException if writing the JSON fails
   */
  public static String toJson(Snapshot snapshot) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      generator.useDefaultPrettyPrinter();
      toJson(snapshot, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json for: %s", snapshot);
    }
  }

  /**
   * Parses a snapshot from a parsed JSON node.
   *
   * @param ops table operations used to resolve manifest file locations
   * @param node the JSON object to parse
   * @return the parsed snapshot
   */
  static Snapshot fromJson(TableOperations ops, JsonNode node) {
    Preconditions.checkArgument(node.isObject(),
        "Cannot parse table version from a non-object: %s", node);

    long versionId = JsonUtil.getLong(SNAPSHOT_ID, node);
    Long parentId = null;
    if (node.has(PARENT_SNAPSHOT_ID)) {
      parentId = JsonUtil.getLong(PARENT_SNAPSHOT_ID, node);
    }
    long timestamp = JsonUtil.getLong(TIMESTAMP_MS, node);

    if (node.has(MANIFEST_LIST)) {
      // the manifest list is stored in a manifest list file
      String manifestList = JsonUtil.getString(MANIFEST_LIST, node);
      return new BaseSnapshot(ops, versionId, parentId, timestamp, ops.io().newInputFile(manifestList));

    } else {
      // fall back to an embedded manifest list. pass in the manifest's InputFile so length can be
      // loaded lazily, if it is needed
      List<ManifestFile> manifests = Lists.transform(JsonUtil.getStringList(MANIFESTS, node),
          location -> new GenericManifestFile(ops.io().newInputFile(location), 0));
      return new BaseSnapshot(ops, versionId, parentId, timestamp, manifests);
    }
  }

  /**
   * Parses a snapshot from a JSON string.
   *
   * @throws RuntimeIOException if the string cannot be parsed as JSON
   */
  public static Snapshot fromJson(TableOperations ops, String json) {
    try {
      return fromJson(ops, JsonUtil.mapper().readValue(json, JsonNode.class));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read version from json: %s", json);
    }
  }
}
| 1,948 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SnapshotUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.Exceptions;
import com.netflix.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.MANIFEST_LISTS_ENABLED;
import static com.netflix.iceberg.TableProperties.MANIFEST_LISTS_ENABLED_DEFAULT;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
/**
 * Base class for updates that produce a new table snapshot.
 * <p>
 * Subclasses implement {@link #apply(TableMetadata)} to produce the new snapshot's manifests
 * and {@link #cleanUncommitted(Set)} to delete manifests from failed attempts. This class
 * handles snapshot id allocation, optional manifest-list writing, commit retries, and
 * clean-up of files written by attempts that did not commit.
 */
abstract class SnapshotUpdate implements PendingUpdate<Snapshot> {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotUpdate.class);

  // shared empty set passed to cleanUncommitted when nothing was committed
  static final Set<ManifestFile> EMPTY_SET = Sets.newHashSet();

  /**
   * Cache used to enrich ManifestFile instances that are written to a ManifestListWriter.
   */
  private final LoadingCache<ManifestFile, ManifestFile> manifestsWithMetadata = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<ManifestFile, ManifestFile>() {
        @Override
        public ManifestFile load(ManifestFile file) {
          if (file.snapshotId() != null) {
            // the manifest already carries metadata; no need to re-read it
            return file;
          }
          return addMetadata(ops, file);
        }
      });

  private final TableOperations ops;
  // unique id for this update, used in the names of manifest and manifest list files
  private final String commitUUID = UUID.randomUUID().toString();
  // counts manifest list file names handed out; incremented once per commit attempt
  private final AtomicInteger attempt = new AtomicInteger(0);
  // locations of every manifest list written by this update, kept for clean-up
  private final List<String> manifestLists = Lists.newArrayList();
  // lazily assigned in snapshotId(); null until the first call
  private Long snapshotId = null;
  private TableMetadata base = null;

  protected SnapshotUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  /**
   * Apply the update's changes to the base table metadata and return the new manifest list.
   *
   * @param base the base table metadata to apply changes to
   * @return a manifest list for the new snapshot.
   */
  protected abstract List<ManifestFile> apply(TableMetadata base);

  /**
   * Clean up any uncommitted manifests that were created.
   * <p>
   * Manifests may not be committed if apply is called more because a commit conflict has occurred.
   * Implementations may keep around manifests because the same changes will be made by both apply
   * calls. This method instructs the implementation to clean up those manifests and passes the
   * paths of the manifests that were actually committed.
   *
   * @param committed a set of manifest paths that were actually committed
   */
  protected abstract void cleanUncommitted(Set<ManifestFile> committed);

  /**
   * Refreshes the base metadata, applies the subclass's changes, and builds the new snapshot.
   * <p>
   * When manifest lists are enabled, the manifests are enriched with metadata (in parallel,
   * with retries) and written to a manifest list file; otherwise the snapshot holds the
   * manifests directly.
   */
  @Override
  public Snapshot apply() {
    this.base = ops.refresh();
    Long parentSnapshotId = base.currentSnapshot() != null ?
        base.currentSnapshot().snapshotId() : null;

    List<ManifestFile> manifests = apply(base);

    if (base.propertyAsBoolean(MANIFEST_LISTS_ENABLED, MANIFEST_LISTS_ENABLED_DEFAULT)) {
      OutputFile manifestList = manifestListPath();

      try (ManifestListWriter writer = new ManifestListWriter(
          manifestList, snapshotId(), parentSnapshotId)) {
        // keep track of the manifest lists created
        manifestLists.add(manifestList.location());

        ManifestFile[] manifestFiles = new ManifestFile[manifests.size()];

        // enrich each manifest with counts and partition summaries, in parallel with retries
        Tasks.range(manifestFiles.length)
            .stopOnFailure().throwFailureWhenFinished()
            .retry(4).exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */ )
            .executeWith(getWorkerPool())
            .run(index ->
                manifestFiles[index] = manifestsWithMetadata.getUnchecked(manifests.get(index)));

        writer.addAll(Arrays.asList(manifestFiles));

      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to write manifest list file");
      }

      return new BaseSnapshot(ops,
          snapshotId(), parentSnapshotId, System.currentTimeMillis(),
          ops.io().newInputFile(manifestList.location()));

    } else {
      return new BaseSnapshot(ops,
          snapshotId(), parentSnapshotId, System.currentTimeMillis(), manifests);
    }
  }

  /**
   * Commits the new snapshot, retrying on {@link CommitFailedException} with exponential
   * backoff configured by the table's commit properties.
   * <p>
   * On failure, all files written by this update are deleted. On success, manifests and
   * manifest lists from attempts that did not commit are cleaned up.
   */
  @Override
  public void commit() {
    // this is always set to the latest commit attempt's snapshot id.
    AtomicLong newSnapshotId = new AtomicLong(-1L);
    try {
      Tasks.foreach(ops)
          .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
          .exponentialBackoff(
              base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
              base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
              base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
              2.0 /* exponential */ )
          .onlyRetryOn(CommitFailedException.class)
          .run(ops -> {
            // apply() refreshes base, so each attempt builds on the latest metadata
            Snapshot newSnapshot = apply();
            newSnapshotId.set(newSnapshot.snapshotId());
            TableMetadata updated = base.replaceCurrentSnapshot(newSnapshot);
            ops.commit(base, updated);
          });

    } catch (RuntimeException e) {
      // delete everything this update wrote, then rethrow the original failure
      Exceptions.suppressAndThrow(e, this::cleanAll);
    }

    LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName());

    try {
      // at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by
      // id in case another commit was added between this commit and the refresh.
      Snapshot saved = ops.refresh().snapshot(newSnapshotId.get());
      if (saved != null) {
        cleanUncommitted(Sets.newHashSet(saved.manifests()));
        // also clean up unused manifest lists created by multiple attempts
        for (String manifestList : manifestLists) {
          if (!saved.manifestListLocation().equals(manifestList)) {
            ops.io().deleteFile(manifestList);
          }
        }
      } else {
        // saved may not be present if the latest metadata couldn't be loaded due to eventual
        // consistency problems in refresh. in that case, don't clean up.
        LOG.info("Failed to load committed snapshot, skipping manifest clean-up");
      }

    } catch (RuntimeException e) {
      LOG.info("Failed to load committed table metadata, skipping manifest clean-up", e);
    }
  }

  /** Deletes every manifest list written by this update and asks the subclass to clean manifests. */
  protected void cleanAll() {
    for (String manifestList : manifestLists) {
      ops.io().deleteFile(manifestList);
    }
    manifestLists.clear();
    cleanUncommitted(EMPTY_SET);
  }

  /** Deletes a file through the table's FileIO; for use by subclasses during clean-up. */
  protected void deleteFile(String path) {
    ops.io().deleteFile(path);
  }

  /**
   * Returns a new metadata-location output file for this attempt's manifest list. Each call
   * produces a distinct name by incrementing the attempt counter.
   */
  protected OutputFile manifestListPath() {
    return ops.io().newOutputFile(ops.metadataFileLocation(FileFormat.AVRO.addExtension(
        String.format("snap-%d-%d-%s", snapshotId(), attempt.incrementAndGet(), commitUUID))));
  }

  /** Returns an output file for the i-th manifest written by this update. */
  protected OutputFile manifestPath(int i) {
    return ops.io().newOutputFile(
        ops.metadataFileLocation(FileFormat.AVRO.addExtension(commitUUID + "-m" + i)));
  }

  /** Lazily allocates (on first call) and returns the id of the snapshot this update produces. */
  protected long snapshotId() {
    if (snapshotId == null) {
      this.snapshotId = ops.newSnapshotId();
    }
    return snapshotId;
  }

  /**
   * Reads a manifest to compute added/existing/deleted file counts, partition summaries, and
   * a representative snapshot id, returning a new ManifestFile that carries that metadata.
   */
  private static ManifestFile addMetadata(TableOperations ops, ManifestFile manifest) {
    try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
      PartitionSummary stats = new PartitionSummary(ops.current().spec(manifest.partitionSpecId()));
      int addedFiles = 0;
      int existingFiles = 0;
      int deletedFiles = 0;
      // snapshot id of the first ADDED or DELETED entry, if any
      Long snapshotId = null;
      long maxSnapshotId = Long.MIN_VALUE;
      for (ManifestEntry entry : reader.entries()) {
        if (entry.snapshotId() > maxSnapshotId) {
          maxSnapshotId = entry.snapshotId();
        }
        switch (entry.status()) {
          case ADDED:
            addedFiles += 1;
            if (snapshotId == null) {
              snapshotId = entry.snapshotId();
            }
            break;
          case EXISTING:
            existingFiles += 1;
            break;
          case DELETED:
            deletedFiles += 1;
            if (snapshotId == null) {
              snapshotId = entry.snapshotId();
            }
            break;
        }
        stats.update(entry.file().partition());
      }

      if (snapshotId == null) {
        // if no files were added or deleted, use the largest snapshot ID in the manifest
        snapshotId = maxSnapshotId;
      }

      return new GenericManifestFile(manifest.path(), manifest.length(), manifest.partitionSpecId(),
          snapshotId, addedFiles, existingFiles, deletedFiles, stats.summaries());

    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read manifest: %s", manifest.path());
    }
  }
}
| 1,949 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ConfigProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import org.apache.hadoop.conf.Configuration;
/**
 * Helpers for reading Iceberg-specific keys from a Hadoop {@link Configuration}.
 * <p>
 * This is a static utility class; a private constructor prevents instantiation.
 */
public class ConfigProperties {

  private ConfigProperties() {
    // prevent instantiation of this utility class
  }

  public static final String COMPRESS_METADATA = "iceberg.compress.metadata";
  public static final boolean COMPRESS_METADATA_DEFAULT = false;

  /**
   * Returns whether table metadata files should be compressed, from the
   * {@code iceberg.compress.metadata} key (default: false).
   *
   * @param configuration the Hadoop configuration to read
   * @return true if metadata compression is enabled
   */
  public static boolean shouldCompress(Configuration configuration) {
    // note: the redundant `final` modifier on this static method was removed
    return configuration.getBoolean(COMPRESS_METADATA, COMPRESS_METADATA_DEFAULT);
  }
}
| 1,950 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/DataFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Conversions;
import org.apache.hadoop.fs.FileStatus;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Locale;
import java.util.Map;
/**
 * Static factories and a builder for creating {@link DataFile} instances from input files,
 * Hadoop file statuses, partition data, and metrics.
 */
public class DataFiles {
  // fallback block size (64 MB) used when the file's actual block size is unknown
  private static final long DEFAULT_BLOCK_SIZE = 64*1024*1024;

  /** Creates an empty PartitionData matching the spec's partition type. */
  private static PartitionData newPartitionData(PartitionSpec spec) {
    return new PartitionData(spec.partitionType());
  }

  /**
   * Copies partition values from a StructLike into a PartitionData, reusing {@code reuse}
   * when it is non-null. Values are read using the spec's expected Java classes.
   */
  private static PartitionData copyPartitionData(PartitionSpec spec, StructLike partitionData, PartitionData reuse) {
    PartitionData data = reuse;
    if (data == null) {
      data = newPartitionData(spec);
    }

    Class<?>[] javaClasses = spec.javaClasses();
    List<PartitionField> fields = spec.fields();
    for (int i = 0; i < fields.size(); i += 1) {
      data.set(i, partitionData.get(i, javaClasses[i]));
    }

    return data;
  }

  /**
   * Parses a Hive-style partition path ("a=1/b=2") into a PartitionData, reusing
   * {@code reuse} when it is non-null.
   * <p>
   * The path must have exactly one "name=value" segment per partition field, in spec
   * order, with names matching the spec's field names.
   */
  private static PartitionData fillFromPath(PartitionSpec spec, String partitionPath, PartitionData reuse) {
    PartitionData data = reuse;
    if (data == null) {
      data = newPartitionData(spec);
    }

    // split with limit -1 so trailing empty segments are kept and counted
    String[] partitions = partitionPath.split("/", -1);
    Preconditions.checkArgument(partitions.length <= spec.fields().size(),
        "Invalid partition data, too many fields (expecting %s): %s",
        spec.fields().size(), partitionPath);
    Preconditions.checkArgument(partitions.length >= spec.fields().size(),
        "Invalid partition data, not enough fields (expecting %s): %s",
        spec.fields().size(), partitionPath);

    for (int i = 0; i < partitions.length; i += 1) {
      PartitionField field = spec.fields().get(i);
      String[] parts = partitions[i].split("=", 2);
      Preconditions.checkArgument(
          parts.length == 2 &&
          parts[0] != null &&
          field.name().equals(parts[0]),
          "Invalid partition: " + partitions[i]);

      data.set(i, Conversions.fromPartitionString(data.getType(i), parts[1]));
    }

    return data;
  }

  /** Parses a partition path (see fillFromPath) into a new PartitionData. */
  public static PartitionData data(PartitionSpec spec, String partitionPath) {
    return fillFromPath(spec, partitionPath, null);
  }

  /** Copies a StructLike's partition values into a new PartitionData. */
  public static PartitionData copy(PartitionSpec spec, StructLike partition) {
    return copyPartitionData(spec, partition, null);
  }

  /**
   * Creates an unpartitioned DataFile from an input file; format is inferred from the
   * file name. Delegates to fromStat when the input is a HadoopInputFile.
   */
  public static DataFile fromInputFile(InputFile file, long rowCount) {
    if (file instanceof HadoopInputFile) {
      return fromStat(((HadoopInputFile) file).getStat(), rowCount);
    }

    String location = file.location();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(location, format, rowCount, file.getLength(), DEFAULT_BLOCK_SIZE);
  }

  /** Creates an unpartitioned DataFile from a Hadoop FileStatus. */
  public static DataFile fromStat(FileStatus stat, long rowCount) {
    String location = stat.getPath().toString();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(location, format, rowCount, stat.getLen(), stat.getBlockSize());
  }

  /** Creates a partitioned DataFile from an input file; format inferred from the name. */
  public static DataFile fromInputFile(InputFile file, PartitionData partition, long rowCount) {
    if (file instanceof HadoopInputFile) {
      return fromStat(((HadoopInputFile) file).getStat(), partition, rowCount);
    }

    String location = file.location();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(
        location, format, partition, rowCount, file.getLength(), DEFAULT_BLOCK_SIZE);
  }

  /** Creates a partitioned DataFile from a Hadoop FileStatus. */
  public static DataFile fromStat(FileStatus stat, PartitionData partition, long rowCount) {
    String location = stat.getPath().toString();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(
        location, format, partition, rowCount, stat.getLen(), stat.getBlockSize());
  }

  /** Creates a partitioned DataFile with metrics from an input file. */
  public static DataFile fromInputFile(InputFile file, PartitionData partition, Metrics metrics) {
    if (file instanceof HadoopInputFile) {
      return fromStat(((HadoopInputFile) file).getStat(), partition, metrics);
    }

    String location = file.location();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(
        location, format, partition, file.getLength(), DEFAULT_BLOCK_SIZE, metrics);
  }

  /** Creates a partitioned DataFile with metrics from a Hadoop FileStatus. */
  public static DataFile fromStat(FileStatus stat, PartitionData partition, Metrics metrics) {
    String location = stat.getPath().toString();
    FileFormat format = FileFormat.fromFileName(location);
    return new GenericDataFile(
        location, format, partition, stat.getLen(), stat.getBlockSize(), metrics);
  }

  /**
   * Creates a partitioned DataFile with metrics, forcing the PARQUET format regardless of
   * the file name.
   */
  public static DataFile fromParquetInputFile(InputFile file,
                                              PartitionData partition,
                                              Metrics metrics) {
    if (file instanceof HadoopInputFile) {
      return fromParquetStat(((HadoopInputFile) file).getStat(), partition, metrics);
    }

    String location = file.location();
    FileFormat format = FileFormat.PARQUET;
    return new GenericDataFile(
        location, format, partition, file.getLength(), DEFAULT_BLOCK_SIZE, metrics);
  }

  /** Creates a PARQUET DataFile with metrics from a Hadoop FileStatus. */
  public static DataFile fromParquetStat(FileStatus stat, PartitionData partition, Metrics metrics) {
    String location = stat.getPath().toString();
    FileFormat format = FileFormat.PARQUET;
    return new GenericDataFile(
        location, format, partition, stat.getLen(), stat.getBlockSize(), metrics);
  }

  /** Returns a Builder for a partitioned table with the given spec. */
  public static Builder builder(PartitionSpec spec) {
    return new Builder(spec);
  }

  /** Returns a Builder for an unpartitioned table. */
  static Builder builder() {
    return new Builder();
  }

  /**
   * Mutable builder for DataFile instances. Requires a path, a size, and a record count;
   * the format is inferred from the path when not set explicitly.
   */
  public static class Builder {
    private final PartitionSpec spec;
    private final boolean isPartitioned;
    private PartitionData partitionData;
    private String filePath = null;
    private FileFormat format = null;
    private long recordCount = -1L;
    private long fileSizeInBytes = -1L;
    private long blockSizeInBytes = -1L;

    // optional fields
    private Map<Integer, Long> columnSizes = null;
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Long> nullValueCounts = null;
    private Map<Integer, ByteBuffer> lowerBounds = null;
    private Map<Integer, ByteBuffer> upperBounds = null;

    /** Builder for an unpartitioned table. */
    public Builder() {
      this.spec = null;
      this.partitionData = null;
      this.isPartitioned = false;
    }

    /** Builder for a table partitioned by the given spec. */
    public Builder(PartitionSpec spec) {
      this.spec = spec;
      this.partitionData = newPartitionData(spec);
      this.isPartitioned = true;
    }

    /** Resets all fields so the builder can be reused. */
    public void clear() {
      if (isPartitioned) {
        partitionData.clear();
      }
      this.filePath = null;
      this.format = null;
      this.recordCount = -1L;
      this.fileSizeInBytes = -1L;
      this.blockSizeInBytes = -1L;
      this.columnSizes = null;
      this.valueCounts = null;
      this.nullValueCounts = null;
      this.lowerBounds = null;
      this.upperBounds = null;
    }

    /** Copies every field from an existing DataFile into this builder. */
    public Builder copy(DataFile toCopy) {
      if (isPartitioned) {
        this.partitionData = copyPartitionData(spec, toCopy.partition(), partitionData);
      }
      this.filePath = toCopy.path().toString();
      this.format = toCopy.format();
      this.recordCount = toCopy.recordCount();
      this.fileSizeInBytes = toCopy.fileSizeInBytes();
      this.blockSizeInBytes = toCopy.blockSizeInBytes();
      this.columnSizes = toCopy.columnSizes();
      this.valueCounts = toCopy.valueCounts();
      this.nullValueCounts = toCopy.nullValueCounts();
      this.lowerBounds = toCopy.lowerBounds();
      this.upperBounds = toCopy.upperBounds();
      return this;
    }

    /** Sets path, length, and block size from a Hadoop FileStatus. */
    public Builder withStatus(FileStatus stat) {
      this.filePath = stat.getPath().toString();
      this.fileSizeInBytes = stat.getLen();
      this.blockSizeInBytes = stat.getBlockSize();
      return this;
    }

    /** Sets path and length from an InputFile; uses the Hadoop stat when available. */
    public Builder withInputFile(InputFile file) {
      if (file instanceof HadoopInputFile) {
        return withStatus(((HadoopInputFile) file).getStat());
      }

      this.filePath = file.location();
      this.fileSizeInBytes = file.getLength();
      return this;
    }

    public Builder withPath(String filePath) {
      this.filePath = filePath;
      return this;
    }

    /** Sets the format from a case-insensitive name (e.g. "parquet"). */
    public Builder withFormat(String format) {
      this.format = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));
      return this;
    }

    public Builder withFormat(FileFormat format) {
      this.format = format;
      return this;
    }

    /** Copies the given partition tuple; only valid for partitioned builders. */
    public Builder withPartition(StructLike partition) {
      this.partitionData = copyPartitionData(spec, partition, partitionData);
      return this;
    }

    public Builder withRecordCount(long recordCount) {
      this.recordCount = recordCount;
      return this;
    }

    public Builder withFileSizeInBytes(long fileSizeInBytes) {
      this.fileSizeInBytes = fileSizeInBytes;
      return this;
    }

    public Builder withBlockSizeInBytes(long blockSizeInBytes) {
      this.blockSizeInBytes = blockSizeInBytes;
      return this;
    }

    /** Parses a Hive-style partition path ("a=1/b=2") into the partition tuple. */
    public Builder withPartitionPath(String partitionPath) {
      Preconditions.checkArgument(isPartitioned,
          "Cannot add partition data for an unpartitioned table");
      this.partitionData = fillFromPath(spec, partitionPath, partitionData);
      return this;
    }

    /** Copies record count, column stats, and bounds from a Metrics instance. */
    public Builder withMetrics(Metrics metrics) {
      // check for null to avoid NPE when unboxing
      this.recordCount = metrics.recordCount() == null ? -1 : metrics.recordCount();
      this.columnSizes = metrics.columnSizes();
      this.valueCounts = metrics.valueCounts();
      this.nullValueCounts = metrics.nullValueCounts();
      this.lowerBounds = metrics.lowerBounds();
      this.upperBounds = metrics.upperBounds();
      return this;
    }

    /**
     * Builds the DataFile, inferring the format from the path when it was not set.
     *
     * @throws IllegalArgumentException if path, format, size, or record count is missing
     */
    public DataFile build() {
      Preconditions.checkArgument(filePath != null, "File path is required");
      if (format == null) {
        this.format = FileFormat.fromFileName(filePath);
      }
      Preconditions.checkArgument(format != null, "File format is required");
      Preconditions.checkArgument(fileSizeInBytes >= 0, "File size is required");
      Preconditions.checkArgument(recordCount >= 0, "Record count is required");

      if (blockSizeInBytes < 0) {
        this.blockSizeInBytes = DEFAULT_BLOCK_SIZE; // assume 64MB blocks
      }

      return new GenericDataFile(
          filePath, format, isPartitioned ? partitionData.copy() : null,
          fileSizeInBytes, blockSizeInBytes, new Metrics(
              recordCount, columnSizes, valueCounts, nullValueCounts, lowerBounds, upperBounds));
    }
  }
}
| 1,951 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PartitionSpecParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import com.netflix.iceberg.util.Pair;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.concurrent.ExecutionException;
/**
 * Parses {@link PartitionSpec} instances from JSON and writes them back as JSON.
 * <p>
 * The JSON form is an object with a {@code spec-id} and a {@code fields} array; each field has a
 * {@code name}, {@code transform}, and {@code source-id}. Parsed specs are cached by
 * (schema struct, json) so repeated parsing of identical specs returns the same instance.
 */
public class PartitionSpecParser {
  private PartitionSpecParser() {
  }

  private static final String SPEC_ID = "spec-id";
  private static final String FIELDS = "fields";
  private static final String SOURCE_ID = "source-id";
  private static final String TRANSFORM = "transform";
  private static final String NAME = "name";

  // cache parsed specs; weak values allow unused specs to be garbage collected
  private static final Cache<Pair<Types.StructType, String>, PartitionSpec> SPEC_CACHE = CacheBuilder
      .newBuilder()
      .weakValues()
      .build();

  /**
   * Writes the spec as a JSON object, including its spec id, to the given generator.
   */
  public static void toJson(PartitionSpec spec, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeNumberField(SPEC_ID, spec.specId());
    generator.writeFieldName(FIELDS);
    toJsonFields(spec, generator);
    generator.writeEndObject();
  }

  /**
   * Returns the spec as a compact JSON string.
   */
  public static String toJson(PartitionSpec spec) {
    return toJson(spec, false);
  }

  /**
   * Returns the spec as a JSON string, pretty-printed if requested.
   *
   * @throws RuntimeIOException if writing JSON fails
   */
  public static String toJson(PartitionSpec spec, boolean pretty) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      if (pretty) {
        generator.useDefaultPrettyPrinter();
      }
      toJson(spec, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Parses a spec from a JSON object node, binding field source ids to the given schema.
   */
  public static PartitionSpec fromJson(Schema schema, JsonNode json) {
    Preconditions.checkArgument(json.isObject(), "Cannot parse spec from non-object: %s", json);
    int specId = JsonUtil.getInt(SPEC_ID, json);
    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema).withSpecId(specId);
    buildFromJsonFields(builder, json.get(FIELDS));
    return builder.build();
  }

  /**
   * Parses a spec from a JSON string, using the cache to avoid re-parsing identical specs.
   *
   * @throws RuntimeIOException if the JSON cannot be parsed
   */
  public static PartitionSpec fromJson(Schema schema, String json) {
    try {
      return SPEC_CACHE.get(Pair.of(schema.asStruct(), json),
          () -> fromJson(schema, JsonUtil.mapper().readValue(json, JsonNode.class)));
    } catch (ExecutionException e) {
      // unwrap the loader's failure so callers see the underlying parse error
      if (e.getCause() instanceof IOException) {
        throw new RuntimeIOException(
            (IOException) e.getCause(), "Failed to parse partition spec: %s", json);
      } else {
        throw new RuntimeException("Failed to parse partition spec: " + json, e.getCause());
      }
    }
  }

  /**
   * Writes only the spec's fields array (no spec id) to the given generator.
   */
  static void toJsonFields(PartitionSpec spec, JsonGenerator generator) throws IOException {
    generator.writeStartArray();
    for (PartitionField field : spec.fields()) {
      generator.writeStartObject();
      generator.writeStringField(NAME, field.name());
      generator.writeStringField(TRANSFORM, field.transform().toString());
      generator.writeNumberField(SOURCE_ID, field.sourceId());
      generator.writeEndObject();
    }
    generator.writeEndArray();
  }

  /**
   * Returns only the spec's fields array as a JSON string.
   */
  static String toJsonFields(PartitionSpec spec) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      toJsonFields(spec, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Builds a spec with the given id from a fields array node.
   */
  static PartitionSpec fromJsonFields(Schema schema, int specId, JsonNode json) {
    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema).withSpecId(specId);
    buildFromJsonFields(builder, json);
    return builder.build();
  }

  /**
   * Builds a spec with the given id from a fields array JSON string.
   *
   * @throws RuntimeIOException if the JSON cannot be parsed
   */
  static PartitionSpec fromJsonFields(Schema schema, int specId, String json) {
    try {
      return fromJsonFields(schema, specId, JsonUtil.mapper().readValue(json, JsonNode.class));
    } catch (IOException e) {
      // use the format-style message for consistency with the other parse failures
      throw new RuntimeIOException(e, "Failed to parse partition spec fields: %s", json);
    }
  }

  private static void buildFromJsonFields(PartitionSpec.Builder builder, JsonNode json) {
    Preconditions.checkArgument(json.isArray(),
        "Cannot parse partition spec fields, not an array: %s", json);

    Iterator<JsonNode> elements = json.elements();
    while (elements.hasNext()) {
      JsonNode element = elements.next();
      Preconditions.checkArgument(element.isObject(),
          "Cannot parse partition field, not an object: %s", element);

      String name = JsonUtil.getString(NAME, element);
      String transform = JsonUtil.getString(TRANSFORM, element);
      int sourceId = JsonUtil.getInt(SOURCE_ID, element);

      builder.add(sourceId, name, transform);
    }
  }
}
| 1,952 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/MergingSnapshotUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.expressions.StrictMetricsEvaluator;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.BinPacking.ListPacker;
import com.netflix.iceberg.util.CharSequenceWrapper;
import com.netflix.iceberg.util.StructLikeWrapper;
import com.netflix.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static com.netflix.iceberg.TableProperties.MANIFEST_MIN_MERGE_COUNT;
import static com.netflix.iceberg.TableProperties.MANIFEST_MIN_MERGE_COUNT_DEFAULT;
import static com.netflix.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
/**
 * Base class for snapshot updates that add and delete data files and merge small manifests.
 * <p>
 * Subclasses accumulate added files, deleted paths, deleted partitions, and delete expressions;
 * {@link #apply(TableMetadata)} then rewrites the current snapshot's manifests: files matching the
 * delete criteria are removed, and manifests with compatible partition specs are bin-packed and
 * merged up to a target size. Filtered and merged manifests are cached so that commit retries do
 * not repeat work; {@link #cleanUncommitted(Set)} deletes any files that were written but never
 * committed.
 */
abstract class MergingSnapshotUpdate extends SnapshotUpdate {
  // instance logger so subclasses log under their own class name — TODO confirm this is intended
  // rather than the usual static logger
  private final Logger LOG = LoggerFactory.getLogger(getClass());
  private static final Joiner COMMA = Joiner.on(",");

  /**
   * Thrown when a delete would remove existing data and {@link #failAnyDelete()} was requested.
   * Carries the partition path of the file that would have been deleted.
   */
  protected static class DeleteException extends ValidationException {
    private final String partition;

    private DeleteException(String partition) {
      super("Operation would delete existing data");
      this.partition = partition;
    }

    public String partition() {
      return partition;
    }
  }

  private final TableOperations ops;
  private final PartitionSpec spec;
  private final long manifestTargetSizeBytes;
  private final int minManifestsCountToMerge;

  // update data
  private final AtomicInteger manifestCount = new AtomicInteger(0);
  private final List<DataFile> newFiles = Lists.newArrayList();
  private final Set<CharSequenceWrapper> deletePaths = Sets.newHashSet();
  private final Set<StructLikeWrapper> dropPartitions = Sets.newHashSet();
  private Expression deleteExpression = Expressions.alwaysFalse();
  private boolean failAnyDelete = false;
  private boolean failMissingDeletePaths = false;

  // cache the new manifest once it is written
  private ManifestFile newManifest = null;
  private boolean hasNewFiles = false;

  // cache merge results to reuse when retrying
  private final Map<List<ManifestFile>, ManifestFile> mergeManifests = Maps.newConcurrentMap();

  // cache filtered manifests to avoid extra work when commits fail.
  private final Map<ManifestFile, ManifestFile> filteredManifests = Maps.newConcurrentMap();

  // tracking where files were deleted to validate retries quickly
  private final Map<ManifestFile, Set<CharSequenceWrapper>> filteredManifestToDeletedFiles =
      Maps.newConcurrentMap();

  private boolean filterUpdated = false; // used to clear caches of filtered and merged manifests

  MergingSnapshotUpdate(TableOperations ops) {
    super(ops);
    this.ops = ops;
    this.spec = ops.current().spec();
    this.manifestTargetSizeBytes = ops.current()
        .propertyAsLong(MANIFEST_TARGET_SIZE_BYTES, MANIFEST_TARGET_SIZE_BYTES_DEFAULT);
    this.minManifestsCountToMerge = ops.current()
        .propertyAsInt(MANIFEST_MIN_MERGE_COUNT, MANIFEST_MIN_MERGE_COUNT_DEFAULT);
  }

  protected PartitionSpec writeSpec() {
    // the spec is set when the write is started
    return spec;
  }

  /** Returns the combined row-filter expression used to match files to delete. */
  protected Expression rowFilter() {
    return deleteExpression;
  }

  /** Returns the data files added to this update so far. */
  protected List<DataFile> addedFiles() {
    return newFiles;
  }

  /** Causes {@link #apply(TableMetadata)} to throw {@link DeleteException} if any file is deleted. */
  protected void failAnyDelete() {
    this.failAnyDelete = true;
  }

  /** Causes {@link #apply(TableMetadata)} to fail validation if any requested delete path is missing. */
  protected void failMissingDeletePaths() {
    this.failMissingDeletePaths = true;
  }

  /**
   * Add a filter to match files to delete. A file will be deleted if all of the rows it contains
   * match this or any other filter passed to this method.
   *
   * @param expr an expression to match rows.
   */
  protected void deleteByRowFilter(Expression expr) {
    Preconditions.checkNotNull(expr, "Cannot delete files using filter: null");
    this.filterUpdated = true;
    this.deleteExpression = Expressions.or(deleteExpression, expr);
  }

  /**
   * Add a partition tuple to drop from the table during the delete phase.
   */
  protected void dropPartition(StructLike partition) {
    dropPartitions.add(StructLikeWrapper.wrap(partition));
  }

  /**
   * Add a specific path to be deleted in the new snapshot.
   */
  protected void delete(CharSequence path) {
    Preconditions.checkNotNull(path, "Cannot delete file path: null");
    this.filterUpdated = true;
    deletePaths.add(CharSequenceWrapper.wrap(path));
  }

  /**
   * Add a file to the new snapshot.
   */
  protected void add(DataFile file) {
    hasNewFiles = true;
    newFiles.add(file);
  }

  /**
   * Produces the manifest list for the new snapshot: filters deleted files out of the current
   * snapshot's manifests, writes a manifest for newly added files, and merges manifest groups by
   * partition spec id.
   *
   * @param base the table metadata to apply this update to
   * @return the manifests for the new snapshot
   * @throws ValidationException if required delete paths are missing or a partial-row delete is detected
   */
  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    if (filterUpdated) {
      // the delete criteria changed, so previously filtered manifests are stale; drop them
      cleanUncommittedFilters(SnapshotUpdate.EMPTY_SET);
      this.filterUpdated = false;
    }

    Snapshot current = base.currentSnapshot();
    Map<Integer, List<ManifestFile>> groups = Maps.newTreeMap(Comparator.<Integer>reverseOrder());

    // use a common metrics evaluator for all manifests because it is bound to the table schema
    StrictMetricsEvaluator metricsEvaluator = new StrictMetricsEvaluator(
        ops.current().schema(), deleteExpression);

    // add the current spec as the first group. files are added to the beginning.
    try {
      if (newFiles.size() > 0) {
        ManifestFile newManifest = newFilesAsManifest();
        List<ManifestFile> manifestGroup = Lists.newArrayList();
        manifestGroup.add(newManifest);
        groups.put(newManifest.partitionSpecId(), manifestGroup);
      }

      Set<CharSequenceWrapper> deletedFiles = Sets.newHashSet();

      // group manifests by compatible partition specs to be merged
      if (current != null) {
        List<ManifestFile> manifests = current.manifests();
        ManifestFile[] filtered = new ManifestFile[manifests.size()];
        // open all of the manifest files in parallel, use index to avoid reordering
        Tasks.range(filtered.length)
            .stopOnFailure().throwFailureWhenFinished()
            .executeWith(getWorkerPool())
            .run(index -> {
              ManifestFile manifest = filterManifest(
                  deleteExpression, metricsEvaluator,
                  manifests.get(index));
              filtered[index] = manifest;
            }, IOException.class);

        for (ManifestFile manifest : filtered) {
          Set<CharSequenceWrapper> manifestDeletes = filteredManifestToDeletedFiles.get(manifest);
          if (manifestDeletes != null) {
            deletedFiles.addAll(manifestDeletes);
          }

          List<ManifestFile> group = groups.get(manifest.partitionSpecId());
          if (group != null) {
            group.add(manifest);
          } else {
            group = Lists.newArrayList();
            group.add(manifest);
            groups.put(manifest.partitionSpecId(), group);
          }
        }
      }

      List<ManifestFile> manifests = Lists.newArrayList();
      for (Map.Entry<Integer, List<ManifestFile>> entry : groups.entrySet()) {
        for (ManifestFile manifest : mergeGroup(entry.getKey(), entry.getValue())) {
          manifests.add(manifest);
        }
      }

      // if strict delete validation was requested, every requested path must have been found
      ValidationException.check(!failMissingDeletePaths || deletedFiles.containsAll(deletePaths),
          "Missing required files to delete: %s",
          COMMA.join(transform(filter(deletePaths,
              path -> !deletedFiles.contains(path)),
              CharSequenceWrapper::get)));

      return manifests;

    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create snapshot manifest list");
    }
  }

  /**
   * Deletes merged manifests that were written but are not part of the committed set, and evicts
   * them from the merge cache.
   */
  private void cleanUncommittedMerges(Set<ManifestFile> committed) {
    // iterate over a copy of entries to avoid concurrent modification
    List<Map.Entry<List<ManifestFile>, ManifestFile>> entries =
        Lists.newArrayList(mergeManifests.entrySet());
    for (Map.Entry<List<ManifestFile>, ManifestFile> entry : entries) {
      // delete any new merged manifests that aren't in the committed list
      ManifestFile merged = entry.getValue();
      if (!committed.contains(merged)) {
        deleteFile(merged.path());
        // remove the deleted file from the cache
        mergeManifests.remove(entry.getKey());
      }
    }
  }

  /**
   * Deletes filtered manifest copies that were written but are not part of the committed set, and
   * evicts them from the filter cache.
   */
  private void cleanUncommittedFilters(Set<ManifestFile> committed) {
    // iterate over a copy of entries to avoid concurrent modification
    List<Map.Entry<ManifestFile, ManifestFile>> filterEntries =
        Lists.newArrayList(filteredManifests.entrySet());
    for (Map.Entry<ManifestFile, ManifestFile> entry : filterEntries) {
      // remove any new filtered manifests that aren't in the committed list
      ManifestFile manifest = entry.getKey();
      ManifestFile filtered = entry.getValue();
      if (!committed.contains(filtered)) {
        // only delete if the filtered copy was created
        if (!manifest.equals(filtered)) {
          deleteFile(filtered.path());
        }
        // remove the entry from the cache
        filteredManifests.remove(manifest);
      }
    }
  }

  @Override
  protected void cleanUncommitted(Set<ManifestFile> committed) {
    // delete the manifest of new files unless it was committed
    if (newManifest != null && !committed.contains(newManifest)) {
      deleteFile(newManifest.path());
      this.newManifest = null;
    }
    cleanUncommittedMerges(committed);
    cleanUncommittedFilters(committed);
  }

  /** Returns true when no delete criteria have been added, so manifests can pass through unchanged. */
  private boolean nothingToFilter() {
    return (deleteExpression == null || deleteExpression == Expressions.alwaysFalse()) &&
        deletePaths.isEmpty() && dropPartitions.isEmpty();
  }

  /**
   * @return a ManifestReader that is a filtered version of the input manifest.
   */
  private ManifestFile filterManifest(Expression deleteExpression,
                                      StrictMetricsEvaluator metricsEvaluator,
                                      ManifestFile manifest) throws IOException {
    // reuse a previously filtered copy when retrying a failed commit
    ManifestFile cached = filteredManifests.get(manifest);
    if (cached != null) {
      return cached;
    }

    if (nothingToFilter()) {
      filteredManifests.put(manifest, manifest);
      return manifest;
    }

    try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
      // an inclusive projection matches any file that MIGHT contain deleted rows; a strict
      // projection matches only files where ALL rows match the delete expression
      Expression inclusiveExpr = Projections
          .inclusive(reader.spec())
          .project(deleteExpression);
      Evaluator inclusive = new Evaluator(reader.spec().partitionType(), inclusiveExpr);

      Expression strictExpr = Projections
          .strict(reader.spec())
          .project(deleteExpression);
      Evaluator strict = new Evaluator(reader.spec().partitionType(), strictExpr);

      // this is reused to compare file paths with the delete set
      CharSequenceWrapper pathWrapper = CharSequenceWrapper.wrap("");

      // reused to compare file partitions with the drop set
      StructLikeWrapper partitionWrapper = StructLikeWrapper.wrap(null);

      // this assumes that the manifest doesn't have files to remove and streams through the
      // manifest without copying data. if a manifest does have a file to remove, this will break
      // out of the loop and move on to filtering the manifest.
      boolean hasDeletedFiles = false;
      for (ManifestEntry entry : reader.entries()) {
        DataFile file = entry.file();
        boolean fileDelete = (deletePaths.contains(pathWrapper.set(file.path())) ||
            dropPartitions.contains(partitionWrapper.set(file.partition())));
        if (fileDelete || inclusive.eval(file.partition())) {
          ValidationException.check(
              fileDelete || strict.eval(file.partition()) || metricsEvaluator.eval(file),
              "Cannot delete file where some, but not all, rows match filter %s: %s",
              this.deleteExpression, file.path());

          hasDeletedFiles = true;
          if (failAnyDelete) {
            throw new DeleteException(writeSpec().partitionToPath(file.partition()));
          }
          break; // as soon as a deleted file is detected, stop scanning
        }
      }

      if (!hasDeletedFiles) {
        filteredManifests.put(manifest, manifest);
        return manifest;
      }

      // when this point is reached, there is at least one file that will be deleted in the
      // manifest. produce a copy of the manifest with all deleted files removed.
      Set<CharSequenceWrapper> deletedPaths = Sets.newHashSet();
      OutputFile filteredCopy = manifestPath(manifestCount.getAndIncrement());
      ManifestWriter writer = new ManifestWriter(reader.spec(), filteredCopy, snapshotId());
      try {
        for (ManifestEntry entry : reader.entries()) {
          DataFile file = entry.file();
          boolean fileDelete = (deletePaths.contains(pathWrapper.set(file.path())) ||
              dropPartitions.contains(partitionWrapper.set(file.partition())));
          if (entry.status() != Status.DELETED) {
            if (fileDelete || inclusive.eval(file.partition())) {
              ValidationException.check(
                  fileDelete || strict.eval(file.partition()) || metricsEvaluator.eval(file),
                  "Cannot delete file where some, but not all, rows match filter %s: %s",
                  this.deleteExpression, file.path());

              writer.delete(entry);

              CharSequenceWrapper wrapper = CharSequenceWrapper.wrap(entry.file().path());
              if (deletedPaths.contains(wrapper)) {
                LOG.warn("Deleting a duplicate path from manifest {}: {}",
                    manifest.path(), wrapper.get());
              }
              deletedPaths.add(wrapper);

            } else {
              writer.addExisting(entry);
            }
          }
        }
      } finally {
        writer.close();
      }

      // return the filtered manifest as a reader
      ManifestFile filtered = writer.toManifestFile();

      // update caches
      filteredManifests.put(manifest, filtered);
      filteredManifestToDeletedFiles.put(filtered, deletedPaths);

      return filtered;
    }
  }

  /**
   * Bin-packs the group's manifests up to the target size and merges each bin that needs it,
   * preserving manifest order. Bins containing the new-files manifest are only merged once the
   * group reaches the minimum merge count.
   */
  @SuppressWarnings("unchecked")
  private Iterable<ManifestFile> mergeGroup(int specId, List<ManifestFile> group)
      throws IOException {
    // use a lookback of 1 to avoid reordering the manifests. using 1 also means this should pack
    // from the end so that the manifest that gets under-filled is the first one, which will be
    // merged the next time.
    ListPacker<ManifestFile> packer = new ListPacker<>(manifestTargetSizeBytes, 1);
    List<List<ManifestFile>> bins = packer.packEnd(group, manifest -> manifest.length());

    // process bins in parallel, but put results in the order of the bins into an array to preserve
    // the order of manifests and contents. preserving the order helps avoid random deletes when
    // data files are eventually aged off.
    List<ManifestFile>[] binResults = (List<ManifestFile>[])
        Array.newInstance(List.class, bins.size());
    Tasks.range(bins.size())
        .stopOnFailure().throwFailureWhenFinished()
        .executeWith(getWorkerPool())
        .run(index -> {
          List<ManifestFile> bin = bins.get(index);
          List<ManifestFile> outputManifests = Lists.newArrayList();
          binResults[index] = outputManifests;

          if (bin.size() == 1) {
            // no need to rewrite
            outputManifests.add(bin.get(0));
            return;
          }

          // if the bin has a new manifest (the new data files) then only merge it if the number of
          // manifests is above the minimum count. this is applied only to bins with an in-memory
          // manifest so that large manifests don't prevent merging older groups.
          if (bin.contains(newManifest) && bin.size() < minManifestsCountToMerge) {
            // not enough to merge, add all manifest files to the output list
            outputManifests.addAll(bin);
          } else {
            // merge the group
            outputManifests.add(createManifest(specId, bin));
          }
        }, IOException.class);

    return Iterables.concat(binResults);
  }

  /**
   * Writes a single merged manifest from the given bin, converting stale adds to existing entries
   * and dropping deletes from prior snapshots. Results are cached for commit retries.
   */
  private ManifestFile createManifest(int specId, List<ManifestFile> bin) throws IOException {
    // if this merge was already rewritten, use the existing file.
    // if the new files are in this merge, then the ManifestFile for the new files has changed and
    // will be a cache miss.
    if (mergeManifests.containsKey(bin)) {
      return mergeManifests.get(bin);
    }

    OutputFile out = manifestPath(manifestCount.getAndIncrement());

    ManifestWriter writer = new ManifestWriter(ops.current().spec(specId), out, snapshotId());
    try {

      for (ManifestFile manifest : bin) {
        try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
          for (ManifestEntry entry : reader.entries()) {
            if (entry.status() == Status.DELETED) {
              // suppress deletes from previous snapshots. only files deleted by this snapshot
              // should be added to the new manifest
              if (entry.snapshotId() == snapshotId()) {
                writer.add(entry);
              }
            } else if (entry.status() == Status.ADDED && entry.snapshotId() == snapshotId()) {
              // adds from this snapshot are still adds, otherwise they should be existing
              writer.add(entry);
            } else {
              // add all files from the old manifest as existing files
              writer.addExisting(entry);
            }
          }
        }
      }

    } finally {
      writer.close();
    }

    ManifestFile manifest = writer.toManifestFile();

    // update the cache
    mergeManifests.put(bin, manifest);

    return manifest;
  }

  /**
   * Writes (or rewrites) the manifest containing the newly added files. The manifest is rewritten
   * whenever files were added since it was last written; otherwise the cached manifest is reused.
   */
  private ManifestFile newFilesAsManifest() throws IOException {
    if (hasNewFiles && newManifest != null) {
      // files were added after the last write, so the cached manifest is stale
      deleteFile(newManifest.path());
      newManifest = null;
    }

    if (newManifest == null) {
      OutputFile out = manifestPath(manifestCount.getAndIncrement());

      ManifestWriter writer = new ManifestWriter(spec, out, snapshotId());
      try {
        writer.addAll(newFiles);
      } finally {
        writer.close();
      }

      this.newManifest = writer.toManifestFile();
      this.hasNewFiles = false;
    }

    return newManifest;
  }
}
| 1,953 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseTableScan.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.events.Listeners;
import com.netflix.iceberg.events.ScanEvent;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveManifestEvaluator;
import com.netflix.iceberg.expressions.ResidualEvaluator;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.util.BinPacking;
import com.netflix.iceberg.util.ParallelIterable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import static com.netflix.iceberg.util.ThreadPools.getPlannerPool;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
/**
* Base class for {@link TableScan} implementations.
*/
class BaseTableScan implements TableScan {
private static final Logger LOG = LoggerFactory.getLogger(TableScan.class);
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
private static final List<String> SNAPSHOT_COLUMNS = ImmutableList.of(
"snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes",
"file_size_in_bytes", "record_count", "partition", "value_counts", "null_value_counts",
"lower_bounds", "upper_bounds"
);
private static final boolean PLAN_SCANS_WITH_WORKER_POOL =
SystemProperties.getBoolean(SystemProperties.SCAN_THREAD_POOL_ENABLED, true);
private final TableOperations ops;
private final Table table;
private final Long snapshotId;
private final Schema schema;
private final Expression rowFilter;
BaseTableScan(TableOperations ops, Table table) {
this(ops, table, null, table.schema(), Expressions.alwaysTrue());
}
private BaseTableScan(TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter) {
this.ops = ops;
this.table = table;
this.snapshotId = snapshotId;
this.schema = schema;
this.rowFilter = rowFilter;
}
@Override
public Table table() {
return table;
}
@Override
public TableScan useSnapshot(long snapshotId) {
Preconditions.checkArgument(this.snapshotId == null,
"Cannot override snapshot, already set to id=%s", snapshotId);
Preconditions.checkArgument(ops.current().snapshot(snapshotId) != null,
"Cannot find snapshot with ID %s", snapshotId);
return new BaseTableScan(ops, table, snapshotId, schema, rowFilter);
}
@Override
public TableScan asOfTime(long timestampMillis) {
Preconditions.checkArgument(this.snapshotId == null,
"Cannot override snapshot, already set to id=%s", snapshotId);
Long lastSnapshotId = null;
for (SnapshotLogEntry logEntry : ops.current().snapshotLog()) {
if (logEntry.timestampMillis() <= timestampMillis) {
lastSnapshotId = logEntry.snapshotId();
}
}
// the snapshot ID could be null if no entries were older than the requested time. in that case,
// there is no valid snapshot to read.
Preconditions.checkArgument(lastSnapshotId != null,
"Cannot find a snapshot older than %s", DATE_FORMAT.format(new Date(timestampMillis)));
return useSnapshot(lastSnapshotId);
}
public TableScan project(Schema schema) {
return new BaseTableScan(ops, table, snapshotId, schema, rowFilter);
}
@Override
public TableScan select(Collection<String> columns) {
Set<Integer> requiredFieldIds = Sets.newHashSet();
// all of the filter columns are required
requiredFieldIds.addAll(
Binder.boundReferences(table.schema().asStruct(), Collections.singletonList(rowFilter)));
// all of the projection columns are required
requiredFieldIds.addAll(TypeUtil.getProjectedIds(table.schema().select(columns)));
Schema projection = TypeUtil.select(table.schema(), requiredFieldIds);
return new BaseTableScan(ops, table, snapshotId, projection, rowFilter);
}
@Override
public TableScan filter(Expression expr) {
return new BaseTableScan(ops, table, snapshotId, schema, Expressions.and(rowFilter, expr));
}
private final LoadingCache<Integer, InclusiveManifestEvaluator> EVAL_CACHE = CacheBuilder
.newBuilder()
.build(new CacheLoader<Integer, InclusiveManifestEvaluator>() {
@Override
public InclusiveManifestEvaluator load(Integer specId) {
PartitionSpec spec = ops.current().spec(specId);
return new InclusiveManifestEvaluator(spec, rowFilter);
}
});
@Override
public CloseableIterable<FileScanTask> planFiles() {
Snapshot snapshot = snapshotId != null ?
ops.current().snapshot(snapshotId) :
ops.current().currentSnapshot();
if (snapshot != null) {
LOG.info("Scanning table {} snapshot {} created at {} with filter {}", table,
snapshot.snapshotId(), DATE_FORMAT.format(new Date(snapshot.timestampMillis())),
rowFilter);
Listeners.notifyAll(
new ScanEvent(table.toString(), snapshot.snapshotId(), rowFilter, schema));
Iterable<ManifestFile> matchingManifests = Iterables.filter(snapshot.manifests(),
manifest -> EVAL_CACHE.getUnchecked(manifest.partitionSpecId()).eval(manifest));
ConcurrentLinkedQueue<Closeable> toClose = new ConcurrentLinkedQueue<>();
Iterable<Iterable<FileScanTask>> readers = Iterables.transform(
matchingManifests,
manifest -> {
ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()));
toClose.add(reader);
String schemaString = SchemaParser.toJson(reader.spec().schema());
String specString = PartitionSpecParser.toJson(reader.spec());
ResidualEvaluator residuals = new ResidualEvaluator(reader.spec(), rowFilter);
return Iterables.transform(
reader.filterRows(rowFilter).select(SNAPSHOT_COLUMNS),
file -> new BaseFileScanTask(file, schemaString, specString, residuals)
);
});
if (PLAN_SCANS_WITH_WORKER_POOL && snapshot.manifests().size() > 1) {
return CloseableIterable.combine(
new ParallelIterable<>(readers, getPlannerPool(), getWorkerPool()),
toClose);
} else {
return CloseableIterable.combine(Iterables.concat(readers), toClose);
}
} else {
LOG.info("Scanning empty table {}", table);
return CloseableIterable.empty();
}
}
@Override
public CloseableIterable<CombinedScanTask> planTasks() {
long splitSize = ops.current().propertyAsLong(
TableProperties.SPLIT_SIZE, TableProperties.SPLIT_SIZE_DEFAULT);
int lookback = ops.current().propertyAsInt(
TableProperties.SPLIT_LOOKBACK, TableProperties.SPLIT_LOOKBACK_DEFAULT);
return CloseableIterable.transform(
CloseableIterable.wrap(planFiles(), files ->
new BinPacking.PackingIterable<>(files, splitSize, lookback, FileScanTask::length)),
BaseCombinedScanTask::new);
}
@Override
public Schema schema() {
return schema;
}
@Override
public Expression filter() {
return rowFilter;
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("table", table)
.add("projection", schema.asStruct())
.add("filter", rowFilter)
.toString();
}
}
| 1,954 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FileHistory.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.CharSequenceWrapper;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
/**
 * Finds manifest entries for specific file locations across a table's snapshot history.
 */
public class FileHistory {
  // only the file path column is needed to match entries against the requested locations
  private static final List<String> HISTORY_COLUMNS = ImmutableList.of("file_path");

  private FileHistory() {
  }

  /** Starts building a file-history query for the given table. */
  public static Builder table(Table table) {
    return new Builder(table);
  }

  /**
   * Builds a query for manifest entries of specific file locations, optionally restricted to a
   * snapshot time range.
   */
  public static class Builder {
    private final Table table;
    private final Set<CharSequenceWrapper> locations = Sets.newHashSet();
    private Long startTime = null;
    private Long endTime = null;

    public Builder(Table table) {
      this.table = table;
    }

    /** Adds a file location to look up. */
    public Builder location(String location) {
      locations.add(CharSequenceWrapper.wrap(location));
      return this;
    }

    /** Restricts results to snapshots at or after the given timestamp string. */
    public Builder after(String timestamp) {
      Literal<Long> parsed = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      // timestamp literals are in microseconds; snapshot timestamps are in milliseconds
      this.startTime = parsed.value() / 1000;
      return this;
    }

    /** Restricts results to snapshots at or after the given time in milliseconds. */
    public Builder after(long timestampMillis) {
      this.startTime = timestampMillis;
      return this;
    }

    /** Restricts results to snapshots at or before the given timestamp string. */
    public Builder before(String timestamp) {
      Literal<Long> parsed = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      // timestamp literals are in microseconds; snapshot timestamps are in milliseconds
      this.endTime = parsed.value() / 1000;
      return this;
    }

    /** Restricts results to snapshots at or before the given time in milliseconds. */
    public Builder before(long timestampMillis) {
      this.endTime = timestampMillis;
      return this;
    }

    /**
     * Runs the query and returns copies of the manifest entries whose file path matches one of
     * the requested locations.
     */
    @SuppressWarnings("unchecked")
    public Iterable<ManifestEntry> build() {
      Iterable<Snapshot> matching = snapshotsInRange();

      // only use manifests that were added in the matching snapshots
      Set<Long> snapshotIds = Sets.newHashSet(transform(matching, Snapshot::snapshotId));
      Iterable<ManifestFile> manifests = Iterables.filter(
          concat(transform(matching, Snapshot::manifests)),
          m -> m.snapshotId() == null || snapshotIds.contains(m.snapshotId()));

      // a manifest group will only read each manifest once
      ManifestGroup group = new ManifestGroup(((HasTableOperations) table).operations(), manifests);

      List<ManifestEntry> results = Lists.newArrayList();
      try (CloseableIterable<ManifestEntry> entries = group.select(HISTORY_COLUMNS).entries()) {
        // TODO: replace this with an IN predicate
        CharSequenceWrapper pathWrapper = CharSequenceWrapper.wrap(null);
        for (ManifestEntry entry : entries) {
          if (entry != null && locations.contains(pathWrapper.set(entry.file().path()))) {
            results.add(entry.copy());
          }
        }
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }

      return results;
    }

    // applies the optional start/end time bounds to the table's snapshots
    private Iterable<Snapshot> snapshotsInRange() {
      Iterable<Snapshot> snapshots = table.snapshots();
      if (startTime != null) {
        snapshots = Iterables.filter(snapshots, snap -> snap.timestampMillis() >= startTime);
      }
      if (endTime != null) {
        snapshots = Iterables.filter(snapshots, snap -> snap.timestampMillis() <= endTime);
      }
      return snapshots;
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import java.io.Serializable;
/**
* Pluggable module for reading, writing, and deleting files.
* <p>
* Both table metadata files and data files can be written and read by this module. Implementations
* must be serializable because various clients of Spark tables may initialize this once and pass
* it off to a separate module that would then interact with the streams.
*/
public interface FileIO extends Serializable {
  /**
   * Get a {@link InputFile} instance to read bytes from the file at the given path.
   *
   * @param path a fully-qualified file location
   * @return an {@link InputFile} for reading the file at the given path
   */
  InputFile newInputFile(String path);
  /**
   * Get a {@link OutputFile} instance to write bytes to the file at the given path.
   *
   * @param path a fully-qualified file location
   * @return an {@link OutputFile} for writing the file at the given path
   */
  OutputFile newOutputFile(String path);
  /**
   * Delete the file at the given path.
   *
   * @param path a fully-qualified location of the file to delete
   */
  void deleteFile(String path);
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
/**
* Writer for manifest files.
*/
class ManifestWriter implements FileAppender<DataFile> {
private static final Logger LOG = LoggerFactory.getLogger(ManifestWriter.class);
private final String location;
private final OutputFile file;
private final int specId;
private final FileAppender<ManifestEntry> writer;
private final long snapshotId;
private final ManifestEntry reused;
private final PartitionSummary stats;
private boolean closed = false;
private int addedFiles = 0;
private int existingFiles = 0;
private int deletedFiles = 0;
ManifestWriter(PartitionSpec spec, OutputFile file, long snapshotId) {
this.location = file.location();
this.file = file;
this.specId = spec.specId();
this.writer = newAppender(FileFormat.AVRO, spec, file);
this.snapshotId = snapshotId;
this.reused = new ManifestEntry(spec.partitionType());
this.stats = new PartitionSummary(spec);
}
public void addExisting(Iterable<ManifestEntry> entries) {
for (ManifestEntry entry : entries) {
if (entry.status() != DELETED) {
addExisting(entry);
}
}
}
public void addExisting(ManifestEntry entry) {
add(reused.wrapExisting(entry.snapshotId(), entry.file()));
}
public void addExisting(long snapshotId, DataFile file) {
add(reused.wrapExisting(snapshotId, file));
}
public void delete(ManifestEntry entry) {
// Use the current Snapshot ID for the delete. It is safe to delete the data file from disk
// when this Snapshot has been removed or when there are no Snapshots older than this one.
add(reused.wrapDelete(snapshotId, entry.file()));
}
public void delete(DataFile file) {
add(reused.wrapDelete(snapshotId, file));
}
public void add(ManifestEntry entry) {
switch (entry.status()) {
case ADDED:
addedFiles += 1;
break;
case EXISTING:
existingFiles += 1;
break;
case DELETED:
deletedFiles += 1;
break;
}
stats.update(entry.file().partition());
writer.add(entry);
}
public void addEntries(Iterable<ManifestEntry> entries) {
for (ManifestEntry entry : entries) {
add(entry);
}
}
@Override
public void add(DataFile file) {
// TODO: this assumes that file is a GenericDataFile that can be written directly to Avro
// Eventually, this should check in case there are other DataFile implementations.
add(reused.wrapAppend(snapshotId, file));
}
@Override
public Metrics metrics() {
return writer.metrics();
}
public ManifestFile toManifestFile() {
Preconditions.checkState(closed, "Cannot build ManifestFile, writer is not closed");
return new GenericManifestFile(location, file.toInputFile().getLength(), specId, snapshotId,
addedFiles, existingFiles, deletedFiles, stats.summaries());
}
@Override
public void close() throws IOException {
this.closed = true;
writer.close();
}
private static <D> FileAppender<D> newAppender(FileFormat format, PartitionSpec spec,
OutputFile file) {
Schema manifestSchema = ManifestEntry.getSchema(spec.partitionType());
try {
switch (format) {
case AVRO:
return Avro.write(file)
.schema(manifestSchema)
.named("manifest_entry")
.meta("schema", SchemaParser.toJson(spec.schema()))
.meta("partition-spec", PartitionSpecParser.toJsonFields(spec))
.meta("partition-spec-id", String.valueOf(spec.specId()))
.build();
default:
throw new IllegalArgumentException("Unsupported format: " + format);
}
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to create manifest writer for path: " + file);
}
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.OutputFile;
import java.io.IOException;
import java.util.List;
import java.util.Set;
/**
* {@link AppendFiles Append} implementation that adds a new manifest file for the write.
* <p>
* This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
*/
/**
 * {@link AppendFiles Append} implementation that adds a new manifest file for the write.
 * <p>
 * This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
 */
class FastAppend extends SnapshotUpdate implements AppendFiles {
  private final PartitionSpec spec;
  private final List<DataFile> newFiles = Lists.newArrayList();

  // manifest written for the current set of files; invalidated when more files are appended
  private ManifestFile newManifest = null;
  private boolean hasNewFiles = false;

  FastAppend(TableOperations ops) {
    super(ops);
    this.spec = ops.current().spec();
  }

  @Override
  public FastAppend appendFile(DataFile file) {
    this.hasNewFiles = true;
    newFiles.add(file);
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    List<ManifestFile> manifests = Lists.newArrayList();
    try {
      manifests.add(writeManifest());
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write manifest");
    }

    // keep all manifests from the current snapshot, after the new one
    Snapshot current = base.currentSnapshot();
    if (current != null) {
      manifests.addAll(current.manifests());
    }

    return manifests;
  }

  @Override
  protected void cleanUncommitted(Set<ManifestFile> committed) {
    if (!committed.contains(newManifest)) {
      deleteFile(newManifest.path());
    }
  }

  /** Writes (or reuses) the manifest containing the appended files. */
  private ManifestFile writeManifest() throws IOException {
    // a stale manifest must be rewritten if files were appended since it was written
    if (newManifest != null && hasNewFiles) {
      deleteFile(newManifest.path());
      this.newManifest = null;
    }

    if (newManifest == null) {
      OutputFile out = manifestPath(0);
      ManifestWriter writer = new ManifestWriter(spec, out, snapshotId());
      try {
        writer.addAll(newFiles);
      } finally {
        writer.close();
      }
      this.newManifest = writer.toManifestFile();
      this.hasNewFiles = false;
    }

    return newManifest;
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
/**
 * Generic implementation of {@link PartitionFieldSummary} used when reading and writing
 * manifest files with Avro.
 * <p>
 * Implements {@link IndexedRecord} and {@link SchemaConstructable} so Avro can construct
 * and populate instances by positional field access; {@code fromProjectionPos} remaps
 * projected-schema ordinals to the full-schema ordinals used by get/set.
 */
public class GenericPartitionFieldSummary
    implements PartitionFieldSummary, StructLike, IndexedRecord, SchemaConstructable, Serializable {
  private static final Schema AVRO_SCHEMA = AvroSchemaUtil.convert(PartitionFieldSummary.getType());
  private transient Schema avroSchema; // not final for Java serialization
  // maps a projected-schema position to the position in the full summary schema;
  // null when the instance was built from the full schema (no remapping needed)
  private int[] fromProjectionPos;
  // data fields
  private boolean containsNull = false;
  private ByteBuffer lowerBound = null;
  private ByteBuffer upperBound = null;
  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public GenericPartitionFieldSummary(Schema avroSchema) {
    this.avroSchema = avroSchema;
    List<Types.NestedField> fields = AvroSchemaUtil.convert(avroSchema)
        .asNestedType()
        .asStructType()
        .fields();
    List<Types.NestedField> allFields = PartitionFieldSummary.getType().fields();
    // build the projected-to-full position mapping by matching field IDs
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
        }
      }
      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }
  }
  /**
   * Creates a summary from known values using the full (non-projected) schema.
   */
  public GenericPartitionFieldSummary(boolean containsNull, ByteBuffer lowerBound,
                                      ByteBuffer upperBound) {
    this.avroSchema = AVRO_SCHEMA;
    this.containsNull = containsNull;
    this.lowerBound = lowerBound;
    this.upperBound = upperBound;
    this.fromProjectionPos = null;
  }
  /**
   * Copy constructor.
   *
   * @param toCopy a generic manifest file to copy.
   */
  private GenericPartitionFieldSummary(GenericPartitionFieldSummary toCopy) {
    this.avroSchema = toCopy.avroSchema;
    this.containsNull = toCopy.containsNull;
    // NOTE(review): the ByteBuffer bounds are shared, not deep-copied — callers must not
    // mutate them through a copy
    this.lowerBound = toCopy.lowerBound;
    this.upperBound = toCopy.upperBound;
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }
  /**
   * Constructor for Java serialization.
   */
  GenericPartitionFieldSummary() {
  }
  @Override
  public boolean containsNull() {
    return containsNull;
  }
  @Override
  public ByteBuffer lowerBound() {
    return lowerBound;
  }
  @Override
  public ByteBuffer upperBound() {
    return upperBound;
  }
  @Override
  public int size() {
    return PartitionFieldSummary.getType().fields().size();
  }
  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }
  @Override
  public void put(int i, Object v) {
    set(i, v);
  }
  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return containsNull;
      case 1:
        return lowerBound;
      case 2:
        return upperBound;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }
  @Override
  @SuppressWarnings("unchecked")
  public <T> void set(int i, T value) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        this.containsNull = (Boolean) value;
        return;
      case 1:
        this.lowerBound = (ByteBuffer) value;
        return;
      case 2:
        this.upperBound = (ByteBuffer) value;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }
  @Override
  public PartitionFieldSummary copy() {
    return new GenericPartitionFieldSummary(this);
  }
  @Override
  public Schema getSchema() {
    return avroSchema;
  }
  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("contains_null", containsNull)
        .add("lower_bound", lowerBound)
        .add("upper_bound", upperBound)
        .toString();
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
* Configuration properties that are controlled by Java system properties.
*/
/**
 * Configuration properties that are controlled by Java system properties.
 */
public class SystemProperties {
  // utility class: constants and static helpers only, not meant to be instantiated
  private SystemProperties() {
  }
  /**
   * Sets the size of the planner pool. The planner pool limits the number of concurrent planning
   * operations in the base table implementation.
   */
  public static final String PLANNER_THREAD_POOL_SIZE_PROP = "iceberg.planner.num-threads";
  /**
   * Sets the size of the worker pool. The worker pool limits the number of tasks concurrently
   * processing manifests in the base table implementation across all concurrent planning or commit
   * operations.
   */
  public static final String WORKER_THREAD_POOL_SIZE_PROP = "iceberg.worker.num-threads";
  /**
   * Whether to use the shared worker pool when planning table scans.
   */
  public static final String SCAN_THREAD_POOL_ENABLED = "iceberg.scan.plan-in-worker-pool";
  /**
   * Reads a boolean system property.
   *
   * @param systemProperty the property name to read
   * @param defaultValue the value returned when the property is not set
   * @return the parsed property value, or {@code defaultValue} if the property is unset
   */
  static boolean getBoolean(String systemProperty, boolean defaultValue) {
    String value = System.getProperty(systemProperty);
    if (value != null) {
      // any value other than "true" (ignoring case) parses to false
      return Boolean.parseBoolean(value);
    }
    return defaultValue;
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.JsonUtil;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
/**
 * Reads and writes {@link TableMetadata} as JSON.
 * <p>
 * Metadata files whose location ends with ".gz" are gzip-compressed on write and
 * decompressed on read.
 */
public class TableMetadataParser {
  // visible for testing
  static final String FORMAT_VERSION = "format-version";
  static final String LOCATION = "location";
  static final String LAST_UPDATED_MILLIS = "last-updated-ms";
  static final String LAST_COLUMN_ID = "last-column-id";
  static final String SCHEMA = "schema";
  static final String PARTITION_SPEC = "partition-spec";
  static final String PARTITION_SPECS = "partition-specs";
  static final String DEFAULT_SPEC_ID = "default-spec-id";
  static final String PROPERTIES = "properties";
  static final String CURRENT_SNAPSHOT_ID = "current-snapshot-id";
  static final String SNAPSHOTS = "snapshots";
  static final String SNAPSHOT_ID = "snapshot-id";
  static final String TIMESTAMP_MS = "timestamp-ms";
  static final String SNAPSHOT_LOG = "snapshot-log";

  // utility class: static parse/write methods only
  private TableMetadataParser() {
  }

  /**
   * Serializes table metadata to a compact JSON string.
   */
  public static String toJson(TableMetadata metadata) {
    StringWriter writer = new StringWriter();
    try {
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      toJson(metadata, generator);
      generator.flush();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json for: %s", metadata);
    }
    return writer.toString();
  }

  /**
   * Writes table metadata as pretty-printed JSON to the given output file, gzip-compressing
   * when the file location ends with ".gz".
   */
  public static void write(TableMetadata metadata, OutputFile outputFile) {
    try (OutputStreamWriter writer = new OutputStreamWriter(
        outputFile.location().endsWith(".gz") ?
            new GzipCompressorOutputStream(outputFile.create()) :
            outputFile.create())) {
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      generator.useDefaultPrettyPrinter();
      toJson(metadata, generator);
      generator.flush();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json to file: %s", outputFile);
    }
  }

  /**
   * Returns the metadata file extension, compressed or not depending on configuration.
   */
  public static String getFileExtension(Configuration configuration) {
    return ConfigProperties.shouldCompress(configuration) ? ".metadata.json.gz" : ".metadata.json";
  }

  private static void toJson(TableMetadata metadata, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeNumberField(FORMAT_VERSION, TableMetadata.TABLE_FORMAT_VERSION);
    generator.writeStringField(LOCATION, metadata.location());
    generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
    generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());
    generator.writeFieldName(SCHEMA);
    SchemaParser.toJson(metadata.schema(), generator);
    // for older readers, continue writing the default spec as "partition-spec"
    generator.writeFieldName(PARTITION_SPEC);
    PartitionSpecParser.toJsonFields(metadata.spec(), generator);
    // write the default spec ID and spec list
    generator.writeNumberField(DEFAULT_SPEC_ID, metadata.defaultSpecId());
    generator.writeArrayFieldStart(PARTITION_SPECS);
    for (PartitionSpec spec : metadata.specs()) {
      PartitionSpecParser.toJson(spec, generator);
    }
    generator.writeEndArray();
    generator.writeObjectFieldStart(PROPERTIES);
    for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
      generator.writeStringField(keyValue.getKey(), keyValue.getValue());
    }
    generator.writeEndObject();
    // -1 signals that no current snapshot is set
    generator.writeNumberField(CURRENT_SNAPSHOT_ID,
        metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);
    generator.writeArrayFieldStart(SNAPSHOTS);
    for (Snapshot snapshot : metadata.snapshots()) {
      SnapshotParser.toJson(snapshot, generator);
    }
    generator.writeEndArray();
    generator.writeArrayFieldStart(SNAPSHOT_LOG);
    for (SnapshotLogEntry logEntry : metadata.snapshotLog()) {
      generator.writeStartObject();
      generator.writeNumberField(TIMESTAMP_MS, logEntry.timestampMillis());
      generator.writeNumberField(SNAPSHOT_ID, logEntry.snapshotId());
      generator.writeEndObject();
    }
    generator.writeEndArray();
    generator.writeEndObject();
  }

  /**
   * Reads table metadata from a JSON file, decompressing when the location ends with ".gz".
   */
  public static TableMetadata read(TableOperations ops, InputFile file) {
    // check for ".gz" (with the dot) to match write() and getFileExtension(); also close
    // the stream deterministically instead of relying on the JSON parser to do it
    try (InputStream is = file.location().endsWith(".gz") ?
        new GzipCompressorInputStream(file.newStream()) :
        file.newStream()) {
      return fromJson(ops, file, JsonUtil.mapper().readValue(is, JsonNode.class));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read file: %s", file);
    }
  }

  static TableMetadata fromJson(TableOperations ops, InputFile file, JsonNode node) {
    Preconditions.checkArgument(node.isObject(),
        "Cannot parse metadata from a non-object: %s", node);
    int formatVersion = JsonUtil.getInt(FORMAT_VERSION, node);
    Preconditions.checkArgument(formatVersion == TableMetadata.TABLE_FORMAT_VERSION,
        "Cannot read unsupported version %d", formatVersion);
    String location = JsonUtil.getString(LOCATION, node);
    int lastAssignedColumnId = JsonUtil.getInt(LAST_COLUMN_ID, node);
    Schema schema = SchemaParser.fromJson(node.get(SCHEMA));
    JsonNode specArray = node.get(PARTITION_SPECS);
    List<PartitionSpec> specs;
    int defaultSpecId;
    if (specArray != null) {
      Preconditions.checkArgument(specArray.isArray(),
          "Cannot parse partition specs from non-array: %s", specArray);
      // default spec ID is required when the spec array is present
      defaultSpecId = JsonUtil.getInt(DEFAULT_SPEC_ID, node);
      // parse the spec array
      ImmutableList.Builder<PartitionSpec> builder = ImmutableList.builder();
      for (JsonNode spec : specArray) {
        builder.add(PartitionSpecParser.fromJson(schema, spec));
      }
      specs = builder.build();
    } else {
      // partition spec is required for older readers, but is always set to the default if the spec
      // array is set. it is only used to default the spec map is missing, indicating that the
      // table metadata was written by an older writer.
      defaultSpecId = TableMetadata.INITIAL_SPEC_ID;
      specs = ImmutableList.of(PartitionSpecParser.fromJsonFields(
          schema, TableMetadata.INITIAL_SPEC_ID, node.get(PARTITION_SPEC)));
    }
    Map<String, String> properties = JsonUtil.getStringMap(PROPERTIES, node);
    long currentVersionId = JsonUtil.getLong(CURRENT_SNAPSHOT_ID, node);
    long lastUpdatedMillis = JsonUtil.getLong(LAST_UPDATED_MILLIS, node);
    JsonNode snapshotArray = node.get(SNAPSHOTS);
    Preconditions.checkArgument(snapshotArray.isArray(),
        "Cannot parse snapshots from non-array: %s", snapshotArray);
    List<Snapshot> snapshots = Lists.newArrayListWithExpectedSize(snapshotArray.size());
    Iterator<JsonNode> iterator = snapshotArray.elements();
    while (iterator.hasNext()) {
      snapshots.add(SnapshotParser.fromJson(ops, iterator.next()));
    }
    // rebuild the snapshot log ordered by timestamp
    SortedSet<SnapshotLogEntry> entries =
        Sets.newTreeSet(Comparator.comparingLong(SnapshotLogEntry::timestampMillis));
    if (node.has(SNAPSHOT_LOG)) {
      Iterator<JsonNode> logIterator = node.get(SNAPSHOT_LOG).elements();
      while (logIterator.hasNext()) {
        JsonNode entryNode = logIterator.next();
        entries.add(new SnapshotLogEntry(
            JsonUtil.getLong(TIMESTAMP_MS, entryNode), JsonUtil.getLong(SNAPSHOT_ID, entryNode)));
      }
    }
    return new TableMetadata(ops, file, location,
        lastUpdatedMillis, lastAssignedColumnId, schema, defaultSpecId, specs, properties,
        currentVersionId, snapshots, ImmutableList.copyOf(entries.iterator()));
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.util.Tasks;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * {@link UpdateProperties} implementation that accumulates property updates and removals
 * and commits them with retries.
 */
class PropertiesUpdate implements UpdateProperties {
  private final TableOperations ops;
  private final Map<String, String> updates = Maps.newHashMap();
  private final Set<String> removals = Sets.newHashSet();
  private TableMetadata base;

  PropertiesUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  @Override
  public UpdateProperties set(String key, String value) {
    Preconditions.checkNotNull(key, "Key cannot be null");
    // BUG FIX: previously checked key twice and never validated value
    Preconditions.checkNotNull(value, "Value cannot be null");
    Preconditions.checkArgument(!removals.contains(key),
        "Cannot remove and update the same key: %s", key);

    updates.put(key, value);
    return this;
  }

  @Override
  public UpdateProperties remove(String key) {
    Preconditions.checkNotNull(key, "Key cannot be null");
    Preconditions.checkArgument(!updates.containsKey(key),
        "Cannot remove and update the same key: %s", key);

    removals.add(key);
    return this;
  }

  @Override
  public UpdateProperties defaultFormat(FileFormat format) {
    set(TableProperties.DEFAULT_FILE_FORMAT, format.name());
    return this;
  }

  /**
   * Refreshes the base metadata and returns its properties with removals dropped and
   * updates applied.
   */
  @Override
  public Map<String, String> apply() {
    this.base = ops.refresh();
    Map<String, String> newProperties = Maps.newHashMap();
    for (Map.Entry<String, String> entry : base.properties().entrySet()) {
      if (!removals.contains(entry.getKey())) {
        newProperties.put(entry.getKey(), entry.getValue());
      }
    }
    newProperties.putAll(updates);
    return newProperties;
  }

  @Override
  public void commit() {
    Tasks.foreach(ops)
        .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
        .exponentialBackoff(
            base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
            2.0 /* exponential */ )
        .onlyRetryOn(CommitFailedException.class)
        .run(taskOps -> {
          // renamed from "ops" to avoid shadowing the field (same object either way)
          Map<String, String> newProperties = apply();
          TableMetadata updated = base.replaceProperties(newProperties);
          taskOps.commit(base, updated);
        });
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.Map;
/**
* Base {@link Table} implementation.
* <p>
* This can be extended by providing a {@link TableOperations} to the constructor.
*/
public class BaseTable implements Table, HasTableOperations {
  private final TableOperations ops;
  private final String name;
  /**
   * @param ops operations used to load, refresh, and commit table metadata
   * @param name a table name, used only by {@link #toString()}
   */
  public BaseTable(TableOperations ops, String name) {
    this.ops = ops;
    this.name = name;
  }
  @Override
  public TableOperations operations() {
    return ops;
  }
  @Override
  public void refresh() {
    ops.refresh();
  }
  /** Returns a new scan over the table's current state. */
  @Override
  public TableScan newScan() {
    return new BaseTableScan(ops, this);
  }
  // the accessors below read from the current (possibly cached) metadata; call
  // refresh() first for the latest committed state
  @Override
  public Schema schema() {
    return ops.current().schema();
  }
  @Override
  public PartitionSpec spec() {
    return ops.current().spec();
  }
  @Override
  public Map<String, String> properties() {
    return ops.current().properties();
  }
  @Override
  public String location() {
    return ops.current().location();
  }
  @Override
  public Snapshot currentSnapshot() {
    return ops.current().currentSnapshot();
  }
  @Override
  public Iterable<Snapshot> snapshots() {
    return ops.current().snapshots();
  }
  @Override
  public UpdateSchema updateSchema() {
    return new SchemaUpdate(ops);
  }
  @Override
  public UpdateProperties updateProperties() {
    return new PropertiesUpdate(ops);
  }
  /** Returns an append that merges new files into existing manifests. */
  @Override
  public AppendFiles newAppend() {
    return new MergeAppend(ops);
  }
  /** Returns an append that writes a new manifest instead of merging (faster commits). */
  @Override
  public AppendFiles newFastAppend() {
    return new FastAppend(ops);
  }
  @Override
  public RewriteFiles newRewrite() {
    return new ReplaceFiles(ops);
  }
  @Override
  public OverwriteFiles newOverwrite() {
    return new OverwriteData(ops);
  }
  @Override
  public ReplacePartitions newReplacePartitions() {
    return new ReplacePartitionsOperation(ops);
  }
  @Override
  public DeleteFiles newDelete() {
    return new StreamingDelete(ops);
  }
  @Override
  public ExpireSnapshots expireSnapshots() {
    return new RemoveSnapshots(ops);
  }
  /** Returns an update that rolls the table state back to an older snapshot. */
  @Override
  public Rollback rollback() {
    return new RollbackToSnapshot(ops);
  }
  /** Returns a transaction that batches several updates into one commit. */
  @Override
  public Transaction newTransaction() {
    return BaseTransaction.newTransaction(ops);
  }
  @Override
  public String toString() {
    return name;
  }
}
| 1,963 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseFileScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ResidualEvaluator;
/**
 * A {@link FileScanTask} that covers one whole data file.
 * <p>
 * The partition spec is carried as JSON strings so instances stay cheaply serializable;
 * the spec itself is parsed lazily on first access.
 */
class BaseFileScanTask implements FileScanTask {
  private final DataFile file;
  private final String schemaString;
  private final String specString;
  private final ResidualEvaluator residuals;
  // lazily parsed from schemaString/specString; transient so serialized copies re-parse
  private transient PartitionSpec spec = null;

  BaseFileScanTask(DataFile file, String schemaString, String specString, ResidualEvaluator residuals) {
    this.file = file;
    this.schemaString = schemaString;
    this.specString = specString;
    this.residuals = residuals;
  }

  @Override
  public DataFile file() {
    return file;
  }

  @Override
  public PartitionSpec spec() {
    PartitionSpec lazySpec = spec;
    if (lazySpec == null) {
      Schema schema = SchemaParser.fromJson(schemaString);
      lazySpec = PartitionSpecParser.fromJson(schema, specString);
      this.spec = lazySpec;
    }
    return lazySpec;
  }

  @Override
  public long start() {
    // this task always scans from the beginning of the file
    return 0;
  }

  @Override
  public long length() {
    // and it scans the entire file
    return file.fileSizeInBytes();
  }

  @Override
  public Expression residual() {
    return residuals.residualFor(file.partition());
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("file", file.path())
        .add("partition_data", file.partition())
        .add("residual", residual())
        .toString();
  }
}
| 1,964 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PartitionSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
/**
 * Accumulates per-field partition statistics (contains-null, min, max) over the
 * partition tuples passed to {@link #update(StructLike)}.
 */
class PartitionSummary {
  private final PartitionFieldStats<?>[] fields;
  private final Class<?>[] javaClasses;

  PartitionSummary(PartitionSpec spec) {
    this.javaClasses = spec.javaClasses();
    this.fields = new PartitionFieldStats[javaClasses.length];
    List<Types.NestedField> partitionFields = spec.partitionType().fields();
    for (int pos = 0; pos < fields.length; pos += 1) {
      this.fields[pos] = new PartitionFieldStats<>(partitionFields.get(pos).type());
    }
  }

  /** Returns a lazily transformed view of the accumulated per-field summaries. */
  List<PartitionFieldSummary> summaries() {
    return Lists.transform(Arrays.asList(fields), PartitionFieldStats::toSummary);
  }

  public void update(StructLike partitionKey) {
    updateFields(partitionKey);
  }

  @SuppressWarnings("unchecked")
  private <T> void updateFields(StructLike key) {
    for (int pos = 0; pos < javaClasses.length; pos += 1) {
      PartitionFieldStats<T> fieldStats = (PartitionFieldStats<T>) fields[pos];
      Class<T> javaClass = (Class<T>) javaClasses[pos];
      fieldStats.update(key.get(pos, javaClass));
    }
  }

  /** Running contains-null/min/max statistics for a single partition field. */
  private static class PartitionFieldStats<T> {
    private final Type type;
    private final Comparator<T> comparator;
    private boolean containsNull = false;
    private T min = null;
    private T max = null;

    private PartitionFieldStats(Type type) {
      this.type = type;
      this.comparator = Comparators.forType(type.asPrimitiveType());
    }

    public PartitionFieldSummary toSummary() {
      return new GenericPartitionFieldSummary(containsNull,
          min != null ? Conversions.toByteBuffer(type, min) : null,
          max != null ? Conversions.toByteBuffer(type, max) : null);
    }

    void update(T value) {
      if (value == null) {
        this.containsNull = true;
        return;
      }
      if (min == null) {
        // first non-null value seeds both bounds
        this.min = value;
        this.max = value;
        return;
      }
      if (comparator.compare(value, min) < 0) {
        this.min = value;
      }
      if (comparator.compare(max, value) < 0) {
        this.max = value;
      }
    }
  }
}
| 1,965 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/GenericManifestFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import java.io.Serializable;
import java.util.List;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Iterables.transform;
/**
 * Generic {@link ManifestFile} implementation.
 * <p>
 * Instances are read and written with Avro (via {@link IndexedRecord} and
 * {@link SchemaConstructable}) and can also be Java-serialized. Field access by
 * ordinal is order-sensitive: the switch statements in {@link #get(int)} and
 * {@link #set(int, Object)} must match the field order of
 * {@code ManifestFile.schema()}.
 */
public class GenericManifestFile
    implements ManifestFile, StructLike, IndexedRecord, SchemaConstructable, Serializable {
  private static final Schema AVRO_SCHEMA = AvroSchemaUtil.convert(
      ManifestFile.schema(), "manifest_file");

  private transient Schema avroSchema; // not final for Java serialization
  // maps the ordinal of a projected field to its ordinal in the full schema;
  // null when this instance was not created from a projection
  private int[] fromProjectionPos;

  // data fields
  private InputFile file = null;
  private String manifestPath = null;
  private Long length = null;
  private int specId = -1;
  private Long snapshotId = null;
  private Integer addedFilesCount = null;
  private Integer existingFilesCount = null;
  private Integer deletedFilesCount = null;
  private List<PartitionFieldSummary> partitions = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public GenericManifestFile(org.apache.avro.Schema avroSchema) {
    this.avroSchema = avroSchema;
    List<Types.NestedField> fields = AvroSchemaUtil.convert(avroSchema)
        .asNestedType()
        .asStructType()
        .fields();
    List<Types.NestedField> allFields = ManifestFile.schema().asStruct().fields();
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      // match projected field i to its position in the full schema by field id;
      // NOTE(review): the inner loop does not break on a match — harmless because
      // field ids are unique in the schema, but it scans all fields every time
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
        }
      }
      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }
  }

  // used when wrapping an InputFile whose length is loaded lazily
  GenericManifestFile(InputFile file, int specId) {
    this.avroSchema = AVRO_SCHEMA;
    this.file = file;
    this.manifestPath = file.location();
    this.length = null; // lazily loaded from file
    this.specId = specId;
    this.snapshotId = null;
    this.addedFilesCount = null;
    this.existingFilesCount = null;
    this.deletedFilesCount = null;
    this.partitions = null;
    this.fromProjectionPos = null;
  }

  public GenericManifestFile(String path, long length, int specId, long snapshotId,
                             int addedFilesCount, int existingFilesCount, int deletedFilesCount,
                             List<PartitionFieldSummary> partitions) {
    this.avroSchema = AVRO_SCHEMA;
    this.manifestPath = path;
    this.length = length;
    this.specId = specId;
    this.snapshotId = snapshotId;
    this.addedFilesCount = addedFilesCount;
    this.existingFilesCount = existingFilesCount;
    this.deletedFilesCount = deletedFilesCount;
    this.partitions = partitions;
    this.fromProjectionPos = null;
  }

  /**
   * Copy constructor.
   *
   * @param toCopy a generic manifest file to copy.
   */
  private GenericManifestFile(GenericManifestFile toCopy) {
    this.avroSchema = toCopy.avroSchema;
    this.manifestPath = toCopy.manifestPath;
    this.length = toCopy.length;
    this.specId = toCopy.specId;
    this.snapshotId = toCopy.snapshotId;
    this.addedFilesCount = toCopy.addedFilesCount;
    this.existingFilesCount = toCopy.existingFilesCount;
    this.deletedFilesCount = toCopy.deletedFilesCount;
    // deep-copy the summaries so the copy is independent of the original
    // NOTE(review): this NPEs if toCopy.partitions is null (e.g. a copy of an
    // instance built from the InputFile constructor) — confirm callers never do that
    this.partitions = copyOf(transform(toCopy.partitions, PartitionFieldSummary::copy));
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }

  /**
   * Constructor for Java serialization.
   */
  GenericManifestFile() {
  }

  @Override
  public String path() {
    return manifestPath;
  }

  public Long lazyLength() {
    if (length == null) {
      if (file != null) {
        // this was created from an input file and length is lazily loaded
        this.length = file.getLength();
      } else {
        // this was loaded from a file without projecting length; returns null,
        // which makes the unboxing in length() throw a NullPointerException
        return null;
      }
    }
    return length;
  }

  @Override
  public long length() {
    // auto-unboxes; throws NullPointerException if length was not projected
    return lazyLength();
  }

  @Override
  public int partitionSpecId() {
    return specId;
  }

  @Override
  public Long snapshotId() {
    return snapshotId;
  }

  @Override
  public Integer addedFilesCount() {
    return addedFilesCount;
  }

  @Override
  public Integer existingFilesCount() {
    return existingFilesCount;
  }

  @Override
  public Integer deletedFilesCount() {
    return deletedFilesCount;
  }

  @Override
  public List<PartitionFieldSummary> partitions() {
    return partitions;
  }

  @Override
  public int size() {
    return ManifestFile.schema().columns().size();
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }

  @Override
  public void put(int i, Object v) {
    set(i, v);
  }

  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return manifestPath;
      case 1:
        return lazyLength();
      case 2:
        return specId;
      case 3:
        return snapshotId;
      case 4:
        return addedFilesCount;
      case 5:
        return existingFilesCount;
      case 6:
        return deletedFilesCount;
      case 7:
        return partitions;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> void set(int i, T value) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        // always coerce to String for Serializable
        this.manifestPath = value.toString();
        return;
      case 1:
        this.length = (Long) value;
        return;
      case 2:
        this.specId = (Integer) value;
        return;
      case 3:
        this.snapshotId = (Long) value;
        return;
      case 4:
        this.addedFilesCount = (Integer) value;
        return;
      case 5:
        this.existingFilesCount = (Integer) value;
        return;
      case 6:
        this.deletedFilesCount = (Integer) value;
        return;
      case 7:
        this.partitions = (List<PartitionFieldSummary>) value;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public ManifestFile copy() {
    return new GenericManifestFile(this);
  }

  @Override
  public Schema getSchema() {
    return avroSchema;
  }

  @Override
  public boolean equals(Object other) {
    // equality is by manifest path only; all other fields are ignored
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    GenericManifestFile that = (GenericManifestFile) other;
    return Objects.equal(manifestPath, that.manifestPath);
  }

  @Override
  public int hashCode() {
    // consistent with equals: based on manifest path only
    return Objects.hashCode(manifestPath);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("path", manifestPath)
        .add("length", length)
        .add("partition_spec_id", specId)
        .add("added_snapshot_id", snapshotId)
        .add("added_data_files_count", addedFilesCount)
        .add("existing_data_files_count", existingFilesCount)
        .add("deleted_data_files_count", deletedFilesCount)
        .add("partitions", partitions)
        .toString();
  }
}
| 1,966 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroIterable;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
/**
 * Reader for manifest files.
 * <p>
 * Readers are created using the builder from {@link #read(InputFile)}.
 */
public class ManifestReader extends CloseableGroup implements Filterable<FilteredManifest> {
  private static final Logger LOG = LoggerFactory.getLogger(ManifestReader.class);
  private static final List<String> ALL_COLUMNS = Lists.newArrayList("*");
  // minimal projection needed by cacheChanges() to track added/deleted files
  // (fixed typo: was CHANGE_COLUNNS)
  private static final List<String> CHANGE_COLUMNS = Lists.newArrayList(
      "file_path", "file_format", "partition", "record_count", "file_size_in_bytes");

  /**
   * Returns a new {@link ManifestReader} for an {@link InputFile}.
   *
   * @param file an InputFile
   * @return a manifest reader
   */
  public static ManifestReader read(InputFile file) {
    return new ManifestReader(file);
  }

  /**
   * Returns a new {@link ManifestReader} for an in-memory list of {@link ManifestEntry}.
   *
   * @param spec a partition spec for the entries
   * @param entries an in-memory list of entries for this manifest
   * @return a manifest reader
   */
  public static ManifestReader inMemory(PartitionSpec spec, Iterable<ManifestEntry> entries) {
    return new ManifestReader(spec, entries);
  }

  private final InputFile file;
  private final Iterable<ManifestEntry> entries;
  private final Map<String, String> metadata;
  private final PartitionSpec spec;
  private final Schema schema;

  // lazily initialized by cacheChanges()
  private List<ManifestEntry> adds = null;
  private List<ManifestEntry> deletes = null;

  private ManifestReader(InputFile file) {
    this.file = file;
    try {
      // read only the Avro file header to get the schema and spec metadata
      try (AvroIterable<ManifestEntry> headerReader = Avro.read(file)
          .project(ManifestEntry.getSchema(Types.StructType.of()).select("status"))
          .build()) {
        this.metadata = headerReader.getMetadata();
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
    this.schema = SchemaParser.fromJson(metadata.get("schema"));
    int specId = TableMetadata.INITIAL_SPEC_ID;
    String specProperty = metadata.get("partition-spec-id");
    if (specProperty != null) {
      specId = Integer.parseInt(specProperty);
    }
    this.spec = PartitionSpecParser.fromJsonFields(schema, specId, metadata.get("partition-spec"));
    this.entries = null;
  }

  private ManifestReader(PartitionSpec spec, Iterable<ManifestEntry> entries) {
    this.file = null;
    this.metadata = ImmutableMap.of();
    this.spec = spec;
    this.schema = spec.schema();
    this.entries = entries;
  }

  /** Returns the backing InputFile, or null for an in-memory reader. */
  public InputFile file() {
    return file;
  }

  public Schema schema() {
    return schema;
  }

  public PartitionSpec spec() {
    return spec;
  }

  @Override
  public Iterator<DataFile> iterator() {
    return iterator(alwaysTrue(), ALL_COLUMNS);
  }

  @Override
  public FilteredManifest select(Collection<String> columns) {
    return new FilteredManifest(this, alwaysTrue(), alwaysTrue(), Lists.newArrayList(columns));
  }

  @Override
  public FilteredManifest filterPartitions(Expression expr) {
    return new FilteredManifest(this, expr, alwaysTrue(), ALL_COLUMNS);
  }

  @Override
  public FilteredManifest filterRows(Expression expr) {
    // project the row filter to an inclusive partition filter for pruning
    return new FilteredManifest(this, Projections.inclusive(spec).project(expr), expr, ALL_COLUMNS);
  }

  /** Returns entries with ADDED status; scans and caches changes on first call. */
  public List<ManifestEntry> addedFiles() {
    if (adds == null) {
      cacheChanges();
    }
    return adds;
  }

  /** Returns entries with DELETED status; scans and caches changes on first call. */
  public List<ManifestEntry> deletedFiles() {
    if (deletes == null) {
      cacheChanges();
    }
    return deletes;
  }

  private void cacheChanges() {
    // locals renamed so they no longer shadow the adds/deletes fields
    List<ManifestEntry> addedEntries = Lists.newArrayList();
    List<ManifestEntry> deletedEntries = Lists.newArrayList();
    for (ManifestEntry entry : entries(CHANGE_COLUMNS)) {
      switch (entry.status()) {
        case ADDED:
          // copy because the reader reuses entry containers
          addedEntries.add(entry.copy());
          break;
        case DELETED:
          deletedEntries.add(entry.copy());
          break;
        default:
      }
    }
    this.adds = addedEntries;
    this.deletes = deletedEntries;
  }

  CloseableIterable<ManifestEntry> entries() {
    return entries(ALL_COLUMNS);
  }

  CloseableIterable<ManifestEntry> entries(Collection<String> columns) {
    if (entries != null) {
      // if this reader is an in-memory list or if the entries have been cached, return the list.
      return CloseableIterable.withNoopClose(entries);
    }

    FileFormat format = FileFormat.fromFileName(file.location());
    Preconditions.checkArgument(format != null, "Unable to determine format of manifest: %s", file);

    // renamed from "schema" to avoid shadowing the schema field
    Schema projection = ManifestEntry.projectSchema(spec.partitionType(), columns);
    switch (format) {
      case AVRO:
        AvroIterable<ManifestEntry> reader = Avro.read(file)
            .project(projection)
            .rename("manifest_entry", ManifestEntry.class.getName())
            .rename("partition", PartitionData.class.getName())
            .rename("r102", PartitionData.class.getName())
            .rename("data_file", GenericDataFile.class.getName())
            .rename("r2", GenericDataFile.class.getName())
            .reuseContainers()
            .build();
        addCloseable(reader);
        return reader;
      default:
        throw new UnsupportedOperationException("Invalid format for manifest file: " + format);
    }
  }

  // visible for use by PartialManifest
  Iterator<DataFile> iterator(Expression partFilter, Collection<String> columns) {
    return Iterables.transform(Iterables.filter(
        entries(columns),
        entry -> entry.status() != DELETED),
        ManifestEntry::file).iterator();
  }
}
| 1,967 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.OutputFile;
/**
 * SPI interface to abstract table metadata access and updates.
 */
public interface TableOperations {

  /**
   * Return the currently loaded table metadata, without checking for updates.
   *
   * @return table metadata
   */
  TableMetadata current();

  /**
   * Return the current table metadata after checking for updates.
   *
   * @return table metadata
   */
  TableMetadata refresh();

  /**
   * Replace the base table metadata with a new version.
   * <p>
   * This method should implement and document atomicity guarantees.
   * <p>
   * Implementations must check that the base metadata is current to avoid overwriting updates.
   * Once the atomic commit operation succeeds, implementations must not perform any operations that
   * may fail because failure in this method cannot be distinguished from commit failure.
   *
   * @param base table metadata on which changes were based
   * @param metadata new table metadata with updates
   */
  void commit(TableMetadata base, TableMetadata metadata);

  /**
   * @return a {@link com.netflix.iceberg.FileIO} to read and write table data and metadata files
   */
  FileIO io();

  /**
   * Given the name of a metadata file, obtain the full path of that file using an appropriate base
   * location of the implementation's choosing.
   * <p>
   * The file may not exist yet, in which case the path should be returned as if it were to be created
   * by e.g. {@link FileIO#newOutputFile(String)}.
   *
   * @param fileName the metadata file name, without any path
   * @return the full path for the metadata file
   */
  String metadataFileLocation(String fileName);

  /**
   * Create a new ID for a Snapshot
   *
   * @return a long snapshot ID
   */
  long newSnapshotId();
}
| 1,968 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestGroup.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveManifestEvaluator;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Types;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
/**
 * Immutable helper that plans reads over a set of manifests, combining a data (partition)
 * filter, a file filter, an optional ignore-deleted flag, and a column projection.
 * <p>
 * Each with-style method returns a new ManifestGroup; this class never mutates its inputs.
 */
class ManifestGroup {
  private static final Types.StructType EMPTY_STRUCT = Types.StructType.of();

  private final TableOperations ops;
  private final Set<ManifestFile> manifests;
  private final Expression dataFilter;
  private final Expression fileFilter;
  private final boolean ignoreDeleted;
  private final List<String> columns;

  // per-spec inclusive evaluators for pruning manifests by partition summaries
  // (renamed from EVAL_CACHE: this is an instance field, not a constant)
  private final LoadingCache<Integer, InclusiveManifestEvaluator> evalCache = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<Integer, InclusiveManifestEvaluator>() {
        @Override
        public InclusiveManifestEvaluator load(Integer specId) {
          PartitionSpec spec = ops.current().spec(specId);
          return new InclusiveManifestEvaluator(spec, dataFilter);
        }
      });

  ManifestGroup(TableOperations ops, Iterable<ManifestFile> manifests) {
    this(ops, Sets.newHashSet(manifests), Expressions.alwaysTrue(), Expressions.alwaysTrue(),
        false, ImmutableList.of("*"));
  }

  private ManifestGroup(TableOperations ops, Set<ManifestFile> manifests,
                        Expression dataFilter, Expression fileFilter, boolean ignoreDeleted,
                        List<String> columns) {
    this.ops = ops;
    this.manifests = manifests;
    this.dataFilter = dataFilter;
    this.fileFilter = fileFilter;
    this.ignoreDeleted = ignoreDeleted;
    this.columns = columns;
  }

  public ManifestGroup filterData(Expression expr) {
    return new ManifestGroup(
        ops, manifests, Expressions.and(dataFilter, expr), fileFilter, ignoreDeleted, columns);
  }

  public ManifestGroup filterFiles(Expression expr) {
    return new ManifestGroup(
        ops, manifests, dataFilter, Expressions.and(fileFilter, expr), ignoreDeleted, columns);
  }

  public ManifestGroup ignoreDeleted() {
    return new ManifestGroup(ops, manifests, dataFilter, fileFilter, true, columns);
  }

  public ManifestGroup select(List<String> columns) {
    return new ManifestGroup(
        ops, manifests, dataFilter, fileFilter, ignoreDeleted, Lists.newArrayList(columns));
  }

  public ManifestGroup select(String... columns) {
    return select(Arrays.asList(columns));
  }

  /**
   * Returns an iterable for manifest entries in the set of manifests.
   * <p>
   * Entries are not copied and it is the caller's responsibility to make defensive copies if
   * adding these entries to a collection.
   *
   * @return a CloseableIterable of manifest entries.
   */
  public CloseableIterable<ManifestEntry> entries() {
    Evaluator evaluator = new Evaluator(DataFile.getType(EMPTY_STRUCT), fileFilter);
    List<Closeable> toClose = Lists.newArrayList();

    Iterable<ManifestFile> matchingManifests = Iterables.filter(manifests,
        manifest -> evalCache.getUnchecked(manifest.partitionSpecId()).eval(manifest));

    if (ignoreDeleted) {
      // remove any manifests that don't have any existing or added files. if either the added or
      // existing files count is missing, the manifest must be scanned.
      // BUG FIX: filter the already partition-pruned manifests, not the original set;
      // filtering `manifests` here silently discarded the partition evaluator filter above.
      matchingManifests = Iterables.filter(matchingManifests, manifest ->
          manifest.addedFilesCount() == null || manifest.existingFilesCount() == null ||
              manifest.addedFilesCount() + manifest.existingFilesCount() > 0);
    }

    Iterable<Iterable<ManifestEntry>> readers = Iterables.transform(
        matchingManifests,
        manifest -> {
          ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()));
          FilteredManifest filtered = reader.filterRows(dataFilter).select(columns);
          toClose.add(reader);
          return Iterables.filter(
              ignoreDeleted ? filtered.liveEntries() : filtered.allEntries(),
              entry -> evaluator.eval((GenericDataFile) entry.file()));
        });

    return CloseableIterable.combine(Iterables.concat(readers), toClose);
  }
}
| 1,969 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ReplaceFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import java.util.Set;
/**
 * {@link RewriteFiles} implementation that atomically replaces one set of data files
 * with another in a single snapshot.
 */
class ReplaceFiles extends MergingSnapshotUpdate implements RewriteFiles {
  ReplaceFiles(TableOperations ops) {
    super(ops);
    // replace files must fail if any of the deleted paths is missing and cannot be deleted
    failMissingDeletePaths();
  }

  @Override
  public RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd) {
    Preconditions.checkArgument(filesToDelete != null && !filesToDelete.isEmpty(),
        "Files to delete cannot be null or empty");
    // message wording made consistent with the check above (was "can not")
    Preconditions.checkArgument(filesToAdd != null && !filesToAdd.isEmpty(),
        "Files to add cannot be null or empty");

    for (DataFile toDelete : filesToDelete) {
      delete(toDelete.path());
    }

    for (DataFile toAdd : filesToAdd) {
      add(toAdd);
    }

    return this;
  }
}
| 1,970 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestListWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
/**
 * Writes the manifest list file for a snapshot by delegating to an Avro
 * {@link FileAppender} over the manifest_file schema.
 */
class ManifestListWriter implements FileAppender<ManifestFile> {
  private final FileAppender<ManifestFile> writer;

  ManifestListWriter(OutputFile snapshotFile, long snapshotId, Long parentSnapshotId) {
    // NOTE(review): String.valueOf(parentSnapshotId) writes the literal string "null"
    // when there is no parent snapshot — confirm readers of this metadata expect that
    try {
      this.writer = Avro.write(snapshotFile)
          .schema(ManifestFile.schema())
          .named("manifest_file")
          .meta(ImmutableMap.of(
              "snapshot-id", String.valueOf(snapshotId),
              "parent-snapshot-id", String.valueOf(parentSnapshotId)))
          .build();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create snapshot list writer for path: " + snapshotFile);
    }
  }

  @Override
  public void add(ManifestFile file) {
    writer.add(file);
  }

  @Override
  public void addAll(Iterator<ManifestFile> values) {
    writer.addAll(values);
  }

  @Override
  public void addAll(Iterable<ManifestFile> values) {
    writer.addAll(values);
  }

  @Override
  public Metrics metrics() {
    return writer.metrics();
  }

  @Override
  public void close() throws IOException {
    writer.close();
  }
}
| 1,971 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ReplacePartitionsOperation.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expressions;
import java.util.List;
/**
 * {@link ReplacePartitions} implementation: every partition touched by an added file
 * is dropped and replaced by the new files in one snapshot.
 */
public class ReplacePartitionsOperation extends MergingSnapshotUpdate implements ReplacePartitions {
  ReplacePartitionsOperation(TableOperations ops) {
    super(ops);
  }

  @Override
  public ReplacePartitions addFile(DataFile file) {
    // replace the file's entire partition, then add the file
    dropPartition(file.partition());
    add(file);
    return this;
  }

  @Override
  public ReplacePartitions validateAppendOnly() {
    failAnyDelete();
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    // idiom: isEmpty() instead of size() <= 0
    if (writeSpec().fields().isEmpty()) {
      // replace all data in an unpartitioned table
      deleteByRowFilter(Expressions.alwaysTrue());
    }

    try {
      return super.apply(base);
    } catch (DeleteException e) {
      throw new ValidationException(
          "Cannot commit file that conflicts with existing partition: %s", e.partition());
    }
  }
}
| 1,972 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.util.Tasks;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * Base {@link Transaction} implementation.
 * <p>
 * Pending updates are committed against an in-memory copy of the table metadata; only
 * {@link #commitTransaction()} writes to the real table, retrying per the table's commit
 * properties. Not designed for concurrent use from multiple threads.
 */
class BaseTransaction implements Transaction {
  // controls how the final commit is applied to the underlying table
  private enum TransactionType {
    CREATE_TABLE,
    REPLACE_TABLE,
    SIMPLE
  }

  static Transaction replaceTableTransaction(TableOperations ops, TableMetadata start) {
    return new BaseTransaction(ops, start);
  }

  static Transaction createTableTransaction(TableOperations ops, TableMetadata start) {
    Preconditions.checkArgument(ops.current() == null,
        "Cannot start create table transaction: table already exists");
    return new BaseTransaction(ops, start);
  }

  static Transaction newTransaction(TableOperations ops) {
    return new BaseTransaction(ops, ops.refresh());
  }

  // exposed for testing
  final TableOperations ops;
  private final TransactionTable transactionTable;   // Table facade handed to callers
  private final TableOperations transactionOps;      // ops used by pending updates; commits stay in memory
  private final List<PendingUpdate> updates;         // re-applied in order when a retry is needed
  private final Set<Long> intermediateSnapshotIds;   // snapshots superseded within this transaction
  private TransactionType type;
  private TableMetadata base;     // last metadata observed from the real table
  private TableMetadata lastBase; // value of current when the previous operation was created
  private TableMetadata current;  // metadata with all in-transaction commits applied

  private BaseTransaction(TableOperations ops, TableMetadata start) {
    this.ops = ops;
    this.transactionTable = new TransactionTable();
    this.transactionOps = new TransactionTableOperations();
    this.updates = Lists.newArrayList();
    this.intermediateSnapshotIds = Sets.newHashSet();
    this.base = ops.current();
    // infer the transaction type from how start relates to the table's current metadata
    if (base == null && start != null) {
      this.type = TransactionType.CREATE_TABLE;
    } else if (base != null && start != base) {
      this.type = TransactionType.REPLACE_TABLE;
    } else {
      this.type = TransactionType.SIMPLE;
    }
    this.lastBase = null;
    this.current = start;
  }

  @Override
  public Table table() {
    return transactionTable;
  }

  // fails unless the previous operation committed (which replaces current); then marks
  // the boundary so the next operation can be validated the same way
  private void checkLastOperationCommitted(String operation) {
    Preconditions.checkState(lastBase != current,
        "Cannot create new %s: last operation has not committed", operation);
    this.lastBase = current;
  }

  @Override
  public UpdateProperties updateProperties() {
    checkLastOperationCommitted("UpdateProperties");
    UpdateProperties props = new PropertiesUpdate(transactionOps);
    updates.add(props);
    return props;
  }

  @Override
  public AppendFiles newAppend() {
    checkLastOperationCommitted("AppendFiles");
    AppendFiles append = new MergeAppend(transactionOps);
    updates.add(append);
    return append;
  }

  @Override
  public RewriteFiles newRewrite() {
    checkLastOperationCommitted("RewriteFiles");
    RewriteFiles rewrite = new ReplaceFiles(transactionOps);
    updates.add(rewrite);
    return rewrite;
  }

  @Override
  public OverwriteFiles newOverwrite() {
    checkLastOperationCommitted("OverwriteFiles");
    OverwriteFiles overwrite = new OverwriteData(transactionOps);
    updates.add(overwrite);
    return overwrite;
  }

  @Override
  public ReplacePartitions newReplacePartitions() {
    checkLastOperationCommitted("ReplacePartitions");
    ReplacePartitionsOperation replacePartitions = new ReplacePartitionsOperation(transactionOps);
    updates.add(replacePartitions);
    return replacePartitions;
  }

  @Override
  public DeleteFiles newDelete() {
    checkLastOperationCommitted("DeleteFiles");
    DeleteFiles delete = new StreamingDelete(transactionOps);
    updates.add(delete);
    return delete;
  }

  @Override
  public ExpireSnapshots expireSnapshots() {
    checkLastOperationCommitted("ExpireSnapshots");
    ExpireSnapshots expire = new RemoveSnapshots(transactionOps);
    updates.add(expire);
    return expire;
  }

  @Override
  public void commitTransaction() {
    Preconditions.checkState(lastBase != current,
        "Cannot commit transaction: last operation has not committed");

    switch (type) {
      case CREATE_TABLE:
        // fix up the snapshot log, which should not contain intermediate snapshots
        TableMetadata createMetadata = current.removeSnapshotLogEntries(intermediateSnapshotIds);

        // this operation creates the table. if the commit fails, this cannot retry because another
        // process has created the same table.
        ops.commit(null, createMetadata);
        break;

      case REPLACE_TABLE:
        // fix up the snapshot log, which should not contain intermediate snapshots
        TableMetadata replaceMetadata = current.removeSnapshotLogEntries(intermediateSnapshotIds);

        // retry parameters come from the base metadata's table properties
        Tasks.foreach(ops)
            .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
            .exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */)
            .onlyRetryOn(CommitFailedException.class)
            .run(ops -> {
              // because this is a replace table, it will always completely replace the table
              // metadata. even if it was just updated.
              if (base != ops.refresh()) {
                this.base = ops.current(); // just refreshed
              }
              ops.commit(base, replaceMetadata);
            });
        break;

      case SIMPLE:
        // if there were no changes, don't try to commit
        if (base == current) {
          return;
        }

        Tasks.foreach(ops)
            .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
            .exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */)
            .onlyRetryOn(CommitFailedException.class)
            .run(ops -> {
              if (base != ops.refresh()) {
                // the table changed concurrently: rebase by re-applying every update in order
                this.base = ops.current(); // just refreshed
                this.current = base;
                for (PendingUpdate update : updates) {
                  // re-commit each update in the chain to apply it and update current
                  update.commit();
                }
              }

              // fix up the snapshot log, which should not contain intermediate snapshots
              ops.commit(base, current.removeSnapshotLogEntries(intermediateSnapshotIds));
            });
        break;
    }
  }

  // returns the current snapshot id of the metadata, or null when there is no metadata
  // or no current snapshot
  private static Long currentId(TableMetadata meta) {
    if (meta != null) {
      if (meta.currentSnapshot() != null) {
        return meta.currentSnapshot().snapshotId();
      }
    }
    return null;
  }

  /**
   * {@link TableOperations} used by the transaction's pending updates: commits only replace
   * the transaction's in-memory metadata, never the real table.
   */
  public class TransactionTableOperations implements TableOperations {
    @Override
    public TableMetadata current() {
      return current;
    }

    @Override
    public TableMetadata refresh() {
      // in-memory state is always "fresh" within the transaction
      return current;
    }

    @Override
    public void commit(TableMetadata base, TableMetadata metadata) {
      if (base != current) {
        // trigger a refresh and retry
        throw new CommitFailedException("Table metadata refresh is required");
      }

      // track the intermediate snapshot ids for rewriting the snapshot log
      // an id is intermediate if it isn't the base snapshot id and it is replaced by a new current
      Long oldId = currentId(current);
      if (oldId != null && !oldId.equals(currentId(metadata)) && !oldId.equals(currentId(base))) {
        intermediateSnapshotIds.add(oldId);
      }

      BaseTransaction.this.current = metadata;
    }

    @Override
    public FileIO io() {
      return ops.io();
    }

    @Override
    public String metadataFileLocation(String fileName) {
      return ops.metadataFileLocation(fileName);
    }

    @Override
    public long newSnapshotId() {
      return ops.newSnapshotId();
    }
  }

  /**
   * Read-mostly {@link Table} view of the transaction's pending state. Update factory methods
   * delegate to the transaction; scans, schema changes, and rollback are unsupported.
   */
  public class TransactionTable implements Table {
    @Override
    public void refresh() {
      // no-op: the transaction's in-memory metadata is always current
    }

    @Override
    public TableScan newScan() {
      throw new UnsupportedOperationException("Transaction tables do not support scans");
    }

    @Override
    public Schema schema() {
      return current.schema();
    }

    @Override
    public PartitionSpec spec() {
      return current.spec();
    }

    @Override
    public Map<String, String> properties() {
      return current.properties();
    }

    @Override
    public String location() {
      return current.location();
    }

    @Override
    public Snapshot currentSnapshot() {
      return current.currentSnapshot();
    }

    @Override
    public Iterable<Snapshot> snapshots() {
      return current.snapshots();
    }

    @Override
    public UpdateSchema updateSchema() {
      throw new UnsupportedOperationException("Transaction tables do not support schema updates");
    }

    @Override
    public UpdateProperties updateProperties() {
      return BaseTransaction.this.updateProperties();
    }

    @Override
    public AppendFiles newAppend() {
      return BaseTransaction.this.newAppend();
    }

    @Override
    public RewriteFiles newRewrite() {
      return BaseTransaction.this.newRewrite();
    }

    @Override
    public OverwriteFiles newOverwrite() {
      return BaseTransaction.this.newOverwrite();
    }

    @Override
    public ReplacePartitions newReplacePartitions() {
      return BaseTransaction.this.newReplacePartitions();
    }

    @Override
    public DeleteFiles newDelete() {
      return BaseTransaction.this.newDelete();
    }

    @Override
    public ExpireSnapshots expireSnapshots() {
      return BaseTransaction.this.expireSnapshots();
    }

    @Override
    public Rollback rollback() {
      throw new UnsupportedOperationException("Transaction tables do not support rollback");
    }

    @Override
    public Transaction newTransaction() {
      throw new UnsupportedOperationException("Cannot create a transaction within a transaction");
    }
  }
}
| 1,973 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SerializableByteBufferMap.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Maps;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
/**
 * A {@code Map<Integer, ByteBuffer>} wrapper that supports Java serialization.
 * <p>
 * {@link ByteBuffer} is not serializable, so this wrapper serializes through a proxy that
 * stores keys and value bytes as primitive arrays. All map operations delegate to the
 * wrapped map; iteration order is preserved by using linked maps.
 */
class SerializableByteBufferMap implements Map<Integer, ByteBuffer>, Serializable {
  private final Map<Integer, ByteBuffer> wrapped;

  /**
   * Wraps the given map so it can be serialized; returns the map unchanged if it is null
   * or already wrapped.
   */
  static Map<Integer, ByteBuffer> wrap(Map<Integer, ByteBuffer> map) {
    if (map == null) {
      return null;
    }
    if (map instanceof SerializableByteBufferMap) {
      return map;
    }
    return new SerializableByteBufferMap(map);
  }

  public SerializableByteBufferMap() {
    this.wrapped = new LinkedHashMap<>();
  }

  private SerializableByteBufferMap(Map<Integer, ByteBuffer> wrapped) {
    this.wrapped = wrapped;
  }

  /** Serialization proxy that carries keys and value bytes as arrays. */
  private static class MapSerializationProxy implements Serializable {
    private int[] keys = null;
    private byte[][] values = null;

    /**
     * Constructor for Java serialization.
     */
    MapSerializationProxy() {
    }

    MapSerializationProxy(int[] keys, byte[][] values) {
      this.keys = keys;
      this.values = values;
    }

    Object readResolve() throws ObjectStreamException {
      Map<Integer, ByteBuffer> map = new LinkedHashMap<>();
      for (int i = 0; i < keys.length; i += 1) {
        map.put(keys[i], ByteBuffer.wrap(values[i]));
      }
      return SerializableByteBufferMap.wrap(map);
    }
  }

  Object writeReplace() throws ObjectStreamException {
    Collection<Map.Entry<Integer, ByteBuffer>> entries = wrapped.entrySet();
    int[] keys = new int[entries.size()];
    byte[][] values = new byte[keys.length][];

    int i = 0;
    for (Map.Entry<Integer, ByteBuffer> entry : entries) {
      keys[i] = entry.getKey();
      values[i] = copy(entry.getValue());
      i += 1;
    }

    return new MapSerializationProxy(keys, values);
  }

  /**
   * Returns the remaining content of the buffer as a byte array without changing the
   * buffer's position.
   */
  private byte[] copy(ByteBuffer buffer) {
    if (buffer.hasArray()) {
      byte[] array = buffer.array();
      if (buffer.arrayOffset() == 0 && buffer.position() == 0 && array.length == buffer.remaining()) {
        // the backing array exactly matches the buffer content; reuse it without copying
        return array;
      } else {
        int start = buffer.arrayOffset() + buffer.position();
        int end = start + buffer.remaining();
        return Arrays.copyOfRange(array, start, end);
      }
    } else {
      byte[] bytes = new byte[buffer.remaining()];
      // read through a duplicate so the source buffer's position is not consumed
      // (previously, serializing the map advanced the positions of direct buffers)
      buffer.duplicate().get(bytes);
      return bytes;
    }
  }

  // all Map operations below delegate directly to the wrapped map

  @Override
  public int size() {
    return wrapped.size();
  }

  @Override
  public boolean isEmpty() {
    return wrapped.isEmpty();
  }

  @Override
  public boolean containsKey(Object key) {
    return wrapped.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return wrapped.containsValue(value);
  }

  @Override
  public ByteBuffer get(Object key) {
    return wrapped.get(key);
  }

  @Override
  public ByteBuffer put(Integer key, ByteBuffer value) {
    return wrapped.put(key, value);
  }

  @Override
  public ByteBuffer remove(Object key) {
    return wrapped.remove(key);
  }

  @Override
  public void putAll(Map<? extends Integer, ? extends ByteBuffer> m) {
    wrapped.putAll(m);
  }

  @Override
  public void clear() {
    wrapped.clear();
  }

  @Override
  public Set<Integer> keySet() {
    return wrapped.keySet();
  }

  @Override
  public Collection<ByteBuffer> values() {
    return wrapped.values();
  }

  @Override
  public Set<Entry<Integer, ByteBuffer>> entrySet() {
    return wrapped.entrySet();
  }

  @Override
  public boolean equals(Object o) {
    return wrapped.equals(o);
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }
}
| 1,974 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseCombinedScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import java.util.Collection;
import java.util.List;
public class BaseCombinedScanTask implements CombinedScanTask {
private final List<FileScanTask> tasks;
public BaseCombinedScanTask(FileScanTask... tasks) {
this.tasks = ImmutableList.copyOf(tasks);
}
public BaseCombinedScanTask(List<FileScanTask> tasks) {
this.tasks = ImmutableList.copyOf(tasks);
}
@Override
public Collection<FileScanTask> files() {
return tasks;
}
}
| 1,975 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/CharSequenceWrapper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.netflix.iceberg.types.Comparators;
/**
* Wrapper class to adapt CharSequence for use in maps and sets.
*/
/**
 * Wrapper class to adapt CharSequence for use in maps and sets.
 * <p>
 * Equality is defined by character-wise comparison of the wrapped sequences, so two
 * different CharSequence implementations with the same characters compare equal.
 */
public class CharSequenceWrapper {
  public static CharSequenceWrapper wrap(CharSequence seq) {
    return new CharSequenceWrapper(seq);
  }

  private CharSequence wrapped;

  private CharSequenceWrapper(CharSequence wrapped) {
    this.wrapped = wrapped;
  }

  /** Re-points this wrapper at another sequence, allowing reuse for lookups. */
  public CharSequenceWrapper set(CharSequence wrapped) {
    this.wrapped = wrapped;
    return this;
  }

  public CharSequence get() {
    return wrapped;
  }

  @Override
  public boolean equals(Object other) {
    if (other == this) {
      return true;
    }
    if (other == null || other.getClass() != getClass()) {
      return false;
    }

    CharSequenceWrapper wrapper = (CharSequenceWrapper) other;
    // compare by characters, not by CharSequence implementation
    return Comparators.charSequences().compare(wrapped, wrapper.wrapped) == 0;
  }

  @Override
  public int hashCode() {
    // hash the characters directly so equal sequences hash alike regardless of type
    int hash = 177;
    int length = wrapped.length();
    for (int pos = 0; pos < length; pos += 1) {
      hash = 31 * hash + wrapped.charAt(pos);
    }
    return hash;
  }
}
| 1,976 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ParallelIterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * An Iterable that produces the elements of many inner iterables, reading them in parallel.
 * <p>
 * One task on {@code trackingPool} fans the inner iterables out to {@code workerPool};
 * items are funneled through an unbounded concurrent queue in arrival order (no ordering
 * guarantee across inner iterables).
 */
public class ParallelIterable<T> implements Iterable<T> {
  private final Iterable<Iterable<T>> iterables;
  private final ExecutorService trackingPool; // runs the single coordinating task
  private final ExecutorService workerPool;   // consumes the inner iterables

  public ParallelIterable(Iterable<Iterable<T>> iterables,
                          ExecutorService trackingPool,
                          ExecutorService workerPool) {
    this.iterables = iterables;
    this.trackingPool = trackingPool;
    this.workerPool = workerPool;
  }

  @Override
  public Iterator<T> iterator() {
    // each iterator starts its own coordinating task and owns its own queue
    return new ParallelIterator<>(iterables, trackingPool, workerPool);
  }

  private static class ParallelIterator<T> implements Iterator<T> {
    private final ConcurrentLinkedQueue<T> queue = new ConcurrentLinkedQueue<>();
    private final Future<?> taskFuture; // completes when all inner iterables are drained

    public ParallelIterator(Iterable<Iterable<T>> iterables,
                            ExecutorService trackingPool,
                            ExecutorService workerPool) {
      this.taskFuture = trackingPool.submit(() -> {
        // stop on the first failure; the failure is rethrown from hasNext() via the future
        Tasks.foreach(iterables)
            .noRetry().stopOnFailure().throwFailureWhenFinished()
            .executeWith(workerPool)
            .run(iterable -> {
              for (T item : iterable) {
                queue.add(item);
              }
            });
        return true;
      });
    }

    @Override
    public synchronized boolean hasNext() {
      // this cannot conclude that there are no more records until tasks have finished. while some
      // are running, return true when there is at least one item to return.
      while (!taskFuture.isDone()) {
        if (!queue.isEmpty()) {
          return true;
        }

        try {
          // wait briefly for completion, then re-check the queue
          taskFuture.get(10, TimeUnit.MILLISECONDS);
          break;
        } catch (InterruptedException e) {
          // restore the interrupt flag before propagating
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        } catch (ExecutionException e) {
          // rethrow the producing task's failure to the consumer
          ExceptionUtil.castAndThrow(e.getCause(), RuntimeException.class);
        } catch (TimeoutException e) {
          // continue looping to check the queue size and wait again
        }
      }

      // when tasks are no longer running, return whether the queue has items
      return !queue.isEmpty();
    }

    @Override
    public synchronized T next() {
      // use hasNext to block until there is an available record
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return queue.poll();
    }
  }
}
| 1,977 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/JsonUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
 * Utility methods for reading typed values out of Jackson {@link JsonNode} trees.
 * <p>
 * All getters require the property to be present, non-null, and of the expected JSON
 * type; otherwise an {@link IllegalArgumentException} is thrown via Preconditions.
 */
public class JsonUtil {

  private JsonUtil() {
    // utility class: no instances
  }

  private static final JsonFactory FACTORY = new JsonFactory();
  private static final ObjectMapper MAPPER = new ObjectMapper(FACTORY);

  public static JsonFactory factory() {
    return FACTORY;
  }

  public static ObjectMapper mapper() {
    return MAPPER;
  }

  /** Returns the named property as an int, failing if it is missing or non-numeric. */
  public static int getInt(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing int %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isNumber(),
        "Cannot parse %s from non-numeric value: %s", property, pNode);
    return pNode.asInt();
  }

  /** Returns the named property as a long, failing if it is missing or non-numeric. */
  public static long getLong(String property, JsonNode node) {
    // message previously said "int"; fixed to match the method's type
    Preconditions.checkArgument(node.has(property), "Cannot parse missing long %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isNumber(),
        "Cannot parse %s from non-numeric value: %s", property, pNode);
    return pNode.asLong();
  }

  /** Returns the named property as a boolean, failing if it is missing or non-boolean. */
  public static boolean getBool(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing boolean %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isBoolean(),
        "Cannot parse %s from non-boolean value: %s", property, pNode);
    return pNode.asBoolean();
  }

  /** Returns the named property as a string, failing if it is missing or non-textual. */
  public static String getString(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing string %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isTextual(),
        "Cannot parse %s from non-string value: %s", property, pNode);
    return pNode.asText();
  }

  /**
   * Returns the named property as an immutable string-to-string map, failing if it is
   * missing, not an object, or has any non-string value.
   */
  public static Map<String, String> getStringMap(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing map %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isObject(),
        "Cannot parse %s from non-object value: %s", property, pNode);

    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    Iterator<String> fields = pNode.fieldNames();
    while (fields.hasNext()) {
      String field = fields.next();
      builder.put(field, getString(field, pNode));
    }
    return builder.build();
  }

  /**
   * Returns the named property as an immutable list of strings, failing if it is missing,
   * not an array, or has any non-text element.
   */
  public static List<String> getStringList(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing list %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isArray(),
        "Cannot parse %s from non-array value: %s", property, pNode);

    ImmutableList.Builder<String> builder = ImmutableList.builder();
    Iterator<JsonNode> elements = pNode.elements();
    while (elements.hasNext()) {
      JsonNode element = elements.next();
      Preconditions.checkArgument(element.isTextual(),
          "Cannot parse string from non-text value: %s", element);
      builder.add(element.asText());
    }
    return builder.build();
  }
}
| 1,978 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/StructLikeWrapper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.netflix.iceberg.StructLike;
import java.util.Objects;
/**
* Wrapper to adapt StructLike for use in maps and sets by implementing equals and hashCode.
*/
/**
 * Wrapper to adapt StructLike for use in maps and sets by implementing equals and hashCode.
 * <p>
 * Equality is field-by-field over {@code get(i, Object.class)}; null field values and a
 * null wrapped struct are supported.
 */
public class StructLikeWrapper {
  public static StructLikeWrapper wrap(StructLike struct) {
    return new StructLikeWrapper(struct);
  }

  private StructLike struct;

  private StructLikeWrapper(StructLike struct) {
    this.struct = struct;
  }

  /** Re-points this wrapper at another struct, allowing reuse for lookups. */
  public StructLikeWrapper set(StructLike struct) {
    this.struct = struct;
    return this;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }

    StructLikeWrapper that = (StructLikeWrapper) other;
    if (this.struct == null || that.struct == null) {
      // equal only when both wrap null
      return this.struct == that.struct;
    }

    int len = struct.size();
    if (len != that.struct.size()) {
      return false;
    }

    for (int i = 0; i < len; i += 1) {
      // Objects.equals tolerates null field values, which previously caused an NPE
      if (!Objects.equals(struct.get(i, Object.class), that.struct.get(i, Object.class))) {
        return false;
      }
    }

    return true;
  }

  @Override
  public int hashCode() {
    // a null struct must hash consistently because equals() supports it
    if (struct == null) {
      return 97;
    }
    int result = 97;
    int len = struct.size();
    result = 41 * result + len;
    for (int i = 0; i < len; i += 1) {
      // null-safe: hash null field values as 0 instead of throwing
      result = 41 * result + Objects.hashCode(struct.get(i, Object.class));
    }
    return result;
  }
}
| 1,979 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ExceptionUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
/**
 * Utility for rethrowing {@link Throwable} instances without unnecessary wrapping.
 */
public class ExceptionUtil {

  private ExceptionUtil() {
    // utility class: no instances
  }

  /**
   * Rethrows {@code e} unchanged when it is a RuntimeException, an Error, or an instance
   * of {@code exceptionClass}; otherwise wraps it in a RuntimeException.
   *
   * @param e the throwable to rethrow
   * @param exceptionClass the checked exception type that may be thrown unwrapped
   * @throws E when {@code e} is an instance of {@code exceptionClass}
   */
  @SuppressWarnings("unchecked")
  static <E extends Exception> void castAndThrow(
      Throwable e, Class<E> exceptionClass) throws E {
    if (e instanceof RuntimeException) {
      throw (RuntimeException) e;
    } else if (e instanceof Error) {
      throw (Error) e;
    } else if (exceptionClass.isInstance(e)) {
      throw (E) e;
    }
    throw new RuntimeException(e);
  }
}
| 1,980 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Tasks.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
public class Tasks {
private static final Logger LOG = LoggerFactory.getLogger(Tasks.class);
/**
 * Exception that marks a failure as permanent: it is in the default stop-retry list, so
 * throwing it from a task prevents any further retry attempts.
 */
public static class UnrecoverableException extends RuntimeException {
  public UnrecoverableException(String message) {
    super(message);
  }

  public UnrecoverableException(String message, Throwable cause) {
    super(message, cause);
  }

  public UnrecoverableException(Throwable cause) {
    super(cause);
  }
}
/**
 * Callback invoked for an item after the main task failed on it.
 *
 * @param <I> the item type
 * @param <E> the checked exception type the callback may throw
 */
public interface FailureTask<I, E extends Exception> {
  void run(I item, Exception exception) throws E;
}
/**
 * A task applied to each item of a collection.
 *
 * @param <I> the item type
 * @param <E> the checked exception type the task may throw
 */
public interface Task<I, E extends Exception> {
  void run(I item) throws E;
}
public static class Builder<I> {
private final Iterable<I> items;
private ExecutorService service = null;
private FailureTask<I, ?> onFailure = null;
private boolean stopOnFailure = false;
private boolean throwFailureWhenFinished = true;
private Task<I, ?> revertTask = null;
private boolean stopRevertsOnFailure = false;
private Task<I, ?> abortTask = null;
private boolean stopAbortsOnFailure = false;
// retry settings
@SuppressWarnings("unchecked")
private List<Class<? extends Exception>> stopRetryExceptions = Lists.newArrayList(
UnrecoverableException.class);
private List<Class<? extends Exception>> onlyRetryExceptions = null;
private int maxAttempts = 1; // not all operations can be retried
private long minSleepTimeMs = 1000; // 1 second
private long maxSleepTimeMs = 600000; // 10 minutes
private long maxDurationMs = 600000; // 10 minutes
private double scaleFactor = 2.0; // exponential
/**
 * Creates a builder that will run the configured tasks over the given items.
 */
public Builder(Iterable<I> items) {
  this.items = items;
}

/**
 * Runs tasks in parallel on the given executor; when unset, tasks run on the calling thread.
 */
public Builder<I> executeWith(ExecutorService service) {
  this.service = service;
  return this;
}

/**
 * Registers a callback invoked with the item and its exception after a task exhausts retries.
 */
public Builder<I> onFailure(FailureTask<I, ?> task) {
  this.onFailure = task;
  return this;
}

/**
 * Stops starting new items after the first task failure.
 */
public Builder<I> stopOnFailure() {
  this.stopOnFailure = true;
  return this;
}

/**
 * Rethrows collected failures when the run finishes (this is the default).
 */
public Builder<I> throwFailureWhenFinished() {
  this.throwFailureWhenFinished = true;
  return this;
}

/**
 * Suppresses collected failures; run(...) then reports them only via its boolean result.
 */
public Builder<I> suppressFailureWhenFinished() {
  this.throwFailureWhenFinished = false;
  return this;
}

/**
 * Sets whether collected failures are rethrown when the run finishes.
 */
public Builder<I> throwFailureWhenFinished(boolean throwWhenFinished) {
  this.throwFailureWhenFinished = throwWhenFinished;
  return this;
}

/**
 * Sets a task used to revert items that succeeded, if the run fails overall.
 */
public Builder<I> revertWith(Task<I, ?> task) {
  this.revertTask = task;
  return this;
}

/**
 * Stops reverting remaining items after the first revert failure.
 */
public Builder<I> stopRevertsOnFailure() {
  this.stopRevertsOnFailure = true;
  return this;
}

/**
 * Sets a task used to abort items that were never attempted, if the run fails overall.
 */
public Builder<I> abortWith(Task<I, ?> task) {
  this.abortTask = task;
  return this;
}

/**
 * Stops aborting remaining items after the first abort failure.
 */
public Builder<I> stopAbortsOnFailure() {
  this.stopAbortsOnFailure = true;
  return this;
}

/**
 * Adds exception types that stop retries immediately when caught (blacklist mode).
 */
public Builder<I> stopRetryOn(Class<? extends Exception>... exceptions) {
  stopRetryExceptions.addAll(Arrays.asList(exceptions));
  return this;
}

/**
 * Disables retries (a single attempt per item).
 */
public Builder<I> noRetry() {
  this.maxAttempts = 1;
  return this;
}

/**
 * Retries each item up to nTimes after the initial attempt (nTimes + 1 attempts total).
 */
public Builder<I> retry(int nTimes) {
  this.maxAttempts = nTimes + 1;
  return this;
}

/**
 * Retries only when the caught exception is an instance of the given type (whitelist mode).
 */
public Builder<I> onlyRetryOn(Class<? extends Exception> exception) {
  this.onlyRetryExceptions = Collections.singletonList(exception);
  return this;
}

/**
 * Retries only when the caught exception matches one of the given types (whitelist mode).
 */
public Builder<I> onlyRetryOn(Class<? extends Exception>... exceptions) {
  this.onlyRetryExceptions = Lists.newArrayList(exceptions);
  return this;
}

/**
 * Configures exponential backoff between retries.
 *
 * @param minSleepTimeMs initial sleep before the first retry
 * @param maxSleepTimeMs cap on the per-retry sleep
 * @param maxRetryTimeMs total elapsed time after which retries stop
 * @param scaleFactor multiplier applied to the sleep on each successive attempt
 */
public Builder<I> exponentialBackoff(long minSleepTimeMs,
                                     long maxSleepTimeMs,
                                     long maxRetryTimeMs,
                                     double scaleFactor) {
  this.minSleepTimeMs = minSleepTimeMs;
  this.maxSleepTimeMs = maxSleepTimeMs;
  this.maxDurationMs = maxRetryTimeMs;
  this.scaleFactor = scaleFactor;
  return this;
}
/**
 * Runs the task over all items, allowing only unchecked exceptions.
 *
 * @return true if every task succeeded
 */
public boolean run(Task<I, RuntimeException> task) {
  return run(task, RuntimeException.class);
}

/**
 * Runs the task over all items, in parallel when an executor was configured.
 *
 * @param exceptionClass the checked exception type the task may throw
 * @return true if every task succeeded
 * @throws E if throwFailureWhenFinished is set and any task failed
 */
public <E extends Exception> boolean run(Task<I, E> task,
                                         Class<E> exceptionClass) throws E {
  if (service != null) {
    return runParallel(task, exceptionClass);
  } else {
    return runSingleThreaded(task, exceptionClass);
  }
}
/**
 * Runs the task over each item sequentially on the calling thread.
 * <p>
 * Items that succeed are tracked so they can be reverted if the run fails; items that were
 * never attempted (e.g. after stopOnFailure triggered a break, or after an uncaught Throwable)
 * are passed to the abort task instead.
 *
 * @return true if every task succeeded; false only when failures are suppressed
 * @throws E if throwFailureWhenFinished is set and any task failed
 */
private <E extends Exception> boolean runSingleThreaded(
    Task<I, E> task, Class<E> exceptionClass) throws E {
  List<I> succeeded = Lists.newArrayList();
  List<Throwable> exceptions = Lists.newArrayList();

  // the same iterator is reused in the finally block so abort only sees unattempted items
  Iterator<I> iterator = items.iterator();
  boolean threw = true;
  try {
    while (iterator.hasNext()) {
      I item = iterator.next();
      try {
        runTaskWithRetry(task, item);
        succeeded.add(item);
      } catch (Exception e) {
        exceptions.add(e);
        if (onFailure != null) {
          try {
            onFailure.run(item, e);
          } catch (Exception failException) {
            e.addSuppressed(failException);
            LOG.error("Failed to clean up on failure", e);
            // keep going
          }
        }
        if (stopOnFailure) {
          break;
        }
      }
    }
    threw = false;
  } finally {
    // threw handles exceptions that were *not* caught by the catch block,
    // and exceptions that were caught and possibly handled by onFailure
    // are kept in exceptions.
    if (threw || !exceptions.isEmpty()) {
      // undo whatever already succeeded
      if (revertTask != null) {
        boolean failed = false;
        for (I item : succeeded) {
          try {
            revertTask.run(item);
          } catch (Exception e) {
            failed = true;
            LOG.error("Failed to revert task", e);
            // keep going
          }
          if (stopRevertsOnFailure && failed) {
            break;
          }
        }
      }
      // abort everything that was never attempted (remaining iterator entries)
      if (abortTask != null) {
        boolean failed = false;
        while (iterator.hasNext()) {
          try {
            abortTask.run(iterator.next());
          } catch (Exception e) {
            failed = true;
            LOG.error("Failed to abort task", e);
            // keep going
          }
          if (stopAbortsOnFailure && failed) {
            break;
          }
        }
      }
    }
  }

  if (throwFailureWhenFinished && !exceptions.isEmpty()) {
    Tasks.throwOne(exceptions, exceptionClass);
  } else if (throwFailureWhenFinished && threw) {
    throw new RuntimeException(
        "Task set failed with an uncaught throwable");
  }

  return !threw;
}
/**
 * Runs the task over all items concurrently on the configured executor.
 * <p>
 * Each item gets one submitted Runnable that either runs the task (with retries) or, if a
 * failure has already been observed and stopOnFailure is set, aborts the item instead. After
 * all submissions complete, items that succeeded are reverted in parallel when any task failed
 * and a revert task is configured.
 *
 * @return true if every task succeeded; false only when failures are suppressed
 * @throws E if throwFailureWhenFinished is set and any task failed
 */
private <E extends Exception> boolean runParallel(final Task<I, E> task,
                                                  Class<E> exceptionClass)
    throws E {
  // concurrent collections: these are mutated from the executor's worker threads
  final Queue<I> succeeded = new ConcurrentLinkedQueue<>();
  final Queue<Throwable> exceptions = new ConcurrentLinkedQueue<>();
  final AtomicBoolean taskFailed = new AtomicBoolean(false);
  final AtomicBoolean abortFailed = new AtomicBoolean(false);
  final AtomicBoolean revertFailed = new AtomicBoolean(false);

  List<Future<?>> futures = Lists.newArrayList();
  for (final I item : items) {
    // submit a task for each item that will either run or abort the task
    futures.add(service.submit(new Runnable() {
      @Override
      public void run() {
        if (!(stopOnFailure && taskFailed.get())) {
          // run the task with retries
          boolean threw = true;
          try {
            runTaskWithRetry(task, item);
            succeeded.add(item);
            threw = false;
          } catch (Exception e) {
            taskFailed.set(true);
            exceptions.add(e);
            if (onFailure != null) {
              try {
                onFailure.run(item, e);
              } catch (Exception failException) {
                e.addSuppressed(failException);
                LOG.error("Failed to clean up on failure", e);
                // swallow the exception
              }
            }
          } finally {
            // catches non-Exception Throwables as well: if the try did not complete
            // normally for any reason, mark the run as failed
            if (threw) {
              taskFailed.set(true);
            }
          }
        } else if (abortTask != null) {
          // abort the task instead of running it
          if (stopAbortsOnFailure && abortFailed.get()) {
            return;
          }
          boolean failed = true;
          try {
            abortTask.run(item);
            failed = false;
          } catch (Exception e) {
            LOG.error("Failed to abort task", e);
            // swallow the exception
          } finally {
            if (failed) {
              abortFailed.set(true);
            }
          }
        }
      }
    }));
  }

  // let the above tasks complete (or abort)
  exceptions.addAll(waitFor(futures));
  futures.clear();

  if (taskFailed.get() && revertTask != null) {
    // at least one task failed, revert any that succeeded
    for (final I item : succeeded) {
      futures.add(service.submit(new Runnable() {
        @Override
        public void run() {
          if (stopRevertsOnFailure && revertFailed.get()) {
            return;
          }
          boolean failed = true;
          try {
            revertTask.run(item);
            failed = false;
          } catch (Exception e) {
            LOG.error("Failed to revert task", e);
            // swallow the exception
          } finally {
            if (failed) {
              revertFailed.set(true);
            }
          }
        }
      }));
    }

    // let the revert tasks complete
    exceptions.addAll(waitFor(futures));
  }

  if (throwFailureWhenFinished && !exceptions.isEmpty()) {
    Tasks.throwOne(exceptions, exceptionClass);
  } else if (throwFailureWhenFinished && taskFailed.get()) {
    throw new RuntimeException(
        "Task set failed with an uncaught throwable");
  }

  return !taskFailed.get();
}
/**
 * Runs the task on a single item, retrying with exponential backoff and jitter.
 * <p>
 * Rethrows the last failure when maxAttempts is exhausted, when the elapsed time exceeds
 * maxDurationMs, or when the exception is not retryable according to the configured
 * onlyRetryExceptions (whitelist) / stopRetryExceptions (blacklist) lists.
 */
private <E extends Exception> void runTaskWithRetry(Task<I, E> task, I item)
    throws E {
  long start = System.currentTimeMillis();
  int attempt = 0;
  while (true) {
    attempt += 1;
    try {
      task.run(item);
      break;
    } catch (Exception e) {
      long durationMs = System.currentTimeMillis() - start;
      if (attempt >= maxAttempts || durationMs > maxDurationMs) {
        throw e;
      }

      if (onlyRetryExceptions != null) {
        // if onlyRetryExceptions are present, then this retries if one is found
        boolean matchedRetryException = false;
        for (Class<? extends Exception> exClass : onlyRetryExceptions) {
          if (exClass.isInstance(e)) {
            matchedRetryException = true;
          }
        }
        if (!matchedRetryException) {
          throw e;
        }
      } else {
        // otherwise, always retry unless one of the stop exceptions is found
        for (Class<? extends Exception> exClass : stopRetryExceptions) {
          if (exClass.isInstance(e)) {
            throw e;
          }
        }
      }

      // exponential backoff capped at maxSleepTimeMs, plus up to ~10% random jitter
      // to avoid retry stampedes
      int delayMs = (int) Math.min(
          minSleepTimeMs * Math.pow(scaleFactor, attempt - 1),
          maxSleepTimeMs);
      int jitter = ThreadLocalRandom.current()
          .nextInt(Math.max(1, (int) (delayMs * 0.1)));

      LOG.warn("Retrying task after failure: " + e.getMessage(), e);

      try {
        TimeUnit.MILLISECONDS.sleep(delayMs + jitter);
      } catch (InterruptedException ie) {
        // restore interrupt status and give up instead of looping again
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
      }
    }
  }
}
}
/**
 * Polls until every future is done, then collects their uncaught exceptions.
 * <p>
 * Errors are rethrown immediately (with any already-collected exceptions attached as
 * suppressed); cancellations are ignored; interruption cancels all futures and rethrows.
 *
 * @return the uncaught (non-Error) exceptions thrown by the futures
 */
private static Collection<Throwable> waitFor(Collection<Future<?>> futures)
    throws Error {
  while (true) {
    // busy-poll with a short sleep rather than blocking on get(), so an interrupt can
    // cancel all outstanding futures at once
    int numFinished = 0;
    for (Future<?> future : futures) {
      if (future.isDone()) {
        numFinished += 1;
      }
    }

    if (numFinished == futures.size()) {
      List<Throwable> uncaught = new ArrayList<>();
      // all of the futures are done, get any uncaught exceptions
      for (Future<?> future : futures) {
        try {
          future.get();

        } catch (InterruptedException e) {
          LOG.warn("Interrupted while getting future results", e);
          for (Throwable t : uncaught) {
            e.addSuppressed(t);
          }
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);

        } catch (CancellationException e) {
          // ignore cancellations

        } catch (ExecutionException e) {
          Throwable cause = e.getCause();
          if (Error.class.isInstance(cause)) {
            for (Throwable t : uncaught) {
              cause.addSuppressed(t);
            }
            throw (Error) cause;
          }

          // NOTE(review): the ExecutionException wrapper is collected here, while the
          // Error branch above propagates the unwrapped cause — confirm this asymmetry
          // is intentional before changing either side
          if (cause != null) {
            uncaught.add(e);
          }

          LOG.warn("Task threw uncaught exception", cause);
        }
      }

      return uncaught;

    } else {
      try {
        Thread.sleep(10);
      } catch (InterruptedException e) {
        LOG.warn("Interrupted while waiting for tasks to finish", e);
        for (Future<?> future : futures) {
          future.cancel(true);
        }
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
      }
    }
  }
}
/**
 * An iterable over the integers in the range [ 0, size ).
 */
private static class Range implements Iterable<Integer> {
  private final int size;

  Range(int size) {
    this.size = size;
  }

  @Override
  public Iterator<Integer> iterator() {
    return new Iterator<Integer>() {
      private int current = 0;

      @Override
      public boolean hasNext() {
        return current < size;
      }

      @Override
      public Integer next() {
        // honor the Iterator contract: the original silently returned values past the
        // end of the range instead of throwing
        if (current >= size) {
          throw new java.util.NoSuchElementException(
              "No more items in range [0, " + size + ")");
        }
        int ret = current;
        current += 1;
        return ret;
      }
    };
  }
}
/**
 * Returns a builder over the integers [0, upTo), useful for index-based parallel work.
 */
public static Builder<Integer> range(int upTo) {
  return new Builder<>(new Range(upTo));
}

/**
 * Returns a builder over the given items.
 */
public static <I> Builder<I> foreach(Iterable<I> items) {
  return new Builder<>(items);
}
/**
 * Returns a builder over the given items.
 * <p>
 * Safe for generic varargs: the array is only read (wrapped by Arrays.asList),
 * never written to, so no heap pollution is possible.
 */
@SafeVarargs
public static <I> Builder<I> foreach(I... items) {
  return new Builder<>(Arrays.asList(items));
}
/**
 * Throws the first collected exception, cast to the allowed exception type.
 * <p>
 * Exceptions of a different class than the first are attached to it as suppressed
 * exceptions so their information is not lost.
 */
@SuppressWarnings("unchecked")
private static <E extends Exception> void throwOne(
    Collection<Throwable> exceptions, Class<E> allowedException) throws E {
  Iterator<Throwable> iter = exceptions.iterator();
  Throwable e = iter.next();
  Class<? extends Throwable> exceptionClass = e.getClass();

  while (iter.hasNext()) {
    Throwable other = iter.next();
    // NOTE(review): later exceptions of the SAME class as the first are dropped, not
    // suppressed — confirm this is intentional
    if (!exceptionClass.isInstance(other)) {
      e.addSuppressed(other);
    }
  }

  ExceptionUtil.<E>castAndThrow(e, allowedException);
}
}
| 1,981 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Pair.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.base.Objects;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.specific.SpecificData;
import java.io.Serializable;
/**
 * A serializable two-value tuple that is also Avro-writable.
 * <p>
 * Implements {@link IndexedRecord} so pairs can be written to and read from Avro files;
 * the Avro schema is derived reflectively from the runtime classes of the two values and
 * cached per class pair.
 */
public class Pair<X, Y> implements IndexedRecord, SpecificData.SchemaConstructable, Serializable {
  /**
   * Returns a pair of the two given values.
   */
  public static <X, Y> Pair<X, Y> of(X x, Y y) {
    return new Pair<>(x, y);
  }

  // caches reflect-derived Avro schemas keyed by the (x class, y class) pair, so a
  // schema is built at most once per class combination
  private static final LoadingCache<Pair<Class<?>, Class<?>>, Schema> SCHEMA_CACHE = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<Pair<Class<?>, Class<?>>, Schema>() {
        @Override
        @SuppressWarnings("deprecation")
        public Schema load(Pair<Class<?>, Class<?>> key) {
          Schema xSchema = ReflectData.get().getSchema(key.x);
          Schema ySchema = ReflectData.get().getSchema(key.y);
          return Schema.createRecord("pair", null, null, false, Lists.newArrayList(
              new Schema.Field("x", xSchema, null, null),
              new Schema.Field("y", ySchema, null, null)
          ));
        }
      });

  private Schema schema = null; // lazily resolved from SCHEMA_CACHE on first getSchema()
  private X x;
  private Y y;

  /**
   * Constructor used by Avro
   */
  private Pair(Schema schema) {
    this.schema = schema;
  }

  private Pair(X x, Y y) {
    this.x = x;
    this.y = y;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void put(int i, Object v) {
    if (i == 0) {
      this.x = (X) v;
      return;
    } else if (i == 1) {
      this.y = (Y) v;
      return;
    }
    throw new IllegalArgumentException("Cannot set value " + i + " (not 0 or 1): " + v);
  }

  @Override
  public Object get(int i) {
    if (i == 0) {
      return x;
    } else if (i == 1) {
      return y;
    }
    throw new IllegalArgumentException("Cannot get value " + i + " (not 0 or 1)");
  }

  @Override
  public Schema getSchema() {
    if (schema == null) {
      this.schema = SCHEMA_CACHE.getUnchecked(Pair.of(x.getClass(), y.getClass()));
    }
    return schema;
  }

  public X first() {
    return x;
  }

  public Y second() {
    return y;
  }

  @Override
  public String toString() {
    return "(" + String.valueOf(x) + ", " + String.valueOf(y) + ")";
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(x, y);
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    // fix: the original dereferenced other.getClass() without a null check, so
    // pair.equals(null) threw NullPointerException instead of returning false as
    // required by the Object.equals contract
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    Pair<?, ?> otherPair = (Pair<?, ?>) other;
    return Objects.equal(x, otherPair.x) && Objects.equal(y, otherPair.y);
  }
}
| 1,982 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/BinPacking.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;
/**
 * First-fit bin packing of weighted items into bins of a target weight.
 * <p>
 * Items are placed into the first of up to {@code lookback} open bins that can hold them;
 * when no open bin fits, a new bin is opened and the oldest bin (beyond the look-back
 * window) is emitted. Input order within each bin is preserved.
 */
public class BinPacking {
  public static class ListPacker<T> {
    private final long targetWeight;
    private final int lookback;

    public ListPacker(long targetWeight, int lookback) {
      this.targetWeight = targetWeight;
      this.lookback = lookback;
    }

    /**
     * Packs from the end of the list: reverses the input, packs, then reverses both the
     * bin order and each bin's contents so the result is in original order.
     */
    public List<List<T>> packEnd(List<T> items, Function<T, Long> weightFunc) {
      return Lists.reverse(ImmutableList.copyOf(Iterables.transform(
          new PackingIterable<>(Lists.reverse(items), targetWeight, lookback, weightFunc),
          Lists::reverse)));
    }

    /**
     * Packs the items in iteration order and materializes all bins.
     */
    public List<List<T>> pack(Iterable<T> items, Function<T, Long> weightFunc) {
      return ImmutableList.copyOf(new PackingIterable<>(items, targetWeight, lookback, weightFunc));
    }
  }

  /**
   * Lazily packs an iterable of items into bins; each call to iterator() re-packs.
   */
  public static class PackingIterable<T> implements Iterable<List<T>> {
    private final Iterable<T> iterable;
    private final long targetWeight;
    private final int lookback;
    private final Function<T, Long> weightFunc;

    public PackingIterable(Iterable<T> iterable, long targetWeight, int lookback,
                           Function<T, Long> weightFunc) {
      Preconditions.checkArgument(lookback > 0,
          "Bin look-back size must be greater than 0: %s", lookback);
      this.iterable = iterable;
      this.targetWeight = targetWeight;
      this.lookback = lookback;
      this.weightFunc = weightFunc;
    }

    @Override
    public Iterator<List<T>> iterator() {
      return new PackingIterator<>(iterable.iterator(), targetWeight, lookback, weightFunc);
    }
  }

  private static class PackingIterator<T> implements Iterator<List<T>> {
    // open bins, oldest first; bounded by lookback
    private final LinkedList<Bin> bins = Lists.newLinkedList();
    private final Iterator<T> items;
    private final long targetWeight;
    private final int lookback;
    private final Function<T, Long> weightFunc;

    private PackingIterator(Iterator<T> items, long targetWeight, int lookback,
                            Function<T, Long> weightFunc) {
      this.items = items;
      this.targetWeight = targetWeight;
      this.lookback = lookback;
      this.weightFunc = weightFunc;
    }

    public boolean hasNext() {
      return items.hasNext() || !bins.isEmpty();
    }

    public List<T> next() {
      // consume items until the look-back window overflows (emitting the oldest bin)
      // or the input is exhausted (draining remaining bins one per call)
      while (items.hasNext()) {
        T item = items.next();
        long weight = weightFunc.apply(item);
        Bin bin = find(bins, weight);

        if (bin != null) {
          bin.add(item, weight);

        } else {
          bin = new Bin();
          bin.add(item, weight);
          bins.addLast(bin);

          if (bins.size() > lookback) {
            return ImmutableList.copyOf(bins.removeFirst().items());
          }
        }
      }

      if (bins.isEmpty()) {
        throw new NoSuchElementException();
      }

      return ImmutableList.copyOf(bins.removeFirst().items());
    }

    // first-fit: returns the first open bin that can hold the weight, or null
    private Bin find(List<Bin> bins, long weight) {
      for (Bin bin : bins) {
        if (bin.canAdd(weight)) {
          return bin;
        }
      }
      return null;
    }

    private class Bin {
      private long binWeight = 0L;
      private List<T> items = Lists.newArrayList();

      public List<T> items() {
        return items;
      }

      public boolean canAdd(long weight) {
        return (binWeight + weight <= targetWeight);
      }

      public void add(T item, long weight) {
        this.binWeight += weight;
        items.add(item);
      }
    }
  }
}
| 1,983 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Exceptions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
/**
 * Helpers for attaching secondary failures to an exception that is already being thrown.
 */
public class Exceptions {
  private Exceptions() {
  }

  /**
   * Runs the given callback and, if it throws, attaches its exception to
   * {@code alreadyThrown} as a suppressed exception instead of propagating it.
   *
   * @return alreadyThrown, for convenient rethrow by the caller
   */
  public static <E extends Exception>
  E suppressExceptions(E alreadyThrown, Runnable run) {
    try {
      run.run();
    } catch (Exception e) {
      alreadyThrown.addSuppressed(e);
    }
    return alreadyThrown;
  }

  /**
   * Runs the given callback, suppressing any exception it throws into
   * {@code alreadyThrown}, then throws {@code alreadyThrown}.
   */
  public static <E extends Exception>
  void suppressAndThrow(E alreadyThrown, Runnable run) throws E {
    throw suppressExceptions(alreadyThrown, run);
  }
}
| 1,984 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ThreadPools.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.iceberg.SystemProperties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
/**
 * Process-wide daemon thread pools for scan planning and manifest reads.
 * <p>
 * Pool sizes are read once, at class-load time, from Java system properties.
 */
public class ThreadPools {
  public static final String PLANNER_THREAD_POOL_SIZE_PROP =
      SystemProperties.PLANNER_THREAD_POOL_SIZE_PROP;
  public static final String WORKER_THREAD_POOL_SIZE_PROP =
      SystemProperties.WORKER_THREAD_POOL_SIZE_PROP;

  // final: these are process-wide singletons and must never be reassigned
  // (the original fields were mutable statics)
  private static final ExecutorService PLANNER_POOL = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newFixedThreadPool(
          getPoolSize(PLANNER_THREAD_POOL_SIZE_PROP, 4),
          new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat("iceberg-planner-pool-%d")
              .build()));

  private static final ExecutorService WORKER_POOL = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newFixedThreadPool(
          getPoolSize(WORKER_THREAD_POOL_SIZE_PROP, Runtime.getRuntime().availableProcessors()),
          new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat("iceberg-worker-pool-%d")
              .build()));

  /**
   * Return an {@link ExecutorService} that uses the "planner" thread-pool.
   * <p>
   * The size of the planner pool limits the number of concurrent planning operations in the base
   * table implementation.
   * <p>
   * The size of this thread-pool is controlled by the Java system property
   * {@code iceberg.planner.num-threads}.
   *
   * @return an {@link ExecutorService} that uses the planner pool
   */
  public static ExecutorService getPlannerPool() {
    return PLANNER_POOL;
  }

  /**
   * Return an {@link ExecutorService} that uses the "worker" thread-pool.
   * <p>
   * The size of the worker pool limits the number of tasks concurrently reading manifests in the
   * base table implementation across all concurrent planning operations.
   * <p>
   * The size of this thread-pool is controlled by the Java system property
   * {@code iceberg.worker.num-threads}.
   *
   * @return an {@link ExecutorService} that uses the worker pool
   */
  public static ExecutorService getWorkerPool() {
    return WORKER_POOL;
  }

  /**
   * Reads a pool size from the given system property, falling back to the default when
   * the property is unset or not a valid unsigned integer.
   */
  private static int getPoolSize(String systemProperty, int defaultSize) {
    String value = System.getProperty(systemProperty);
    if (value != null) {
      try {
        return Integer.parseUnsignedInt(value);
      } catch (NumberFormatException e) {
        // malformed property value: will return the default
      }
    }
    return defaultSize;
  }
}
| 1,985 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroFileAppender.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.io.DatumWriter;
import java.io.IOException;
import java.util.Map;
import java.util.function.Function;
/**
 * A {@link FileAppender} that writes records to an Avro data file, counting records as
 * they are appended.
 */
class AvroFileAppender<D> implements FileAppender<D> {
  private DataFileWriter<D> writer = null; // set to null by close() to prevent reuse
  private long numRecords = 0L;

  AvroFileAppender(Schema schema, OutputFile file,
                   Function<Schema, DatumWriter<?>> createWriterFunc,
                   CodecFactory codec, Map<String, String> metadata) throws IOException {
    this.writer = newAvroWriter(schema, file, createWriterFunc, codec, metadata);
  }

  @Override
  public void add(D datum) {
    try {
      numRecords += 1L;
      writer.append(datum);
    } catch (IOException e) {
      // wrap as unchecked to satisfy the FileAppender interface
      throw new RuntimeIOException(e);
    }
  }

  @Override
  public Metrics metrics() {
    // only the record count is tracked; column-level stats are not collected here
    return new Metrics(numRecords, null, null, null);
  }

  @Override
  public void close() throws IOException {
    if (writer != null) {
      writer.close();
      this.writer = null;
    }
  }

  /**
   * Creates a DataFileWriter for the given schema/output, applying the codec and
   * key/value metadata before opening the file.
   */
  @SuppressWarnings("unchecked")
  private static <D> DataFileWriter<D> newAvroWriter(
      Schema schema, OutputFile file, Function<Schema, DatumWriter<?>> createWriterFunc,
      CodecFactory codec, Map<String, String> metadata) throws IOException {
    DataFileWriter<D> writer = new DataFileWriter<>(
        (DatumWriter<D>) createWriterFunc.apply(schema));

    writer.setCodec(codec);

    for (Map.Entry<String, String> entry : metadata.entrySet()) {
      writer.setMeta(entry.getKey(), entry.getValue());
    }

    // TODO: support overwrite
    return writer.create(schema, file.create());
  }
}
| 1,986 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueReaders.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.common.DynConstructors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.avro.util.Utf8;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import static java.util.Collections.emptyIterator;
public class ValueReaders {
private ValueReaders() {
}
public static ValueReader<Object> nulls() {
return NullReader.INSTANCE;
}
public static ValueReader<Boolean> booleans() {
return BooleanReader.INSTANCE;
}
public static ValueReader<Integer> ints() {
return IntegerReader.INSTANCE;
}
public static ValueReader<Long> longs() {
return LongReader.INSTANCE;
}
public static ValueReader<Float> floats() {
return FloatReader.INSTANCE;
}
public static ValueReader<Double> doubles() {
return DoubleReader.INSTANCE;
}
public static ValueReader<String> strings() {
return StringReader.INSTANCE;
}
public static ValueReader<Utf8> utf8s() {
return Utf8Reader.INSTANCE;
}
public static ValueReader<UUID> uuids() {
return UUIDReader.INSTANCE;
}
public static ValueReader<byte[]> fixed(int length) {
return new FixedReader(length);
}
public static ValueReader<GenericData.Fixed> fixed(Schema schema) {
return new GenericFixedReader(schema);
}
public static ValueReader<byte[]> bytes() {
return BytesReader.INSTANCE;
}
public static ValueReader<ByteBuffer> byteBuffers() {
return ByteBufferReader.INSTANCE;
}
public static ValueReader<BigDecimal> decimal(ValueReader<byte[]> unscaledReader, int scale) {
return new DecimalReader(unscaledReader, scale);
}
public static ValueReader<Object> union(List<ValueReader<?>> readers) {
return new UnionReader(readers);
}
public static <T> ValueReader<Collection<T>> array(ValueReader<T> elementReader) {
return new ArrayReader<>(elementReader);
}
public static <K, V> ValueReader<Map<K, V>> arrayMap(ValueReader<K> keyReader, ValueReader<V> valueReader) {
return new ArrayMapReader<>(keyReader, valueReader);
}
public static <K, V> ValueReader<Map<K, V>> map(ValueReader<K> keyReader, ValueReader<V> valueReader) {
return new MapReader<>(keyReader, valueReader);
}
public static ValueReader<GenericData.Record> record(List<ValueReader<?>> readers, Schema recordSchema) {
return new RecordReader(readers, recordSchema);
}
public static <R extends IndexedRecord> ValueReader<R> record(List<ValueReader<?>> readers, Class<R> recordClass, Schema recordSchema) {
return new IndexedRecordReader<>(readers, recordClass, recordSchema);
}
private static class NullReader implements ValueReader<Object> {
private static NullReader INSTANCE = new NullReader();
private NullReader() {
}
@Override
public Object read(Decoder decoder, Object ignored) throws IOException {
decoder.readNull();
return null;
}
}
private static class BooleanReader implements ValueReader<Boolean> {
private static BooleanReader INSTANCE = new BooleanReader();
private BooleanReader() {
}
@Override
public Boolean read(Decoder decoder, Object ignored) throws IOException {
return decoder.readBoolean();
}
}
private static class IntegerReader implements ValueReader<Integer> {
private static IntegerReader INSTANCE = new IntegerReader();
private IntegerReader() {
}
@Override
public Integer read(Decoder decoder, Object ignored) throws IOException {
return decoder.readInt();
}
}
private static class LongReader implements ValueReader<Long> {
private static LongReader INSTANCE = new LongReader();
private LongReader() {
}
@Override
public Long read(Decoder decoder, Object ignored) throws IOException {
return decoder.readLong();
}
}
private static class FloatReader implements ValueReader<Float> {
private static FloatReader INSTANCE = new FloatReader();
private FloatReader() {
}
@Override
public Float read(Decoder decoder, Object ignored) throws IOException {
return decoder.readFloat();
}
}
private static class DoubleReader implements ValueReader<Double> {
private static DoubleReader INSTANCE = new DoubleReader();
private DoubleReader() {
}
@Override
public Double read(Decoder decoder, Object ignored) throws IOException {
return decoder.readDouble();
}
}
private static class StringReader implements ValueReader<String> {
private static StringReader INSTANCE = new StringReader();
private final ThreadLocal<Utf8> reusedTempUtf8 = ThreadLocal.withInitial(Utf8::new);
private StringReader() {
}
@Override
public String read(Decoder decoder, Object ignored) throws IOException {
// use the decoder's readString(Utf8) method because it may be a resolving decoder
this.reusedTempUtf8.set(decoder.readString(reusedTempUtf8.get()));
return reusedTempUtf8.get().toString();
// int length = decoder.readInt();
// byte[] bytes = new byte[length];
// decoder.readFixed(bytes, 0, length);
}
}
private static class Utf8Reader implements ValueReader<Utf8> {
private static Utf8Reader INSTANCE = new Utf8Reader();
private Utf8Reader() {
}
@Override
public Utf8 read(Decoder decoder, Object reuse) throws IOException {
// use the decoder's readString(Utf8) method because it may be a resolving decoder
if (reuse instanceof Utf8) {
return decoder.readString((Utf8) reuse);
} else {
return decoder.readString(null);
}
// int length = decoder.readInt();
// byte[] bytes = new byte[length];
// decoder.readFixed(bytes, 0, length);
}
}
private static class UUIDReader implements ValueReader<UUID> {
private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
ByteBuffer buffer = ByteBuffer.allocate(16);
buffer.order(ByteOrder.BIG_ENDIAN);
return buffer;
});
private static UUIDReader INSTANCE = new UUIDReader();
private UUIDReader() {
}
@Override
public UUID read(Decoder decoder, Object ignored) throws IOException {
ByteBuffer buffer = BUFFER.get();
buffer.rewind();
decoder.readFixed(buffer.array(), 0, 16);
long mostSigBits = buffer.getLong();
long leastSigBits = buffer.getLong();
return new UUID(mostSigBits, leastSigBits);
}
}
private static class FixedReader implements ValueReader<byte[]> {
private final int length;
private FixedReader(int length) {
this.length = length;
}
@Override
public byte[] read(Decoder decoder, Object reuse) throws IOException {
if (reuse instanceof byte[]) {
byte[] reusedBytes = (byte[]) reuse;
if (reusedBytes.length == length) {
decoder.readFixed(reusedBytes, 0, length);
return reusedBytes;
}
}
byte[] bytes = new byte[length];
decoder.readFixed(bytes, 0, length);
return bytes;
}
}
private static class GenericFixedReader implements ValueReader<GenericData.Fixed> {
private final Schema schema;
private final int length;
private GenericFixedReader(Schema schema) {
this.schema = schema;
this.length = schema.getFixedSize();
}
@Override
public GenericData.Fixed read(Decoder decoder, Object reuse) throws IOException {
if (reuse instanceof GenericData.Fixed) {
GenericData.Fixed reusedFixed = (GenericData.Fixed) reuse;
if (reusedFixed.bytes().length == length) {
decoder.readFixed(reusedFixed.bytes(), 0, length);
return reusedFixed;
}
}
byte[] bytes = new byte[length];
decoder.readFixed(bytes, 0, length);
return new GenericData.Fixed(schema, bytes);
}
}
private static class BytesReader implements ValueReader<byte[]> {
private static BytesReader INSTANCE = new BytesReader();
private BytesReader() {
}
@Override
public byte[] read(Decoder decoder, Object reuse) throws IOException {
// use the decoder's readBytes method because it may be a resolving decoder
// the only time the previous value could be reused is when its length matches the next array,
// but there is no way to know this with the readBytes call, which uses a ByteBuffer. it is
// possible to wrap the reused array in a ByteBuffer, but this may still result in allocating
// a new buffer. since the usual case requires an allocation anyway to get the size right,
// just allocate every time.
return decoder.readBytes(null).array();
// int length = decoder.readInt();
// byte[] bytes = new byte[length];
// decoder.readFixed(bytes, 0, length);
// return bytes;
}
}
/** Reads Avro bytes values as ByteBuffers; stateless, so a single instance is shared. */
private static class ByteBufferReader implements ValueReader<ByteBuffer> {
  // made final: the singleton is never reassigned (also removed dead commented-out code)
  private static final ByteBufferReader INSTANCE = new ByteBufferReader();
  private ByteBufferReader() {
  }
  @Override
  public ByteBuffer read(Decoder decoder, Object reuse) throws IOException {
    // use the decoder's readBytes method because it may be a resolving decoder;
    // readBytes reuses the passed buffer when it has sufficient capacity
    if (reuse instanceof ByteBuffer) {
      return decoder.readBytes((ByteBuffer) reuse);
    } else {
      return decoder.readBytes(null);
    }
  }
}
/** Reads a decimal stored as unscaled big-endian bytes, applying a fixed scale. */
private static class DecimalReader implements ValueReader<BigDecimal> {
  private final ValueReader<byte[]> bytesReader;
  private final int scale;
  private DecimalReader(ValueReader<byte[]> bytesReader, int scale) {
    this.bytesReader = bytesReader;
    this.scale = scale;
  }
  @Override
  public BigDecimal read(Decoder decoder, Object ignored) throws IOException {
    // BigInteger copies its input and exposes no way to recover the backing
    // buffer, so the unscaled bytes cannot be reused across reads.
    byte[] unscaled = bytesReader.read(decoder, null);
    return new BigDecimal(new BigInteger(unscaled), scale);
  }
}
/** Reads a union value by dispatching to the reader for the encoded branch. */
private static class UnionReader implements ValueReader<Object> {
  private final ValueReader[] readers;
  private UnionReader(List<ValueReader<?>> readers) {
    this.readers = readers.toArray(new ValueReader[0]);
  }
  @Override
  public Object read(Decoder decoder, Object reuse) throws IOException {
    // the branch index written by the encoder selects which reader to use
    int branch = decoder.readIndex();
    return readers[branch].read(decoder, reuse);
  }
}
/** Reads an Avro enum as the String symbol at the encoded ordinal. */
private static class EnumReader implements ValueReader<String> {
  private final String[] symbols;
  private EnumReader(List<String> symbols) {
    this.symbols = symbols.toArray(new String[0]);
  }
  @Override
  public String read(Decoder decoder, Object ignored) throws IOException {
    // enums are encoded as the ordinal of the symbol in the writer schema
    return symbols[decoder.readEnum()];
  }
}
/**
 * Reads an Avro array into a LinkedList.
 *
 * Container reuse alternates between two buffers: the list returned by the
 * previous call (lastList) is cleared and refilled as this call's result,
 * while the caller-supplied reused list becomes the new lastList and its
 * elements are offered to the element reader for per-element reuse.
 * Assumes the caller passes the previously returned list back as reused —
 * TODO confirm against the callers of this reader.
 */
private static class ArrayReader<T> implements ValueReader<Collection<T>> {
  private final ValueReader<T> elementReader;
  // buffer handed out by the previous read() call; recycled as the next result
  private LinkedList<?> lastList = null;
  private ArrayReader(ValueReader<T> elementReader) {
    this.elementReader = elementReader;
  }
  @Override
  @SuppressWarnings("unchecked")
  public Collection<T> read(Decoder decoder, Object reused) throws IOException {
    // result buffer: recycle the list from the previous call when available
    LinkedList<T> resultList;
    if (lastList != null) {
      lastList.clear();
      resultList = (LinkedList<T>) lastList;
    } else {
      resultList = Lists.newLinkedList();
    }
    // the caller's list becomes next call's buffer; its elements feed reuse below
    if (reused instanceof LinkedList) {
      this.lastList = (LinkedList<?>) reused;
    } else {
      this.lastList = null;
    }
    long chunkLength = decoder.readArrayStart();
    // iterate the reused list's elements so the element reader can recycle them
    Iterator<?> elIter = lastList != null ? lastList.iterator() : emptyIterator();
    while (chunkLength > 0) {
      for (long i = 0; i < chunkLength; i += 1) {
        Object lastValue = elIter.hasNext() ? elIter.next() : null;
        resultList.addLast(elementReader.read(decoder, lastValue));
      }
      // Avro arrays are written in chunks; 0 signals the end
      chunkLength = decoder.arrayNext();
    }
    return resultList;
  }
}
/**
 * Reads a map that is encoded as an Avro array of key/value records
 * (the representation used for non-string map keys).
 *
 * NOTE(review): lastMap is cleared before its entry iterator is taken, so
 * kvIter is always empty and the per-entry reuse branch appears to be dead
 * code — only the map container itself is reused. Confirm this is intended.
 */
private static class ArrayMapReader<K, V> implements ValueReader<Map<K, V>> {
  private final ValueReader<K> keyReader;
  private final ValueReader<V> valueReader;
  // container from the previous call, recycled when the caller passes it back
  private Map lastMap = null;
  private ArrayMapReader(ValueReader<K> keyReader, ValueReader<V> valueReader) {
    this.keyReader = keyReader;
    this.valueReader = valueReader;
  }
  @Override
  @SuppressWarnings("unchecked")
  public Map<K, V> read(Decoder decoder, Object reuse) throws IOException {
    if (reuse instanceof Map) {
      this.lastMap = (Map<?, ?>) reuse;
    } else {
      this.lastMap = null;
    }
    // recycle the reused map as the result container when possible
    Map<K, V> resultMap;
    if (lastMap != null) {
      lastMap.clear();
      resultMap = (Map<K, V>) lastMap;
    } else {
      resultMap = Maps.newLinkedHashMap();
    }
    // key/value pairs are stored as an array of 2-field records
    long chunkLength = decoder.readArrayStart();
    Iterator<Map.Entry<?, ?>> kvIter = lastMap != null ?
        lastMap.entrySet().iterator() :
        emptyIterator();
    while (chunkLength > 0) {
      for (long i = 0; i < chunkLength; i += 1) {
        K key;
        V value;
        if (kvIter.hasNext()) {
          Map.Entry<?, ?> last = kvIter.next();
          key = keyReader.read(decoder, last.getKey());
          value = valueReader.read(decoder, last.getValue());
        } else {
          key = keyReader.read(decoder, null);
          value = valueReader.read(decoder, null);
        }
        resultMap.put(key, value);
      }
      // arrays are chunked; 0 signals the end
      chunkLength = decoder.arrayNext();
    }
    return resultMap;
  }
}
/**
 * Reads a native Avro map (string-keyed encoding, via readMapStart/mapNext).
 *
 * NOTE(review): as in ArrayMapReader, lastMap is cleared before its entry
 * iterator is taken, so kvIter is always empty and the per-entry reuse branch
 * appears to be dead code — only the map container is reused. Confirm intended.
 */
private static class MapReader<K, V> implements ValueReader<Map<K, V>> {
  private final ValueReader<K> keyReader;
  private final ValueReader<V> valueReader;
  // container from the previous call, recycled when the caller passes it back
  private Map lastMap = null;
  private MapReader(ValueReader<K> keyReader, ValueReader<V> valueReader) {
    this.keyReader = keyReader;
    this.valueReader = valueReader;
  }
  @Override
  @SuppressWarnings("unchecked")
  public Map<K, V> read(Decoder decoder, Object reuse) throws IOException {
    if (reuse instanceof Map) {
      this.lastMap = (Map<?, ?>) reuse;
    } else {
      this.lastMap = null;
    }
    // recycle the reused map as the result container when possible
    Map<K, V> resultMap;
    if (lastMap != null) {
      lastMap.clear();
      resultMap = (Map<K, V>) lastMap;
    } else {
      resultMap = Maps.newLinkedHashMap();
    }
    long chunkLength = decoder.readMapStart();
    Iterator<Map.Entry<?, ?>> kvIter = lastMap != null ?
        lastMap.entrySet().iterator() :
        emptyIterator();
    while (chunkLength > 0) {
      for (long i = 0; i < chunkLength; i += 1) {
        K key;
        V value;
        if (kvIter.hasNext()) {
          Map.Entry<?, ?> last = kvIter.next();
          key = keyReader.read(decoder, last.getKey());
          value = valueReader.read(decoder, last.getValue());
        } else {
          key = keyReader.read(decoder, null);
          value = valueReader.read(decoder, null);
        }
        resultMap.put(key, value);
      }
      // maps are chunked; 0 signals the end
      chunkLength = decoder.mapNext();
    }
    return resultMap;
  }
}
/**
 * Base reader for struct-like values. Subclasses supply how to create/reuse
 * the container and how to get/set a field at a position; this class drives
 * decoding of each field with the matching per-position reader.
 */
public abstract static class StructReader<S> implements ValueReader<S> {
  private final ValueReader<?>[] readers;
  protected StructReader(List<ValueReader<?>> readers) {
    this.readers = new ValueReader[readers.size()];
    for (int i = 0; i < this.readers.length; i += 1) {
      this.readers[i] = readers.get(i);
    }
  }
  // returns the container to fill: either a recycled reuse object or a new one
  protected abstract S reuseOrCreate(Object reuse);
  // reads the current value at pos, offered to the field reader for reuse
  protected abstract Object get(S struct, int pos);
  protected abstract void set(S struct, int pos, Object value);
  public ValueReader<?> reader(int pos) {
    return readers[pos];
  }
  @Override
  public S read(Decoder decoder, Object reuse) throws IOException {
    S struct = reuseOrCreate(reuse);
    if (decoder instanceof ResolvingDecoder) {
      // a resolving decoder may reorder or drop fields relative to the read
      // schema, so follow its field order instead of position order.
      // this may not set all of the fields. nulls are set by default.
      for (org.apache.avro.Schema.Field field : ((ResolvingDecoder) decoder).readFieldOrder()) {
        Object reusedValue = get(struct, field.pos());
        set(struct, field.pos(), readers[field.pos()].read(decoder, reusedValue));
      }
    } else {
      // plain decoder: fields arrive in schema position order
      for (int i = 0; i < readers.length; i += 1) {
        Object reusedValue = get(struct, i);
        set(struct, i, readers[i].read(decoder, reusedValue));
      }
    }
    return struct;
  }
}
/** StructReader that materializes rows as Avro GenericData.Record instances. */
private static class RecordReader extends StructReader<GenericData.Record> {
  private final Schema recordSchema;
  private RecordReader(List<ValueReader<?>> readers, Schema recordSchema) {
    super(readers);
    this.recordSchema = recordSchema;
  }
  @Override
  protected GenericData.Record reuseOrCreate(Object reuse) {
    // recycle the previous record when possible, otherwise start a fresh one
    return reuse instanceof GenericData.Record
        ? (GenericData.Record) reuse
        : new GenericData.Record(recordSchema);
  }
  @Override
  protected Object get(GenericData.Record struct, int pos) {
    return struct.get(pos);
  }
  @Override
  protected void set(GenericData.Record struct, int pos, Object value) {
    struct.put(pos, value);
  }
}
/**
 * StructReader for arbitrary IndexedRecord implementations, constructed
 * reflectively via either a (Schema) or a no-arg hidden constructor.
 */
static class IndexedRecordReader<R extends IndexedRecord> extends StructReader<R> {
  private final Class<R> recordClass;
  private final DynConstructors.Ctor<R> ctor;
  private final Schema schema;
  IndexedRecordReader(List<ValueReader<?>> readers, Class<R> recordClass, Schema schema) {
    super(readers);
    this.recordClass = recordClass;
    // prefer a (Schema) constructor, falling back to a no-arg constructor
    this.ctor = DynConstructors.builder(IndexedRecord.class)
        .hiddenImpl(recordClass, Schema.class)
        .hiddenImpl(recordClass)
        .build();
    this.schema = schema;
  }
  @Override
  protected R reuseOrCreate(Object reuse) {
    // recycle the previous record when its type matches; otherwise construct one
    return recordClass.isInstance(reuse)
        ? recordClass.cast(reuse)
        : ctor.newInstance(schema);
  }
  @Override
  protected Object get(R struct, int pos) {
    return struct.get(pos);
  }
  @Override
  protected void set(R struct, int pos, Object value) {
    struct.put(pos, value);
  }
}
}
| 1,987 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroIO.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.common.DynClasses;
import com.netflix.iceberg.common.DynConstructors;
import com.netflix.iceberg.io.DelegatingInputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.avro.file.SeekableInput;
import java.io.IOException;
import java.io.InputStream;
/**
 * Utility for adapting Iceberg SeekableInputStreams to Avro's SeekableInput.
 *
 * Hadoop classes are loaded reflectively so this works without Hadoop on the
 * classpath; when the wrapped stream is a Hadoop FSDataInputStream and Avro
 * has not been shaded, Hadoop's own AvroFSInput adapter is used instead.
 */
class AvroIO {
  private AvroIO() {
  }
  // null when Hadoop is not on the classpath
  private static final Class<?> fsDataInputStreamClass = DynClasses.builder()
      .impl("org.apache.hadoop.fs.FSDataInputStream")
      .orNull()
      .build();
  // true when Avro classes have NOT been relocated by shading; Hadoop's
  // AvroFSInput is compiled against the unshaded Avro package, so it can
  // only be used when this name check passes
  private static final boolean relocated =
      "org.apache.avro.file.SeekableInput".equals(SeekableInput.class.getName());
  // constructor for org.apache.hadoop.fs.AvroFSInput(FSDataInputStream, long),
  // or null when it cannot be used
  private static final DynConstructors.Ctor<SeekableInput> avroFsInputCtor =
      !relocated && fsDataInputStreamClass != null ?
          DynConstructors.builder(SeekableInput.class)
              .impl("org.apache.hadoop.fs.AvroFSInput", fsDataInputStreamClass, Long.TYPE)
              .build() :
          null;
  /**
   * Wraps the given stream as an Avro SeekableInput reporting the given length.
   */
  static SeekableInput stream(SeekableInputStream stream, long length) {
    if (stream instanceof DelegatingInputStream) {
      InputStream wrapped = ((DelegatingInputStream) stream).getDelegate();
      // prefer Hadoop's adapter when the delegate is an FSDataInputStream
      if (avroFsInputCtor != null && fsDataInputStreamClass != null &&
          fsDataInputStreamClass.isInstance(wrapped)) {
        return avroFsInputCtor.newInstance(wrapped, length);
      }
    }
    return new AvroInputStreamAdapter(stream, length);
  }
  /**
   * Adapter that exposes a SeekableInputStream through Avro's SeekableInput
   * interface. All stream operations delegate to the wrapped stream; the
   * length is supplied by the caller rather than probed from the stream.
   */
  private static class AvroInputStreamAdapter extends SeekableInputStream implements SeekableInput {
    private final SeekableInputStream stream;
    private final long length;
    public AvroInputStreamAdapter(SeekableInputStream stream, long length) {
      this.stream = stream;
      this.length = length;
    }
    @Override
    public void close() throws IOException {
      stream.close();
    }
    @Override
    public long getPos() throws IOException {
      return stream.getPos();
    }
    @Override
    public void seek(long newPos) throws IOException {
      stream.seek(newPos);
    }
    @Override
    public long tell() throws IOException {
      // Avro's name for the current position
      return getPos();
    }
    @Override
    public long length() throws IOException {
      return length;
    }
    @Override
    public int read() throws IOException {
      return stream.read();
    }
    @Override
    public int read(byte[] b) throws IOException {
      return stream.read(b);
    }
    @Override
    public int read(byte[] b, int off, int len) throws IOException {
      return stream.read(b, off, len);
    }
    @Override
    public long skip(long n) throws IOException {
      return stream.skip(n);
    }
    @Override
    public int available() throws IOException {
      return stream.available();
    }
    @Override
    public synchronized void mark(int readlimit) {
      stream.mark(readlimit);
    }
    @Override
    public synchronized void reset() throws IOException {
      stream.reset();
    }
    @Override
    public boolean markSupported() {
      return stream.markSupported();
    }
  }
}
| 1,988 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueWriter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.io.Encoder;
import java.io.IOException;
/**
 * Writer for a single value of type {@code D} to an Avro {@link Encoder}.
 */
public interface ValueWriter<D> {
  /**
   * Writes the datum to the encoder.
   *
   * @throws IOException if the underlying encoder fails
   */
  void write(D datum, Encoder encoder) throws IOException;
}
| 1,989 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroIterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.FileReader;
import org.apache.avro.io.DatumReader;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
public class AvroIterable<D> extends CloseableGroup implements CloseableIterable<D> {
private final InputFile file;
private final DatumReader<D> reader;
private final Long start;
private final Long end;
private final boolean reuseContainers;
private Map<String, String> metadata = null;
AvroIterable(InputFile file, DatumReader<D> reader,
Long start, Long length, boolean reuseContainers) {
this.file = file;
this.reader = reader;
this.start = start;
this.end = start != null ? start + length : null;
this.reuseContainers = reuseContainers;
}
private DataFileReader<D> initMetadata(DataFileReader<D> reader) {
if (metadata == null) {
this.metadata = Maps.newHashMap();
for (String key : reader.getMetaKeys()) {
metadata.put(key, reader.getMetaString(key));
}
}
return reader;
}
public Map<String, String> getMetadata() {
if (metadata == null) {
try (DataFileReader<D> reader = newFileReader()) {
initMetadata(reader);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read metadata for file: %s", file);
}
}
return metadata;
}
@Override
public Iterator<D> iterator() {
FileReader<D> reader = initMetadata(newFileReader());
if (start != null) {
reader = new AvroRangeIterator<>(reader, start, end);
}
if (reuseContainers) {
return new AvroReuseIterator<>(reader);
}
addCloseable(reader);
return reader;
}
private DataFileReader<D> newFileReader() {
try {
return (DataFileReader<D>) DataFileReader.openReader(
AvroIO.stream(file.newStream(), file.getLength()), reader);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to open file: %s", file);
}
}
private static class AvroRangeIterator<D> implements FileReader<D> {
private final FileReader<D> reader;
private final long end;
AvroRangeIterator(FileReader<D> reader, long start, long end) {
this.reader = reader;
this.end = end;
try {
reader.sync(start);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to find sync past position %d", start);
}
}
@Override
public Schema getSchema() {
return reader.getSchema();
}
@Override
public boolean hasNext() {
try {
return (reader.hasNext() && !reader.pastSync(end));
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to check range end: %d", end);
}
}
@Override
public D next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return reader.next();
}
@Override
public D next(D reuse) {
if (!hasNext()) {
throw new NoSuchElementException();
}
try {
return reader.next(reuse);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read next record");
}
}
@Override
public void sync(long position) throws IOException {
reader.sync(position);
}
@Override
public boolean pastSync(long position) throws IOException {
return reader.pastSync(position);
}
@Override
public long tell() throws IOException {
return reader.tell();
}
@Override
public void close() throws IOException {
reader.close();
}
@Override
public Iterator<D> iterator() {
return this;
}
}
private static class AvroReuseIterator<D> implements Iterator<D>, Closeable {
private final FileReader<D> reader;
private D reused = null;
AvroReuseIterator(FileReader<D> reader) {
this.reader = reader;
}
@Override
public boolean hasNext() {
return reader.hasNext();
}
@Override
public D next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
try {
this.reused = reader.next(reused);
return reused;
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read next record");
}
}
@Override
public void close() throws IOException {
reader.close();
}
}
}
| 1,990 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroSchemaVisitor.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.LinkedList;
import java.util.List;
/**
 * Post-order visitor over an Avro schema. Subclasses override the callbacks
 * for the node kinds they care about; each callback receives the results of
 * visiting its children. Recursive record schemas are rejected.
 */
public abstract class AvroSchemaVisitor<T> {
  public static <T> T visit(Schema schema, AvroSchemaVisitor<T> visitor) {
    switch (schema.getType()) {
      case RECORD:
        // check to make sure this hasn't been visited before
        String name = schema.getFullName();
        Preconditions.checkState(!visitor.recordLevels.contains(name),
            "Cannot process recursive Avro record %s", name);
        // track the records on the current path to detect recursion
        visitor.recordLevels.push(name);
        List<Schema.Field> fields = schema.getFields();
        List<String> names = Lists.newArrayListWithExpectedSize(fields.size());
        List<T> results = Lists.newArrayListWithExpectedSize(fields.size());
        for (Schema.Field field : schema.getFields()) {
          names.add(field.name());
          results.add(visit(field.schema(), visitor));
        }
        visitor.recordLevels.pop();
        return visitor.record(schema, names, results);
      case UNION:
        List<Schema> types = schema.getTypes();
        List<T> options = Lists.newArrayListWithExpectedSize(types.size());
        for (Schema type : types) {
          options.add(visit(type, visitor));
        }
        return visitor.union(schema, options);
      case ARRAY:
        return visitor.array(schema, visit(schema.getElementType(), visitor));
      case MAP:
        return visitor.map(schema, visit(schema.getValueType(), visitor));
      default:
        return visitor.primitive(schema);
    }
  }
  // stack of record full names on the current visit path (recursion guard)
  protected LinkedList<String> recordLevels = Lists.newLinkedList();
  // callbacks below default to null so subclasses override only what they need
  public T record(Schema record, List<String> names, List<T> fields) {
    return null;
  }
  public T union(Schema union, List<T> options) {
    return null;
  }
  public T array(Schema array, T element) {
    return null;
  }
  public T map(Schema map, T value) {
    return null;
  }
  public T primitive(Schema primitive) {
    return null;
  }
}
| 1,991 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.io.Decoder;
import java.io.IOException;
/**
 * Reader for a single value of type {@code T} from an Avro {@link Decoder}.
 */
public interface ValueReader<T> {
  /**
   * Reads and returns the next value, optionally recycling {@code reuse}.
   *
   * @param reuse a previously returned value that may be reused, or null
   * @throws IOException if the underlying decoder fails
   */
  T read(Decoder decoder, Object reuse) throws IOException;
}
| 1,992 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/PruneColumns.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getElementId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getFieldId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getKeyId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getValueId;
/**
 * Visitor that prunes an Avro schema down to the fields whose ids are in the
 * selected set, preserving the structure (options, arrays, maps) needed to
 * reach them. Callbacks return null when nothing under a node is selected.
 */
class PruneColumns extends AvroSchemaVisitor<Schema> {
  private final Set<Integer> selectedIds;

  PruneColumns(Set<Integer> selectedIds) {
    this.selectedIds = selectedIds;
  }

  /**
   * Prunes the root record; when nothing is selected, returns a copy of the
   * record with no fields rather than null.
   */
  public Schema rootSchema(Schema record) {
    Schema result = visit(record, this);
    if (result != null) {
      return result;
    }
    return copyRecord(record, ImmutableList.of());
  }

  @Override
  public Schema record(Schema record, List<String> names, List<Schema> fields) {
    // fields holds the pruned child results, aligned by field position
    List<Schema.Field> filteredFields = Lists.newArrayListWithExpectedSize(fields.size());
    boolean hasChange = false;
    for (Schema.Field field : record.getFields()) {
      int fieldId = getFieldId(field);
      Schema fieldSchema = fields.get(field.pos());
      // All primitives are selected by selecting the field, but map and list
      // types can be selected by projecting the keys, values, or elements.
      // This creates two conditions where the field should be selected: if the
      // id is selected or if the result of the field is non-null. The only
      // case where the converted field is non-null is when a map or list is
      // selected by lower IDs.
      if (selectedIds.contains(fieldId)) {
        filteredFields.add(copyField(field, field.schema()));
      } else if (fieldSchema != null) {
        hasChange = true;
        filteredFields.add(copyField(field, fieldSchema));
      }
    }
    if (hasChange) {
      return copyRecord(record, filteredFields);
    } else if (filteredFields.size() == record.getFields().size()) {
      // every field was selected unchanged: reuse the original schema
      return record;
    } else if (!filteredFields.isEmpty()) {
      return copyRecord(record, filteredFields);
    }
    return null;
  }

  @Override
  public Schema union(Schema union, List<Schema> options) {
    // fix: Guava Preconditions use %s templates, not SLF4J-style {}
    Preconditions.checkState(AvroSchemaUtil.isOptionSchema(union),
        "Invalid schema: non-option unions are not supported: %s", union);
    // only unions with null are allowed, and a null schema results in null
    Schema pruned = null;
    if (options.get(0) != null) {
      pruned = options.get(0);
    } else if (options.get(1) != null) {
      pruned = options.get(1);
    }
    if (pruned != null) {
      if (pruned != AvroSchemaUtil.fromOption(union)) {
        // the non-null branch was projected: rebuild the option around it
        return AvroSchemaUtil.toOption(pruned);
      }
      return union;
    }
    return null;
  }

  @Override
  public Schema array(Schema array, Schema element) {
    if (array.getLogicalType() instanceof LogicalMap) {
      // array-backed map: elements are key/value records
      Schema keyValue = array.getElementType();
      int keyId = getFieldId(keyValue.getField("key"));
      int valueId = getFieldId(keyValue.getField("value"));
      // if either key or value is selected, the whole map must be projected
      if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) {
        return array;
      } else if (element != null) {
        if (keyValue.getField("value").schema() != element.getField("value").schema()) {
          // the value must be a projection
          return AvroSchemaUtil.createMap(
              keyId, keyValue.getField("key").schema(),
              valueId, element.getField("value").schema());
        } else {
          return array;
        }
      }
    } else {
      int elementId = getElementId(array);
      if (selectedIds.contains(elementId)) {
        return array;
      } else if (element != null) {
        if (element != array.getElementType()) {
          // the element must be a projection
          return Schema.createArray(element);
        }
        return array;
      }
    }
    return null;
  }

  @Override
  public Schema map(Schema map, Schema value) {
    int keyId = getKeyId(map);
    int valueId = getValueId(map);
    // if either key or value is selected, the whole map must be projected
    if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) {
      return map;
    } else if (value != null) {
      if (value != map.getValueType()) {
        // the value must be a projection
        return Schema.createMap(value);
      }
      return map;
    }
    return null;
  }

  @Override
  public Schema primitive(Schema primitive) {
    // primitives are not selected directly; selection happens at the field
    return null;
  }

  // copies a record schema with new fields, preserving its object properties
  private static Schema copyRecord(Schema record, List<Schema.Field> newFields) {
    Schema copy = Schema.createRecord(record.getName(),
        record.getDoc(), record.getNamespace(), record.isError(), newFields);
    for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
      copy.addProp(prop.getKey(), prop.getValue());
    }
    return copy;
  }

  // copies a field with a new schema, preserving its object properties
  private static Schema.Field copyField(Schema.Field field, Schema newSchema) {
    Schema.Field copy = new Schema.Field(field.name(),
        newSchema, field.doc(), field.defaultVal(), field.order());
    for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
      copy.addProp(prop.getKey(), prop.getValue());
    }
    return copy;
  }
}
| 1,993 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroSchemaUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.avro.Schema.Type.ARRAY;
import static org.apache.avro.Schema.Type.MAP;
import static org.apache.avro.Schema.Type.RECORD;
import static org.apache.avro.Schema.Type.UNION;
public class AvroSchemaUtil {
public static final String FIELD_ID_PROP = "field-id";
public static final String KEY_ID_PROP = "key-id";
public static final String VALUE_ID_PROP = "value-id";
public static final String ELEMENT_ID_PROP = "element-id";
public static final String ADJUST_TO_UTC_PROP = "adjust-to-utc";
private static final Schema NULL = Schema.create(Schema.Type.NULL);
/** Converts an Iceberg schema to an Avro schema whose root record is named {@code tableName}. */
public static Schema convert(com.netflix.iceberg.Schema schema,
                             String tableName) {
  return convert(schema, ImmutableMap.of(schema.asStruct(), tableName));
}
/** Converts an Iceberg schema to Avro, naming record schemas from the struct-to-name map. */
public static Schema convert(com.netflix.iceberg.Schema schema,
                             Map<Types.StructType, String> names) {
  return TypeUtil.visit(schema, new TypeToSchema(names));
}
/** Converts an Iceberg type to an Avro schema with no record names supplied. */
public static Schema convert(Type type) {
  return convert(type, ImmutableMap.of());
}
/** Converts an Iceberg struct to an Avro record schema named {@code name}. */
public static Schema convert(Types.StructType type, String name) {
  return convert(type, ImmutableMap.of(type, name));
}
/** Converts an Iceberg type to Avro, naming record schemas from the struct-to-name map. */
public static Schema convert(Type type, Map<Types.StructType, String> names) {
  return TypeUtil.visit(type, new TypeToSchema(names));
}
/** Converts an Avro schema to the corresponding Iceberg type. */
public static Type convert(Schema schema) {
  return AvroSchemaVisitor.visit(schema, new SchemaToType(schema));
}
/**
 * Converts a struct to Avro and returns the map from each visited Iceberg
 * type to the Avro schema it produced.
 */
public static Map<Type, Schema> convertTypes(Types.StructType type, String name) {
  TypeToSchema converter = new TypeToSchema(ImmutableMap.of(type, name));
  TypeUtil.visit(type, converter);
  return ImmutableMap.copyOf(converter.getConversionMap());
}
/** Returns a pruned copy of {@code schema} keeping only fields whose ids are in {@code selectedIds}. */
public static Schema pruneColumns(Schema schema, Set<Integer> selectedIds) {
  return new PruneColumns(selectedIds).rootSchema(schema);
}
/**
 * Builds an Avro read schema that projects {@code schema} to match the
 * expected Iceberg schema, applying the given field renames.
 */
public static Schema buildAvroProjection(Schema schema, com.netflix.iceberg.Schema expected,
                                         Map<String, String> renames) {
  return AvroCustomOrderSchemaVisitor.visit(schema, new BuildAvroProjection(expected, renames));
}
/**
 * Returns true if the schema is a timestamp-micros logical type marked as
 * UTC-adjusted, Iceberg's representation of timestamptz.
 */
public static boolean isTimestamptz(Schema schema) {
  // instanceof already handles null, so the explicit null check was redundant
  LogicalType logicalType = schema.getLogicalType();
  if (logicalType instanceof LogicalTypes.TimestampMicros) {
    // timestamptz is adjusted to UTC; the property may be stored as either type
    Object value = schema.getObjectProp(ADJUST_TO_UTC_PROP);
    if (value instanceof Boolean) {
      return (Boolean) value;
    } else if (value instanceof String) {
      return Boolean.parseBoolean((String) value);
    }
  }
  return false;
}
/** Returns true if the schema is an option: a two-branch union with a null branch. */
static boolean isOptionSchema(Schema schema) {
  if (schema.getType() != UNION || schema.getTypes().size() != 2) {
    return false;
  }
  List<Schema> branches = schema.getTypes();
  return branches.get(0).getType() == Schema.Type.NULL
      || branches.get(1).getType() == Schema.Type.NULL;
}
/**
 * Wraps the schema in a union with null, or validates that an existing union
 * is already an option.
 */
static Schema toOption(Schema schema) {
  if (schema.getType() == UNION) {
    // use a %s template so the message is only formatted on failure,
    // instead of eagerly concatenating the schema string on every call
    Preconditions.checkArgument(isOptionSchema(schema),
        "Union schemas are not supported: %s", schema);
    return schema;
  } else {
    return Schema.createUnion(NULL, schema);
  }
}
/** Returns the non-null branch of an option schema. */
static Schema fromOption(Schema schema) {
  // fix: Guava Preconditions use %s templates; the original '{}' (SLF4J
  // style) would never be substituted with the schema
  Preconditions.checkArgument(schema.getType() == UNION,
      "Expected union schema but was passed: %s", schema);
  Preconditions.checkArgument(schema.getTypes().size() == 2,
      "Expected optional schema, but was passed: %s", schema);
  if (schema.getTypes().get(0).getType() == Schema.Type.NULL) {
    return schema.getTypes().get(1);
  } else {
    return schema.getTypes().get(0);
  }
}
/** Returns the non-null schema from a two-element list of option branches. */
static Schema fromOptions(List<Schema> options) {
  // fix: Guava Preconditions use %s templates; the original '{}' (SLF4J
  // style) would never be substituted with the size
  Preconditions.checkArgument(options.size() == 2,
      "Expected two schemas, but was passed: %s options", options.size());
  if (options.get(0).getType() == Schema.Type.NULL) {
    return options.get(1);
  } else {
    return options.get(0);
  }
}
/** Returns true if the schema is a two-field record, the shape used for map key/value pairs. */
static boolean isKeyValueSchema(Schema schema) {
  return (schema.getType() == RECORD && schema.getFields().size() == 2);
}
/**
 * Creates the array-of-key/value-record representation of a map, used when
 * map keys are not strings. The array carries the LogicalMap annotation and
 * both fields carry their Iceberg field ids.
 */
static Schema createMap(int keyId, Schema keySchema,
                        int valueId, Schema valueSchema) {
  // record name encodes the key and value ids so it is unique per map type
  String keyValueName = "k" + keyId + "_v" + valueId;
  Schema.Field keyField = new Schema.Field("key", keySchema, null, null);
  keyField.addProp(FIELD_ID_PROP, keyId);
  // optional values default to null
  Schema.Field valueField = new Schema.Field("value", valueSchema, null,
      isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE: null);
  valueField.addProp(FIELD_ID_PROP, valueId);
  return LogicalMap.get().addToSchema(Schema.createArray(Schema.createRecord(
      keyValueName, null, null, false, ImmutableList.of(keyField, valueField))));
}
/**
 * Like createMap, but for a projection of an existing file schema: the
 * original record and field names are attached as aliases so Avro schema
 * resolution can still match the data written under the old names.
 */
static Schema createProjectionMap(String recordName,
                                  int keyId, String keyName, Schema keySchema,
                                  int valueId, String valueName, Schema valueSchema) {
  String keyValueName = "k" + keyId + "_v" + valueId;
  Schema.Field keyField = new Schema.Field("key", keySchema, null, null);
  if (!"key".equals(keyName)) {
    // alias the original key field name for resolution
    keyField.addAlias(keyName);
  }
  keyField.addProp(FIELD_ID_PROP, keyId);
  Schema.Field valueField = new Schema.Field("value", valueSchema, null,
      isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE: null);
  valueField.addProp(FIELD_ID_PROP, valueId);
  if (!"value".equals(valueName)) {
    // alias the original value field name for resolution
    valueField.addAlias(valueName);
  }
  Schema keyValueRecord = Schema.createRecord(
      keyValueName, null, null, false, ImmutableList.of(keyField, valueField));
  if (!keyValueName.equals(recordName)) {
    // alias the original record name for resolution
    keyValueRecord.addAlias(recordName);
  }
  return LogicalMap.get().addToSchema(Schema.createArray(keyValueRecord));
}
/**
 * Reads an integer id property from a schema, unwrapping option unions first
 * because ids are stored on the non-null branch.
 */
private static int getId(Schema schema, String propertyName) {
  if (schema.getType() == UNION) {
    return getId(fromOption(schema), propertyName);
  }
  Object idValue = schema.getObjectProp(propertyName);
  Preconditions.checkNotNull(idValue, "Missing expected '%s' property", propertyName);
  return toInt(idValue);
}
/**
 * Returns the Iceberg field id stored for a map schema's key.
 *
 * @param schema an Avro MAP schema
 * @return the key's field id
 * @throws IllegalArgumentException if the schema is not a MAP
 */
public static int getKeyId(Schema schema) {
  // %s template avoids building the message string on every call; it is only
  // formatted when the precondition fails
  Preconditions.checkArgument(schema.getType() == MAP,
      "Cannot get map key id for non-map schema: %s", schema);
  return getId(schema, KEY_ID_PROP);
}
/**
 * Returns the Iceberg field id stored for a map schema's value.
 *
 * @param schema an Avro MAP schema
 * @return the value's field id
 * @throws IllegalArgumentException if the schema is not a MAP
 */
public static int getValueId(Schema schema) {
  // %s template avoids eager string concatenation on the success path
  Preconditions.checkArgument(schema.getType() == MAP,
      "Cannot get map value id for non-map schema: %s", schema);
  return getId(schema, VALUE_ID_PROP);
}
/**
 * Returns the Iceberg field id stored for an array schema's element.
 *
 * @param schema an Avro ARRAY schema
 * @return the element's field id
 * @throws IllegalArgumentException if the schema is not an ARRAY
 */
public static int getElementId(Schema schema) {
  // %s template avoids eager string concatenation on the success path
  Preconditions.checkArgument(schema.getType() == ARRAY,
      "Cannot get array element id for non-array schema: %s", schema);
  return getId(schema, ELEMENT_ID_PROP);
}
/**
 * Returns the Iceberg field id stored on an Avro record field.
 *
 * @throws NullPointerException if the field-id property is missing
 */
public static int getFieldId(Schema.Field field) {
  Object idValue = field.getObjectProp(FIELD_ID_PROP);
  Preconditions.checkNotNull(idValue, "Missing expected '%s' property", FIELD_ID_PROP);
  return toInt(idValue);
}
/**
 * Coerces a schema property value to an int; accepts any Number or a numeric String.
 *
 * @throws UnsupportedOperationException for any other value type
 */
private static int toInt(Object value) {
  if (value instanceof Number) {
    return ((Number) value).intValue();
  }
  if (value instanceof String) {
    return Integer.parseInt((String) value);
  }
  throw new UnsupportedOperationException("Cannot coerce value to int: " + value);
}
/**
 * Copies a record schema with a replacement field list and, optionally, a new name.
 * When renaming, the original name is registered as an alias so data written with
 * the old name still resolves. All object properties are carried over.
 */
static Schema copyRecord(Schema record, List<Schema.Field> newFields, String newName) {
  Schema copy;
  if (newName == null) {
    copy = Schema.createRecord(record.getName(),
        record.getDoc(), record.getNamespace(), record.isError(), newFields);
  } else {
    copy = Schema.createRecord(newName, record.getDoc(), null, record.isError(), newFields);
    // a null alias namespace is defaulted to the record's namespace, which causes
    // renames without a namespace to fail; "" preserves the original behavior
    String aliasNamespace = record.getNamespace() == null ? "" : record.getNamespace();
    copy.addAlias(record.getName(), aliasNamespace);
  }

  for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
    copy.addProp(prop.getKey(), prop.getValue());
  }

  return copy;
}
/**
 * Copies a field with a replacement schema and name; if the name changes, the old
 * name is kept as an alias for schema resolution. Object properties are carried over.
 */
static Schema.Field copyField(Schema.Field field, Schema newSchema, String newName) {
  Schema.Field copy = new Schema.Field(
      newName, newSchema, field.doc(), field.defaultVal(), field.order());

  for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
    copy.addProp(prop.getKey(), prop.getValue());
  }

  boolean renamed = !newName.equals(field.name());
  if (renamed) {
    copy.addAlias(field.name());
  }

  return copy;
}
}
| 1,994 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/TypeToSchema.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
import static org.apache.avro.JsonProperties.NULL_VALUE;
/**
 * Converts an Iceberg type tree to an Avro {@link Schema} by visiting it with
 * {@link TypeUtil.SchemaVisitor}.
 *
 * <p>Converted schemas are cached in {@code results} so that a type appearing more
 * than once maps to the same Avro schema instance; the cache is exposed through
 * {@link #getConversionMap()}.
 */
class TypeToSchema extends TypeUtil.SchemaVisitor<Schema> {
  // shared schema instances for primitive types; Avro schemas are treated as immutable here
  private static final Schema BOOLEAN_SCHEMA = Schema.create(Schema.Type.BOOLEAN);
  private static final Schema INTEGER_SCHEMA = Schema.create(Schema.Type.INT);
  private static final Schema LONG_SCHEMA = Schema.create(Schema.Type.LONG);
  private static final Schema FLOAT_SCHEMA = Schema.create(Schema.Type.FLOAT);
  private static final Schema DOUBLE_SCHEMA = Schema.create(Schema.Type.DOUBLE);
  private static final Schema DATE_SCHEMA = LogicalTypes.date()
      .addToSchema(Schema.create(Schema.Type.INT));
  private static final Schema TIME_SCHEMA = LogicalTypes.timeMicros()
      .addToSchema(Schema.create(Schema.Type.LONG));
  private static final Schema TIMESTAMP_SCHEMA = LogicalTypes.timestampMicros()
      .addToSchema(Schema.create(Schema.Type.LONG));
  private static final Schema TIMESTAMPTZ_SCHEMA = LogicalTypes.timestampMicros()
      .addToSchema(Schema.create(Schema.Type.LONG));
  private static final Schema STRING_SCHEMA = Schema.create(Schema.Type.STRING);
  private static final Schema UUID_SCHEMA = LogicalTypes.uuid()
      .addToSchema(Schema.createFixed("uuid_fixed", null, null, 16));
  private static final Schema BINARY_SCHEMA = Schema.create(Schema.Type.BYTES);

  static {
    // both timestamp schemas use timestamp-micros; the adjust-to-utc property is what
    // distinguishes Iceberg's timestamp from timestamptz on the Avro side
    TIMESTAMP_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, false);
    TIMESTAMPTZ_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, true);
  }

  // cache of converted schemas, also returned to callers as the conversion map
  private final Map<Type, Schema> results = Maps.newHashMap();
  // optional caller-supplied record names, keyed by struct type
  private final Map<Types.StructType, String> names;

  TypeToSchema(Map<Types.StructType, String> names) {
    this.names = names;
  }

  /** Returns the Iceberg-type-to-Avro-schema mapping accumulated during the visit. */
  Map<Type, Schema> getConversionMap() {
    return results;
  }

  @Override
  public Schema schema(com.netflix.iceberg.Schema schema, Schema structSchema) {
    return structSchema;
  }

  @Override
  public Schema struct(Types.StructType struct, List<Schema> fieldSchemas) {
    // reuse a previously converted schema for the same struct
    Schema recordSchema = results.get(struct);
    if (recordSchema != null) {
      return recordSchema;
    }

    String recordName = names.get(struct);
    if (recordName == null) {
      // fall back to a generated name based on the current field id on the visitor's
      // stack (fieldIds is maintained by the TypeUtil.SchemaVisitor superclass)
      recordName = "r" + fieldIds.peek();
    }

    List<Types.NestedField> structFields = struct.fields();
    List<Schema.Field> fields = Lists.newArrayListWithExpectedSize(fieldSchemas.size());
    for (int i = 0; i < structFields.size(); i += 1) {
      Types.NestedField structField = structFields.get(i);
      // optional fields default to null so readers can resolve missing values
      Schema.Field field = new Schema.Field(
          structField.name(), fieldSchemas.get(i), null,
          structField.isOptional() ? NULL_VALUE : null);
      field.addProp(AvroSchemaUtil.FIELD_ID_PROP, structField.fieldId());
      fields.add(field);
    }

    recordSchema = Schema.createRecord(recordName, null, null, false, fields);

    results.put(struct, recordSchema);

    return recordSchema;
  }

  @Override
  public Schema field(Types.NestedField field, Schema fieldSchema) {
    // optional fields become a null/type union
    if (field.isOptional()) {
      return toOption(fieldSchema);
    } else {
      return fieldSchema;
    }
  }

  @Override
  public Schema list(Types.ListType list, Schema elementSchema) {
    Schema listSchema = results.get(list);
    if (listSchema != null) {
      return listSchema;
    }

    if (list.isElementOptional()) {
      listSchema = Schema.createArray(toOption(elementSchema));
    } else {
      listSchema = Schema.createArray(elementSchema);
    }

    listSchema.addProp(AvroSchemaUtil.ELEMENT_ID_PROP, list.elementId());

    results.put(list, listSchema);

    return listSchema;
  }

  @Override
  public Schema map(Types.MapType map, Schema keySchema, Schema valueSchema) {
    Schema mapSchema = results.get(map);
    if (mapSchema != null) {
      return mapSchema;
    }

    if (keySchema.getType() == Schema.Type.STRING) {
      // if the map has string keys, use Avro's map type
      mapSchema = Schema.createMap(
          map.isValueOptional() ? toOption(valueSchema) : valueSchema);
      mapSchema.addProp(AvroSchemaUtil.KEY_ID_PROP, map.keyId());
      mapSchema.addProp(AvroSchemaUtil.VALUE_ID_PROP, map.valueId());
    } else {
      // non-string keys are stored as an array of key/value records
      mapSchema = AvroSchemaUtil.createMap(map.keyId(), keySchema,
          map.valueId(), map.isValueOptional() ? toOption(valueSchema) : valueSchema);
    }

    results.put(map, mapSchema);

    return mapSchema;
  }

  @Override
  public Schema primitive(Type.PrimitiveType primitive) {
    Schema primitiveSchema;
    switch (primitive.typeId()) {
      case BOOLEAN:
        primitiveSchema = BOOLEAN_SCHEMA;
        break;
      case INTEGER:
        primitiveSchema = INTEGER_SCHEMA;
        break;
      case LONG:
        primitiveSchema = LONG_SCHEMA;
        break;
      case FLOAT:
        primitiveSchema = FLOAT_SCHEMA;
        break;
      case DOUBLE:
        primitiveSchema = DOUBLE_SCHEMA;
        break;
      case DATE:
        primitiveSchema = DATE_SCHEMA;
        break;
      case TIME:
        primitiveSchema = TIME_SCHEMA;
        break;
      case TIMESTAMP:
        // UTC adjustment selects between the two timestamp-micros variants
        if (((Types.TimestampType) primitive).shouldAdjustToUTC()) {
          primitiveSchema = TIMESTAMPTZ_SCHEMA;
        } else {
          primitiveSchema = TIMESTAMP_SCHEMA;
        }
        break;
      case STRING:
        primitiveSchema = STRING_SCHEMA;
        break;
      case UUID:
        primitiveSchema = UUID_SCHEMA;
        break;
      case FIXED:
        Types.FixedType fixed = (Types.FixedType) primitive;
        primitiveSchema = Schema.createFixed("fixed_" + fixed.length(), null, null, fixed.length());
        break;
      case BINARY:
        primitiveSchema = BINARY_SCHEMA;
        break;
      case DECIMAL:
        // decimals are stored as fixed-size byte arrays sized for the precision
        Types.DecimalType decimal = (Types.DecimalType) primitive;
        primitiveSchema = LogicalTypes.decimal(decimal.precision(), decimal.scale())
            .addToSchema(Schema.createFixed(
                "decimal_" + decimal.precision() + "_" + decimal.scale(),
                null, null, TypeUtil.decimalRequriedBytes(decimal.precision())));
        break;
      default:
        throw new UnsupportedOperationException(
            "Unsupported type ID: " + primitive.typeId());
    }

    results.put(primitive, primitiveSchema);

    return primitiveSchema;
  }
}
| 1,995 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/GenericAvroReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.MapMaker;
import com.netflix.iceberg.common.DynClasses;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Avro {@link DatumReader} that resolves a file schema against an expected read schema
 * and builds a tree of {@link ValueReader}s to materialize values.
 *
 * <p>NOTE(review): {@code setSchema} must be called before {@code read}; otherwise
 * {@code reader} is still null — presumably guaranteed by the Avro file-reading path.
 */
class GenericAvroReader<T> implements DatumReader<T> {

  // per-thread cache of ResolvingDecoders keyed by read schema then file schema;
  // weak keys let entries be collected when schemas are no longer referenced
  private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
      ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());

  private final Schema readSchema;
  private ClassLoader loader = Thread.currentThread().getContextClassLoader();
  private Schema fileSchema = null;
  private ValueReader<T> reader = null;

  public GenericAvroReader(Schema readSchema) {
    this.readSchema = readSchema;
  }

  @SuppressWarnings("unchecked")
  private void initReader() {
    this.reader = (ValueReader<T>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder(loader));
  }

  @Override
  public void setSchema(Schema fileSchema) {
    // apply read-schema aliases to the file schema so names resolve during decoding
    this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
    initReader();
  }

  /** Sets the class loader used to look up record classes by full name. */
  public void setClassLoader(ClassLoader loader) {
    this.loader = loader;
  }

  @Override
  public T read(T reuse, Decoder decoder) throws IOException {
    ResolvingDecoder resolver = resolve(decoder);
    T value = reader.read(resolver, reuse);
    // drain any unread trailing fields skipped by resolution
    resolver.drain();
    return value;
  }

  // returns a cached ResolvingDecoder for (readSchema, fileSchema), configured to
  // read from the given decoder
  private ResolvingDecoder resolve(Decoder decoder) throws IOException {
    Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
    Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
        .computeIfAbsent(readSchema, k -> new HashMap<>());

    ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
    if (resolver == null) {
      resolver = newResolver();
      fileSchemaToResolver.put(fileSchema, resolver);
    }

    resolver.configure(decoder);

    return resolver;
  }

  private ResolvingDecoder newResolver() {
    try {
      return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Schema visitor that builds the ValueReader tree for the read schema.
   */
  private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
    private final ClassLoader loader;

    private ReadBuilder(ClassLoader loader) {
      this.loader = loader;
    }

    @Override
    @SuppressWarnings("unchecked")
    public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
      try {
        // if a class with the record's full name is on the classpath and implements
        // IndexedRecord, read directly into instances of that class
        Class<?> recordClass = DynClasses.builder()
            .loader(loader)
            .impl(record.getFullName())
            .buildChecked();
        if (IndexedRecord.class.isAssignableFrom(recordClass)) {
          return ValueReaders.record(fields, (Class<? extends IndexedRecord>) recordClass, record);
        }

        return ValueReaders.record(fields, record);

      } catch (ClassNotFoundException e) {
        // no concrete class available; fall back to generic records
        return ValueReaders.record(fields, record);
      }
    }

    @Override
    public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
      return ValueReaders.union(options);
    }

    @Override
    public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
      if (array.getLogicalType() instanceof LogicalMap) {
        // map stored as an array of key/value records
        ValueReaders.StructReader<?> keyValueReader = (ValueReaders.StructReader) elementReader;
        ValueReader<?> keyReader = keyValueReader.reader(0);
        ValueReader<?> valueReader = keyValueReader.reader(1);

        if (keyReader == ValueReaders.utf8s()) {
          // materialize string keys as String rather than Utf8
          return ValueReaders.arrayMap(ValueReaders.strings(), valueReader);
        }

        return ValueReaders.arrayMap(keyReader, valueReader);
      }

      return ValueReaders.array(elementReader);
    }

    @Override
    public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
      return ValueReaders.map(ValueReaders.strings(), valueReader);
    }

    @Override
    public ValueReader<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            // Spark uses the same representation
            return ValueReaders.ints();

          case "timestamp-millis":
            // adjust to microseconds
            ValueReader<Long> longs = ValueReaders.longs();
            return (ValueReader<Long>) (decoder, ignored) -> longs.read(decoder, null) * 1000L;

          case "timestamp-micros":
            // Spark uses the same representation
            return ValueReaders.longs();

          case "decimal":
            // decimal payload may be fixed-size or variable-length bytes
            ValueReader<byte[]> inner;
            switch (primitive.getType()) {
              case FIXED:
                inner = ValueReaders.fixed(primitive.getFixedSize());
                break;
              case BYTES:
                inner = ValueReaders.bytes();
                break;
              default:
                throw new IllegalArgumentException(
                    "Invalid primitive type for decimal: " + primitive.getType());
            }

            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueReaders.decimal(inner, decimal.getScale());

          case "uuid":
            return ValueReaders.uuids();

          default:
            throw new IllegalArgumentException("Unknown logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueReaders.nulls();
        case BOOLEAN:
          return ValueReaders.booleans();
        case INT:
          return ValueReaders.ints();
        case LONG:
          return ValueReaders.longs();
        case FLOAT:
          return ValueReaders.floats();
        case DOUBLE:
          return ValueReaders.doubles();
        case STRING:
          return ValueReaders.utf8s();
        case FIXED:
          return ValueReaders.fixed(primitive);
        case BYTES:
          return ValueReaders.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
| 1,996 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/GenericAvroWriter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.avro.AvroSchemaVisitor.visit;
/**
 * Avro {@link DatumWriter} that builds a tree of {@link ValueWriter}s for a schema
 * and delegates writing to it.
 */
class GenericAvroWriter<T> implements DatumWriter<T> {
  private ValueWriter<T> writer = null;

  public GenericAvroWriter(Schema schema) {
    setSchema(schema);
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema schema) {
    this.writer = (ValueWriter<T>) visit(schema, new WriteBuilder());
  }

  @Override
  public void write(T datum, Encoder out) throws IOException {
    writer.write(datum, out);
  }

  /**
   * Schema visitor that builds the ValueWriter tree.
   */
  private static class WriteBuilder extends AvroSchemaVisitor<ValueWriter<?>> {
    private WriteBuilder() {
    }

    @Override
    public ValueWriter<?> record(Schema record, List<String> names, List<ValueWriter<?>> fields) {
      return ValueWriters.record(fields);
    }

    @Override
    public ValueWriter<?> union(Schema union, List<ValueWriter<?>> options) {
      // only null/type option unions are supported; use %s templates so the message
      // is formatted only when the check fails, not on every union visited
      Preconditions.checkArgument(options.contains(ValueWriters.nulls()),
          "Cannot create writer for non-option union: %s", union);
      Preconditions.checkArgument(options.size() == 2,
          "Cannot create writer for non-option union: %s", union);
      if (union.getTypes().get(0).getType() == Schema.Type.NULL) {
        return ValueWriters.option(0, options.get(1));
      } else {
        return ValueWriters.option(1, options.get(0));
      }
    }

    @Override
    public ValueWriter<?> array(Schema array, ValueWriter<?> elementWriter) {
      if (array.getLogicalType() instanceof LogicalMap) {
        // map stored as an array of key/value records
        ValueWriters.StructWriter<?> keyValueWriter = (ValueWriters.StructWriter<?>) elementWriter;
        return ValueWriters.arrayMap(keyValueWriter.writer(0), keyValueWriter.writer(1));
      }

      return ValueWriters.array(elementWriter);
    }

    @Override
    public ValueWriter<?> map(Schema map, ValueWriter<?> valueWriter) {
      return ValueWriters.map(ValueWriters.strings(), valueWriter);
    }

    @Override
    public ValueWriter<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return ValueWriters.ints();

          case "timestamp-micros":
            return ValueWriters.longs();

          case "decimal":
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueWriters.decimal(decimal.getPrecision(), decimal.getScale());

          case "uuid":
            return ValueWriters.uuids();

          default:
            throw new IllegalArgumentException("Unsupported logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueWriters.nulls();
        case BOOLEAN:
          return ValueWriters.booleans();
        case INT:
          return ValueWriters.ints();
        case LONG:
          return ValueWriters.longs();
        case FLOAT:
          return ValueWriters.floats();
        case DOUBLE:
          return ValueWriters.doubles();
        case STRING:
          return ValueWriters.strings();
        case FIXED:
          return ValueWriters.genericFixed(primitive.getFixedSize());
        case BYTES:
          return ValueWriters.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
| 1,997 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/SchemaToType.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
/**
 * Converts an Avro {@link Schema} into an Iceberg {@link Type} by visiting the schema.
 *
 * <p>Field ids are read from the Avro schema properties when present; otherwise new ids
 * are allocated from a counter seeded with the root record's field count so allocated
 * ids do not collide with the root's reserved ids.
 */
class SchemaToType extends AvroSchemaVisitor<Type> {
  private final Schema root;

  SchemaToType(Schema root) {
    this.root = root;
    if (root.getType() == Schema.Type.RECORD) {
      // reserve ids 0..n-1 for the root record's fields
      this.nextId = root.getFields().size();
    }
  }

  // next id to allocate when a schema carries no id property
  private int nextId = 1;

  private int getElementId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.ELEMENT_ID_PROP) != null) {
      return AvroSchemaUtil.getElementId(schema);
    } else {
      return allocateId();
    }
  }

  private int getKeyId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.KEY_ID_PROP) != null) {
      return AvroSchemaUtil.getKeyId(schema);
    } else {
      return allocateId();
    }
  }

  private int getValueId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.VALUE_ID_PROP) != null) {
      return AvroSchemaUtil.getValueId(schema);
    } else {
      return allocateId();
    }
  }

  private int getId(Schema.Field field) {
    if (field.getObjectProp(AvroSchemaUtil.FIELD_ID_PROP) != null) {
      return AvroSchemaUtil.getFieldId(field);
    } else {
      return allocateId();
    }
  }

  private int allocateId() {
    int current = nextId;
    nextId += 1;
    return current;
  }

  @Override
  public Type record(Schema record, List<String> names, List<Type> fieldTypes) {
    List<Schema.Field> fields = record.getFields();
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fields.size());

    if (root == record) {
      // the root is visited last; restart allocation at 0 for its fields
      this.nextId = 0;
    }

    for (int i = 0; i < fields.size(); i += 1) {
      Schema.Field field = fields.get(i);
      Type fieldType = fieldTypes.get(i);
      int fieldId = getId(field);

      if (AvroSchemaUtil.isOptionSchema(field.schema())) {
        newFields.add(Types.NestedField.optional(fieldId, field.name(), fieldType));
      } else {
        newFields.add(Types.NestedField.required(fieldId, field.name(), fieldType));
      }
    }

    return Types.StructType.of(newFields);
  }

  @Override
  public Type union(Schema union, List<Type> options) {
    // Guava Preconditions uses %s placeholders; the previous "{}" was emitted literally
    // instead of being replaced by the union schema
    Preconditions.checkArgument(AvroSchemaUtil.isOptionSchema(union),
        "Unsupported type: non-option union: %s", union);
    // records, arrays, and maps will check nullability later
    if (options.get(0) == null) {
      return options.get(1);
    } else {
      return options.get(0);
    }
  }

  @Override
  public Type array(Schema array, Type elementType) {
    if (array.getLogicalType() instanceof LogicalMap) {
      // map stored as an array of key/value records
      Schema keyValueSchema = array.getElementType();
      // %s placeholder fixes the same Guava message-formatting bug as in union()
      Preconditions.checkArgument(AvroSchemaUtil.isKeyValueSchema(keyValueSchema),
          "Invalid key-value pair schema: %s", keyValueSchema);

      Types.StructType keyValueType = elementType.asStructType();
      Types.NestedField keyField = keyValueType.field("key");
      Types.NestedField valueField = keyValueType.field("value");

      if (keyValueType.field("value").isOptional()) {
        return Types.MapType.ofOptional(
            keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
      } else {
        return Types.MapType.ofRequired(
            keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
      }

    } else {
      // normal array
      Schema elementSchema = array.getElementType();
      int id = getElementId(array);

      if (AvroSchemaUtil.isOptionSchema(elementSchema)) {
        return Types.ListType.ofOptional(id, elementType);
      } else {
        return Types.ListType.ofRequired(id, elementType);
      }
    }
  }

  @Override
  public Type map(Schema map, Type valueType) {
    // Avro maps always have string keys
    Schema valueSchema = map.getValueType();
    int keyId = getKeyId(map);
    int valueId = getValueId(map);

    if (AvroSchemaUtil.isOptionSchema(valueSchema)) {
      return Types.MapType.ofOptional(keyId, valueId, Types.StringType.get(), valueType);
    } else {
      return Types.MapType.ofRequired(keyId, valueId, Types.StringType.get(), valueType);
    }
  }

  @Override
  public Type primitive(Schema primitive) {
    // first check supported logical types
    LogicalType logical = primitive.getLogicalType();
    if (logical != null) {
      String name = logical.getName();
      if (logical instanceof LogicalTypes.Decimal) {
        return Types.DecimalType.of(
            ((LogicalTypes.Decimal) logical).getPrecision(),
            ((LogicalTypes.Decimal) logical).getScale());

      } else if (logical instanceof LogicalTypes.Date) {
        return Types.DateType.get();

      } else if (
          logical instanceof LogicalTypes.TimeMillis ||
          logical instanceof LogicalTypes.TimeMicros) {
        return Types.TimeType.get();

      } else if (
          logical instanceof LogicalTypes.TimestampMillis ||
          logical instanceof LogicalTypes.TimestampMicros) {
        // the adjust-to-utc property distinguishes timestamp from timestamptz
        Object adjustToUTC = primitive.getObjectProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP);
        Preconditions.checkArgument(adjustToUTC instanceof Boolean,
            "Invalid value for adjust-to-utc: %s", adjustToUTC);
        if ((Boolean) adjustToUTC) {
          return Types.TimestampType.withZone();
        } else {
          return Types.TimestampType.withoutZone();
        }

      } else if (LogicalTypes.uuid().getName().equals(name)) {
        return Types.UUIDType.get();
      }
    }

    switch (primitive.getType()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case INT:
        return Types.IntegerType.get();
      case LONG:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case STRING:
      case ENUM:
        return Types.StringType.get();
      case FIXED:
        return Types.FixedType.ofLength(primitive.getFixedSize());
      case BYTES:
        return Types.BinaryType.get();
      case NULL:
        // null branches of unions carry no type; union() picks the non-null option
        return null;
    }

    throw new UnsupportedOperationException(
        "Unsupported primitive type: " + primitive);
  }
}
| 1,998 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroCustomOrderSchemaVisitor.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.LinkedList;
import java.util.List;
import java.util.function.Supplier;
/**
 * Avro schema visitor that lets implementations control when child results are computed:
 * children are passed as {@link Supplier}s (lazy futures) instead of eager values, so a
 * callback can decide the order in which subtrees are visited, or skip them entirely.
 *
 * @param <T> result type produced for schemas
 * @param <F> result type produced for record fields
 */
abstract class AvroCustomOrderSchemaVisitor<T, F> {
  public static <T, F> T visit(Schema schema, AvroCustomOrderSchemaVisitor<T, F> visitor) {
    switch (schema.getType()) {
      case RECORD:
        // check to make sure this hasn't been visited before
        String name = schema.getFullName();
        Preconditions.checkState(!visitor.recordLevels.contains(name),
            "Cannot process recursive Avro record %s", name);

        // push before building field futures so recursion through a field is detected
        visitor.recordLevels.push(name);

        List<Schema.Field> fields = schema.getFields();
        List<String> names = Lists.newArrayListWithExpectedSize(fields.size());
        List<Supplier<F>> results = Lists.newArrayListWithExpectedSize(fields.size());
        for (Schema.Field field : schema.getFields()) {
          names.add(field.name());
          results.add(new VisitFieldFuture<>(field, visitor));
        }

        visitor.recordLevels.pop();

        // NOTE(review): field futures are evaluated lazily, after the pop above, so the
        // recursion check only covers the synchronous construction phase — confirm that
        // is intentional
        return visitor.record(schema, names, Iterables.transform(results, Supplier::get));

      case UNION:
        List<Schema> types = schema.getTypes();
        List<Supplier<T>> options = Lists.newArrayListWithExpectedSize(types.size());
        for (Schema type : types) {
          options.add(new VisitFuture<>(type, visitor));
        }
        return visitor.union(schema, Iterables.transform(options, Supplier::get));

      case ARRAY:
        return visitor.array(schema, new VisitFuture<>(schema.getElementType(), visitor));

      case MAP:
        return visitor.map(schema, new VisitFuture<>(schema.getValueType(), visitor));

      default:
        return visitor.primitive(schema);
    }
  }

  // stack of record full names currently being visited, used to reject recursive schemas
  protected LinkedList<String> recordLevels = Lists.newLinkedList();

  // callbacks below default to null so subclasses only override what they need

  public T record(Schema record, List<String> names, Iterable<F> fields) {
    return null;
  }

  public F field(Schema.Field field, Supplier<T> fieldResult) {
    return null;
  }

  public T union(Schema union, Iterable<T> options) {
    return null;
  }

  public T array(Schema array, Supplier<T> element) {
    return null;
  }

  public T map(Schema map, Supplier<T> value) {
    return null;
  }

  public T primitive(Schema primitive) {
    return null;
  }

  /** Lazily visits a schema when {@link #get()} is called. */
  private static class VisitFuture<T, F> implements Supplier<T> {
    private final Schema schema;
    private final AvroCustomOrderSchemaVisitor<T, F> visitor;

    private VisitFuture(Schema schema, AvroCustomOrderSchemaVisitor<T, F> visitor) {
      this.schema = schema;
      this.visitor = visitor;
    }

    @Override
    public T get() {
      return visit(schema, visitor);
    }
  }

  /** Lazily invokes the field callback (with a lazy child result) when {@link #get()} is called. */
  private static class VisitFieldFuture<T, F> implements Supplier<F> {
    private final Schema.Field field;
    private final AvroCustomOrderSchemaVisitor<T, F> visitor;

    private VisitFieldFuture(Schema.Field field, AvroCustomOrderSchemaVisitor<T, F> visitor) {
      this.field = field;
      this.visitor = visitor;
    }

    @Override
    public F get() {
      return visitor.field(field, new VisitFuture<>(field.schema(), visitor));
    }
  }
}
| 1,999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.