index int64 0 0 | repo_id stringlengths 9 205 | file_path stringlengths 31 246 | content stringlengths 1 12.2M | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/Avro.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.SchemaParser;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificData;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;
import static com.netflix.iceberg.TableProperties.AVRO_COMPRESSION;
import static com.netflix.iceberg.TableProperties.AVRO_COMPRESSION_DEFAULT;
/**
 * Entry point for reading and writing Iceberg data files in Avro format.
 * <p>
 * Use {@link #write(OutputFile)} to obtain a {@link WriteBuilder} and
 * {@link #read(InputFile)} to obtain a {@link ReadBuilder}.
 */
public class Avro {
  private Avro() {
  }

  /**
   * Compression codecs recognized by the {@code write.avro.compression-codec} table property.
   * A {@code null} factory marks a codec name that is recognized but not yet implemented.
   */
  private enum CodecName {
    UNCOMPRESSED(CodecFactory.nullCodec()),
    SNAPPY(CodecFactory.snappyCodec()),
    GZIP(CodecFactory.deflateCodec(9)),
    LZ4(null),
    BROTLI(null),
    ZSTD(null);

    private final CodecFactory avroCodec;

    CodecName(CodecFactory avroCodec) {
      this.avroCodec = avroCodec;
    }

    /**
     * Returns the Avro codec factory for this codec.
     *
     * @throws IllegalArgumentException if the codec has no implementation yet
     */
    public CodecFactory get() {
      // use the %s template so the message is only formatted on failure
      Preconditions.checkArgument(avroCodec != null, "Missing implementation for codec %s", this);
      return avroCodec;
    }
  }

  // default data model; the static initializer below registers Iceberg's logical types with it
  private static final GenericData DEFAULT_MODEL = new SpecificData();

  static {
    // register the array-backed map logical type and the conversions Iceberg files rely on
    LogicalTypes.register(LogicalMap.NAME, schema -> LogicalMap.get());
    DEFAULT_MODEL.addLogicalTypeConversion(new Conversions.DecimalConversion());
    DEFAULT_MODEL.addLogicalTypeConversion(new UUIDConversion());
  }

  /**
   * Returns a builder for writing Avro data files to the given output file.
   *
   * @param file destination for the data file
   * @return a {@link WriteBuilder} for method chaining
   */
  public static WriteBuilder write(OutputFile file) {
    return new WriteBuilder(file);
  }

  /** Builder for {@link FileAppender} instances that write Avro data files. */
  public static class WriteBuilder {
    private final OutputFile file;
    private final Map<String, String> config = Maps.newHashMap();
    // linked map preserves insertion order of key-value metadata in the file header
    private final Map<String, String> metadata = Maps.newLinkedHashMap();
    private com.netflix.iceberg.Schema schema = null;
    private String name = "table";
    private Function<Schema, DatumWriter<?>> createWriterFunc = GenericAvroWriter::new;

    private WriteBuilder(OutputFile file) {
      this.file = file;
    }

    /** Sets the Iceberg schema for rows written by the appender. Required. */
    public WriteBuilder schema(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
      return this;
    }

    /** Sets the record name used when converting the Iceberg schema to Avro. */
    public WriteBuilder named(String name) {
      this.name = name;
      return this;
    }

    /** Sets the function used to create a {@link DatumWriter} from the Avro write schema. */
    public WriteBuilder createWriterFunc(Function<Schema, DatumWriter<?>> writerFunction) {
      this.createWriterFunc = writerFunction;
      return this;
    }

    /** Sets a writer configuration property, such as the compression codec. */
    public WriteBuilder set(String property, String value) {
      config.put(property, value);
      return this;
    }

    /** Adds all entries as writer configuration properties. */
    public WriteBuilder setAll(Map<String, String> properties) {
      config.putAll(properties);
      return this;
    }

    /** Adds a key-value metadata entry to the file header. */
    public WriteBuilder meta(String property, String value) {
      metadata.put(property, value);
      return this;
    }

    /** Adds all entries as key-value metadata in the file header. */
    public WriteBuilder meta(Map<String, String> properties) {
      metadata.putAll(properties);
      return this;
    }

    /**
     * Resolves the configured compression codec name to a {@link CodecFactory}.
     *
     * @throws IllegalArgumentException if the codec name is unknown or unimplemented
     */
    private CodecFactory codec() {
      String codec = config.getOrDefault(AVRO_COMPRESSION, AVRO_COMPRESSION_DEFAULT);
      try {
        return CodecName.valueOf(codec.toUpperCase(Locale.ENGLISH)).get();
      } catch (IllegalArgumentException e) {
        // preserve the cause so callers can distinguish unknown from unimplemented codecs
        throw new IllegalArgumentException("Unsupported compression codec: " + codec, e);
      }
    }

    /**
     * Builds the appender.
     *
     * @param <D> the type of rows written by the appender
     * @return a new {@link FileAppender}
     * @throws IOException if the underlying file cannot be created
     */
    public <D> FileAppender<D> build() throws IOException {
      Preconditions.checkNotNull(schema, "Schema is required");
      Preconditions.checkNotNull(name, "Table name is required and cannot be null");
      // add the Iceberg schema to keyValueMetadata
      meta("iceberg.schema", SchemaParser.toJson(schema));
      return new AvroFileAppender<>(
          AvroSchemaUtil.convert(schema, name), file, createWriterFunc, codec(), metadata);
    }
  }

  /**
   * Returns a builder for reading rows from the given Avro data file.
   *
   * @param file source data file
   * @return a {@link ReadBuilder} for method chaining
   */
  public static ReadBuilder read(InputFile file) {
    return new ReadBuilder(file);
  }

  /** Builder for {@link AvroIterable} instances that read projected rows from Avro files. */
  public static class ReadBuilder {
    // captured eagerly so readers resolve classes with the caller's context loader
    private final ClassLoader defaultLoader = Thread.currentThread().getContextClassLoader();
    private final InputFile file;
    private final Map<String, String> renames = Maps.newLinkedHashMap();
    private boolean reuseContainers = false;
    private com.netflix.iceberg.Schema schema = null;
    private Function<Schema, DatumReader<?>> createReaderFunc = schema -> {
      GenericAvroReader<?> reader = new GenericAvroReader<>(schema);
      reader.setClassLoader(defaultLoader);
      return reader;
    };
    private Long start = null;
    private Long length = null;

    private ReadBuilder(InputFile file) {
      Preconditions.checkNotNull(file, "Input file cannot be null");
      this.file = file;
    }

    /** Sets the function used to create a {@link DatumReader} from the projected read schema. */
    public ReadBuilder createReaderFunc(Function<Schema, DatumReader<?>> readerFunction) {
      this.createReaderFunc = readerFunction;
      return this;
    }

    /**
     * Restricts the read to the given range: [start, end = start + length).
     *
     * @param start the start position for this read
     * @param length the length of the range this read should scan
     * @return this builder for method chaining
     */
    public ReadBuilder split(long start, long length) {
      this.start = start;
      this.length = length;
      return this;
    }

    /** Sets the expected Iceberg schema used to project columns from the file. Required. */
    public ReadBuilder project(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
      return this;
    }

    /** Enables reuse of row containers across reads to reduce allocation. */
    public ReadBuilder reuseContainers() {
      this.reuseContainers = true;
      return this;
    }

    /** Enables or disables reuse of row containers across reads. */
    public ReadBuilder reuseContainers(boolean shouldReuse) {
      this.reuseContainers = shouldReuse;
      return this;
    }

    /** Maps a record's full name in the file schema to a new name in the read schema. */
    public ReadBuilder rename(String fullName, String newName) {
      renames.put(fullName, newName);
      return this;
    }

    /**
     * Builds the iterable.
     *
     * @param <D> the type of rows produced by the reader
     * @return a new {@link AvroIterable} over the projected rows
     */
    public <D> AvroIterable<D> build() {
      Preconditions.checkNotNull(schema, "Schema is required");
      return new AvroIterable<>(file,
          new ProjectionDatumReader<>(createReaderFunc, schema, renames),
          start, length, reuseContainers);
    }
  }
}
| 2,000 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ProjectionDatumReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import static com.netflix.iceberg.types.TypeUtil.getProjectedIds;
/**
 * A {@link DatumReader} that prunes and projects an Avro file schema down to an expected
 * Iceberg schema, delegating the actual decoding to a reader built for the projected schema.
 *
 * @param <D> the type of datum produced by the wrapped reader
 */
public class ProjectionDatumReader<D> implements DatumReader<D> {
  private final Function<Schema, DatumReader<?>> readerFactory;
  private final com.netflix.iceberg.Schema expected;
  private final Map<String, String> renames;
  private Schema readSchema = null;
  private Schema fileSchema = null;
  private DatumReader<D> delegate = null;

  public ProjectionDatumReader(Function<Schema, DatumReader<?>> getReader,
                               com.netflix.iceberg.Schema expectedSchema,
                               Map<String, String> renames) {
    this.readerFactory = getReader;
    this.expected = expectedSchema;
    this.renames = renames;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema fileSchema) {
    this.fileSchema = fileSchema;
    // drop columns that are not selected by the expected schema, then rename/reorder to match it
    Schema pruned = AvroSchemaUtil.pruneColumns(fileSchema, getProjectedIds(expected));
    this.readSchema = AvroSchemaUtil.buildAvroProjection(pruned, expected, renames);
    // build the delegate for the projected schema and point it at the file's schema
    DatumReader<D> reader = (DatumReader<D>) readerFactory.apply(readSchema);
    reader.setSchema(this.fileSchema);
    this.delegate = reader;
  }

  @Override
  public D read(D reuse, Decoder in) throws IOException {
    return delegate.read(reuse, in);
  }
}
| 2,001 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/BuildAvroProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.avro.AvroSchemaUtil.copyField;
import static com.netflix.iceberg.avro.AvroSchemaUtil.copyRecord;
import static com.netflix.iceberg.avro.AvroSchemaUtil.fromOption;
import static com.netflix.iceberg.avro.AvroSchemaUtil.fromOptions;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getFieldId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.isKeyValueSchema;
import static com.netflix.iceberg.avro.AvroSchemaUtil.isOptionSchema;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
/**
* Renames and aliases fields in an Avro schema based on the current table schema.
* <p>
* This class creates a read schema based on an Avro file's schema that will correctly translate
* from the file's field names to the current table schema.
* <p>
* This will also rename records in the file's Avro schema to support custom read classes.
*/
class BuildAvroProjection extends AvroCustomOrderSchemaVisitor<Schema, Schema.Field> {
  // record full-name -> new name mappings applied when a record must be renamed
  private final Map<String, String> renames;
  // the expected Iceberg type at the current position in the traversal; each visit method
  // pushes the child type before recursing and restores the parent in a finally block
  private Type current = null;

  BuildAvroProjection(com.netflix.iceberg.Schema expectedSchema, Map<String, String> renames) {
    this.renames = renames;
    this.current = expectedSchema.asStruct();
  }

  /**
   * Projects an Avro record onto the expected struct: reorders fields to the expected order,
   * fills missing optional fields with null defaults, and renames the record if requested.
   * Returns the original record unchanged when no change is detected.
   */
  @Override
  public Schema record(Schema record, List<String> names, Iterable<Schema.Field> schemaIterable) {
    Preconditions.checkArgument(
        current.isNestedType() && current.asNestedType().isStructType(),
        "Cannot project non-struct: %s", current);
    Types.StructType struct = current.asNestedType().asStructType();
    boolean hasChange = false;
    List<Schema.Field> fields = record.getFields();
    // results are produced by field(); a null entry means that field was not projected
    List<Schema.Field> fieldResults = Lists.newArrayList(schemaIterable);
    Map<String, Schema.Field> updateMap = Maps.newHashMap();
    for (int i = 0; i < fields.size(); i += 1) {
      Schema.Field field = fields.get(i);
      Schema.Field updatedField = fieldResults.get(i);
      if (updatedField != null) {
        updateMap.put(updatedField.name(), updatedField);
        // a changed schema or name forces a copy of the record below
        if (!updatedField.schema().equals(field.schema()) ||
            !updatedField.name().equals(field.name())) {
          hasChange = true;
        }
      } else {
        hasChange = true; // column was not projected
      }
    }
    // construct the schema using the expected order
    List<Schema.Field> updatedFields = Lists.newArrayListWithExpectedSize(struct.fields().size());
    List<Types.NestedField> expectedFields = struct.fields();
    for (int i = 0; i < expectedFields.size(); i += 1) {
      Types.NestedField field = expectedFields.get(i);
      // detect reordering
      if (i < fields.size() && !field.name().equals(fields.get(i).name())) {
        hasChange = true;
      }
      Schema.Field avroField = updateMap.get(field.name());
      if (avroField != null) {
        updatedFields.add(avroField);
      } else {
        // the expected field is absent from the file; it can only be filled with null
        Preconditions.checkArgument(field.isOptional(), "Missing required field: %s", field.name());
        // create a field that will be defaulted to null
        Schema.Field newField = new Schema.Field(
            field.name(), toOption(convert(field.type())), null, JsonProperties.NULL_VALUE);
        newField.addProp(AvroSchemaUtil.FIELD_ID_PROP, field.fieldId());
        updatedFields.add(newField);
        hasChange = true;
      }
    }
    if (hasChange || renames.containsKey(record.getFullName())) {
      return copyRecord(record, updatedFields, renames.get(record.getFullName()));
    }
    return record;
  }

  /**
   * Projects a single record field by its Iceberg field id. Returns null when the field is
   * not selected by the expected schema; otherwise returns a copy (fields cannot be reused
   * across Avro schemas), renamed to the expected name when it differs.
   */
  @Override
  public Schema.Field field(Schema.Field field, Supplier<Schema> fieldResult) {
    Types.StructType struct = current.asNestedType().asStructType();
    int fieldId = AvroSchemaUtil.getFieldId(field);
    Types.NestedField expectedField = struct.field(fieldId); // TODO: what if there are no ids?
    // if the field isn't present, it was not selected
    if (expectedField == null) {
      return null;
    }
    String expectedName = expectedField.name();
    // descend into the field's type; restored to the parent struct in the finally block
    this.current = expectedField.type();
    try {
      Schema schema = fieldResult.get();
      if (schema != field.schema() || !expectedName.equals(field.name())) {
        // add an alias for the field
        return copyField(field, schema, expectedName);
      } else {
        // always copy because fields can't be reused
        return copyField(field, field.schema(), field.name());
      }
    } finally {
      this.current = struct;
    }
  }

  /**
   * Projects an option (nullable) union; only 2-branch unions with null are supported.
   * Returns the original union when the non-null branch is unchanged.
   */
  @Override
  public Schema union(Schema union, Iterable<Schema> options) {
    // NOTE(review): "{}" is not a Guava message placeholder (Guava uses %s), so the schema
    // will not be interpolated into this error message — should likely be %s
    Preconditions.checkState(isOptionSchema(union),
        "Invalid schema: non-option unions are not supported: {}", union);
    Schema nonNullOriginal = fromOption(union);
    Schema nonNullResult = fromOptions(Lists.newArrayList(options));
    if (nonNullOriginal != nonNullResult) {
      return toOption(nonNullResult);
    }
    return union;
  }

  /**
   * Projects an Avro array. Arrays of key/value records (or arrays carrying the map logical
   * type) are treated as Iceberg maps with non-string keys; plain arrays project as lists.
   */
  @Override
  public Schema array(Schema array, Supplier<Schema> element) {
    if (array.getLogicalType() instanceof LogicalMap ||
        (current.isMapType() && isKeyValueSchema(array.getElementType()))) {
      Preconditions.checkArgument(current.isMapType(), "Incompatible projected type: %s", current);
      Types.MapType m = current.asNestedType().asMapType();
      this.current = Types.StructType.of(m.fields()); // create a struct to correspond to element
      try {
        Schema keyValueSchema = array.getElementType();
        Schema.Field keyField = keyValueSchema.getFields().get(0);
        Schema.Field valueField = keyValueSchema.getFields().get(1);
        // only the value can be projected; keys must pass through unchanged
        Schema.Field valueProjection = element.get().getField("value");
        // element was changed, create a new array
        if (valueProjection.schema() != valueField.schema()) {
          return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(),
              getFieldId(keyField), keyField.name(), keyField.schema(),
              getFieldId(valueField), valueField.name(), valueProjection.schema());
        } else if (!(array.getLogicalType() instanceof LogicalMap)) {
          // rebuild so the result carries the map logical type even when nothing else changed
          return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(),
              getFieldId(keyField), keyField.name(), keyField.schema(),
              getFieldId(valueField), valueField.name(), valueField.schema());
        }
        return array;
      } finally {
        this.current = m;
      }
    } else {
      Preconditions.checkArgument(current.isListType(),
          "Incompatible projected type: %s", current);
      Types.ListType list = current.asNestedType().asListType();
      this.current = list.elementType();
      try {
        Schema elementSchema = element.get();
        // element was changed, create a new array
        if (elementSchema != array.getElementType()) {
          return Schema.createArray(elementSchema);
        }
        return array;
      } finally {
        this.current = list;
      }
    }
  }

  /**
   * Projects a native Avro map; keys must be strings, so only the value type can change.
   */
  @Override
  public Schema map(Schema map, Supplier<Schema> value) {
    Preconditions.checkArgument(current.isNestedType() && current.asNestedType().isMapType(),
        "Incompatible projected type: %s", current);
    Types.MapType m = current.asNestedType().asMapType();
    Preconditions.checkArgument(m.keyType() == Types.StringType.get(),
        "Incompatible projected type: key type %s is not string", m.keyType());
    this.current = m.valueType();
    try {
      Schema valueSchema = value.get();
      // element was changed, create a new map
      if (valueSchema != map.getValueType()) {
        return Schema.createMap(valueSchema);
      }
      return map;
    } finally {
      this.current = m;
    }
  }

  /**
   * Applies the safe type promotions supported by projection: int to long and float to double.
   * All other primitives project as themselves.
   */
  @Override
  public Schema primitive(Schema primitive) {
    // check for type promotion
    switch (primitive.getType()) {
      case INT:
        if (current.typeId() == Type.TypeID.LONG) {
          return Schema.create(Schema.Type.LONG);
        }
        return primitive;
      case FLOAT:
        if (current.typeId() == Type.TypeID.DOUBLE) {
          return Schema.create(Schema.Type.DOUBLE);
        }
        return primitive;
      default:
        return primitive;
    }
  }
}
| 2,002 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueWriters.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.Encoder;
import org.apache.avro.util.Utf8;
import java.io.IOException;
import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Factory methods for {@link ValueWriter} implementations that encode in-memory values
 * with an Avro {@link Encoder}.
 * <p>
 * Stateless writers are shared singletons; writers with configuration (fixed length,
 * decimal precision/scale, option index) are created per call.
 */
public class ValueWriters {
  private ValueWriters() {
  }

  public static ValueWriter<Void> nulls() {
    return NullWriter.INSTANCE;
  }

  public static ValueWriter<Boolean> booleans() {
    return BooleanWriter.INSTANCE;
  }

  public static ValueWriter<Integer> ints() {
    return IntegerWriter.INSTANCE;
  }

  public static ValueWriter<Long> longs() {
    return LongWriter.INSTANCE;
  }

  public static ValueWriter<Float> floats() {
    return FloatWriter.INSTANCE;
  }

  public static ValueWriter<Double> doubles() {
    return DoubleWriter.INSTANCE;
  }

  public static ValueWriter<Object> strings() {
    return StringWriter.INSTANCE;
  }

  public static ValueWriter<Utf8> utf8s() {
    return Utf8Writer.INSTANCE;
  }

  public static ValueWriter<UUID> uuids() {
    return UUIDWriter.INSTANCE;
  }

  public static ValueWriter<byte[]> fixed(int length) {
    return new FixedWriter(length);
  }

  public static ValueWriter<GenericData.Fixed> genericFixed(int length) {
    return new GenericFixedWriter(length);
  }

  public static ValueWriter<byte[]> bytes() {
    return BytesWriter.INSTANCE;
  }

  public static ValueWriter<ByteBuffer> byteBuffers() {
    return ByteBufferWriter.INSTANCE;
  }

  public static ValueWriter<BigDecimal> decimal(int precision, int scale) {
    return new DecimalWriter(precision, scale);
  }

  /** Wraps a writer for one branch of a [null, value] (or [value, null]) union. */
  public static <T> ValueWriter<T> option(int nullIndex, ValueWriter<T> writer) {
    return new OptionWriter<>(nullIndex, writer);
  }

  public static <T> ValueWriter<Collection<T>> array(ValueWriter<T> elementWriter) {
    return new CollectionWriter<>(elementWriter);
  }

  /** Writes a map as an Avro array of key/value records (supports non-string keys). */
  public static <K, V> ValueWriter<Map<K, V>> arrayMap(ValueWriter<K> keyWriter,
                                                       ValueWriter<V> valueWriter) {
    return new ArrayMapWriter<>(keyWriter, valueWriter);
  }

  /** Writes a map as a native Avro map (string keys). */
  public static <K, V> ValueWriter<Map<K, V>> map(ValueWriter<K> keyWriter,
                                                  ValueWriter<V> valueWriter) {
    return new MapWriter<>(keyWriter, valueWriter);
  }

  public static ValueWriter<IndexedRecord> record(List<ValueWriter<?>> writers) {
    return new RecordWriter(writers);
  }

  private static class NullWriter implements ValueWriter<Void> {
    private static final NullWriter INSTANCE = new NullWriter();

    private NullWriter() {
    }

    @Override
    public void write(Void ignored, Encoder encoder) throws IOException {
      encoder.writeNull();
    }
  }

  private static class BooleanWriter implements ValueWriter<Boolean> {
    private static final BooleanWriter INSTANCE = new BooleanWriter();

    private BooleanWriter() {
    }

    @Override
    public void write(Boolean bool, Encoder encoder) throws IOException {
      encoder.writeBoolean(bool);
    }
  }

  private static class IntegerWriter implements ValueWriter<Integer> {
    private static final IntegerWriter INSTANCE = new IntegerWriter();

    private IntegerWriter() {
    }

    @Override
    public void write(Integer i, Encoder encoder) throws IOException {
      encoder.writeInt(i);
    }
  }

  private static class LongWriter implements ValueWriter<Long> {
    private static final LongWriter INSTANCE = new LongWriter();

    private LongWriter() {
    }

    @Override
    public void write(Long l, Encoder encoder) throws IOException {
      encoder.writeLong(l);
    }
  }

  private static class FloatWriter implements ValueWriter<Float> {
    private static final FloatWriter INSTANCE = new FloatWriter();

    private FloatWriter() {
    }

    @Override
    public void write(Float f, Encoder encoder) throws IOException {
      encoder.writeFloat(f);
    }
  }

  private static class DoubleWriter implements ValueWriter<Double> {
    private static final DoubleWriter INSTANCE = new DoubleWriter();

    private DoubleWriter() {
    }

    @Override
    public void write(Double d, Encoder encoder) throws IOException {
      encoder.writeDouble(d);
    }
  }

  private static class StringWriter implements ValueWriter<Object> {
    private static final StringWriter INSTANCE = new StringWriter();

    private StringWriter() {
    }

    @Override
    public void write(Object s, Encoder encoder) throws IOException {
      // use getBytes because it may return the backing byte array if available.
      // otherwise, it copies to a new byte array, which is still cheaper than Avro
      // calling toString, which incurs encoding costs
      if (s instanceof Utf8) {
        encoder.writeString((Utf8) s);
      } else if (s instanceof String) {
        encoder.writeString(new Utf8((String) s));
      } else if (s == null) {
        throw new IllegalArgumentException("Cannot write null to required string column");
      } else {
        throw new IllegalArgumentException(
            "Cannot write unknown string type: " + s.getClass().getName() + ": " + s.toString());
      }
    }
  }

  private static class Utf8Writer implements ValueWriter<Utf8> {
    private static final Utf8Writer INSTANCE = new Utf8Writer();

    private Utf8Writer() {
    }

    @Override
    public void write(Utf8 s, Encoder encoder) throws IOException {
      encoder.writeString(s);
    }
  }

  private static class UUIDWriter implements ValueWriter<UUID> {
    // per-thread reusable 16-byte scratch buffer; UUIDs are encoded big-endian
    private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
      ByteBuffer buffer = ByteBuffer.allocate(16);
      buffer.order(ByteOrder.BIG_ENDIAN);
      return buffer;
    });

    private static final UUIDWriter INSTANCE = new UUIDWriter();

    private UUIDWriter() {
    }

    @Override
    public void write(UUID uuid, Encoder encoder) throws IOException {
      // TODO: direct conversion from string to byte buffer
      ByteBuffer buffer = BUFFER.get();
      buffer.rewind();
      buffer.putLong(uuid.getMostSignificantBits());
      buffer.putLong(uuid.getLeastSignificantBits());
      encoder.writeFixed(buffer.array());
    }
  }

  private static class FixedWriter implements ValueWriter<byte[]> {
    private final int length;

    private FixedWriter(int length) {
      this.length = length;
    }

    @Override
    public void write(byte[] bytes, Encoder encoder) throws IOException {
      Preconditions.checkArgument(bytes.length == length,
          "Cannot write byte array of length %s as fixed[%s]", bytes.length, length);
      encoder.writeFixed(bytes);
    }
  }

  private static class GenericFixedWriter implements ValueWriter<GenericData.Fixed> {
    private final int length;

    private GenericFixedWriter(int length) {
      this.length = length;
    }

    @Override
    public void write(GenericData.Fixed datum, Encoder encoder) throws IOException {
      Preconditions.checkArgument(datum.bytes().length == length,
          "Cannot write byte array of length %s as fixed[%s]", datum.bytes().length, length);
      encoder.writeFixed(datum.bytes());
    }
  }

  private static class BytesWriter implements ValueWriter<byte[]> {
    private static final BytesWriter INSTANCE = new BytesWriter();

    private BytesWriter() {
    }

    @Override
    public void write(byte[] bytes, Encoder encoder) throws IOException {
      encoder.writeBytes(bytes);
    }
  }

  private static class ByteBufferWriter implements ValueWriter<ByteBuffer> {
    private static final ByteBufferWriter INSTANCE = new ByteBufferWriter();

    private ByteBufferWriter() {
    }

    @Override
    public void write(ByteBuffer bytes, Encoder encoder) throws IOException {
      encoder.writeBytes(bytes);
    }
  }

  /**
   * Writes decimals as fixed-length, big-endian, two's-complement unscaled values,
   * sign-extending into the leading padding bytes.
   */
  private static class DecimalWriter implements ValueWriter<BigDecimal> {
    private final int precision;
    private final int scale;
    private final int length;
    // per-thread reusable output buffer sized for the required fixed length
    private final ThreadLocal<byte[]> bytes;

    private DecimalWriter(int precision, int scale) {
      this.precision = precision;
      this.scale = scale;
      this.length = TypeUtil.decimalRequriedBytes(precision);
      this.bytes = ThreadLocal.withInitial(() -> new byte[length]);
    }

    @Override
    public void write(BigDecimal decimal, Encoder encoder) throws IOException {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);
      // sign-extend: negative values pad with 0xFF, non-negative with 0x00
      byte fillByte = (byte) (decimal.signum() < 0 ? 0xFF : 0x00);
      byte[] unscaled = decimal.unscaledValue().toByteArray();
      byte[] buf = bytes.get();
      int offset = length - unscaled.length;
      for (int i = 0; i < length; i += 1) {
        if (i < offset) {
          buf[i] = fillByte;
        } else {
          buf[i] = unscaled[i - offset];
        }
      }
      encoder.writeFixed(buf);
    }
  }

  /** Writes a union of null and a value, choosing the branch index by nullness. */
  private static class OptionWriter<T> implements ValueWriter<T> {
    private final int nullIndex;
    private final int valueIndex;
    private final ValueWriter<T> valueWriter;

    private OptionWriter(int nullIndex, ValueWriter<T> valueWriter) {
      this.nullIndex = nullIndex;
      if (nullIndex == 0) {
        this.valueIndex = 1;
      } else if (nullIndex == 1) {
        this.valueIndex = 0;
      } else {
        throw new IllegalArgumentException("Invalid option index: " + nullIndex);
      }
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(T option, Encoder encoder) throws IOException {
      if (option == null) {
        encoder.writeIndex(nullIndex);
      } else {
        encoder.writeIndex(valueIndex);
        valueWriter.write(option, encoder);
      }
    }
  }

  private static class CollectionWriter<T> implements ValueWriter<Collection<T>> {
    private final ValueWriter<T> elementWriter;

    private CollectionWriter(ValueWriter<T> elementWriter) {
      this.elementWriter = elementWriter;
    }

    @Override
    public void write(Collection<T> array, Encoder encoder) throws IOException {
      encoder.writeArrayStart();
      int numElements = array.size();
      encoder.setItemCount(numElements);
      Iterator<T> iter = array.iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        elementWriter.write(iter.next(), encoder);
      }
      encoder.writeArrayEnd();
    }
  }

  /** Writes map entries as an Avro array of key/value pairs (supports non-string keys). */
  private static class ArrayMapWriter<K, V> implements ValueWriter<Map<K, V>> {
    private final ValueWriter<K> keyWriter;
    private final ValueWriter<V> valueWriter;

    private ArrayMapWriter(ValueWriter<K> keyWriter, ValueWriter<V> valueWriter) {
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(Map<K, V> map, Encoder encoder) throws IOException {
      encoder.writeArrayStart();
      int numElements = map.size();
      encoder.setItemCount(numElements);
      Iterator<Map.Entry<K, V>> iter = map.entrySet().iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        Map.Entry<K, V> entry = iter.next();
        keyWriter.write(entry.getKey(), encoder);
        valueWriter.write(entry.getValue(), encoder);
      }
      encoder.writeArrayEnd();
    }
  }

  /** Writes map entries as a native Avro map. */
  private static class MapWriter<K, V> implements ValueWriter<Map<K, V>> {
    private final ValueWriter<K> keyWriter;
    private final ValueWriter<V> valueWriter;

    private MapWriter(ValueWriter<K> keyWriter, ValueWriter<V> valueWriter) {
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(Map<K, V> map, Encoder encoder) throws IOException {
      encoder.writeMapStart();
      int numElements = map.size();
      encoder.setItemCount(numElements);
      Iterator<Map.Entry<K, V>> iter = map.entrySet().iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        Map.Entry<K, V> entry = iter.next();
        keyWriter.write(entry.getKey(), encoder);
        valueWriter.write(entry.getValue(), encoder);
      }
      encoder.writeMapEnd();
    }
  }

  /**
   * Base for struct-like writers: writes each position with its corresponding writer.
   *
   * @param <S> the struct type; subclasses define how positions are accessed
   */
  public abstract static class StructWriter<S> implements ValueWriter<S> {
    private final ValueWriter<Object>[] writers;

    @SuppressWarnings("unchecked")
    protected StructWriter(List<ValueWriter<?>> writers) {
      // generic array creation requires reflection plus an unchecked cast
      this.writers = (ValueWriter<Object>[]) Array.newInstance(ValueWriter.class, writers.size());
      for (int i = 0; i < this.writers.length; i += 1) {
        this.writers[i] = (ValueWriter<Object>) writers.get(i);
      }
    }

    /** Returns the value at the given position of the struct. */
    protected abstract Object get(S struct, int pos);

    public ValueWriter<?> writer(int pos) {
      return writers[pos];
    }

    @Override
    public void write(S row, Encoder encoder) throws IOException {
      for (int i = 0; i < writers.length; i += 1) {
        writers[i].write(get(row, i), encoder);
      }
    }
  }

  private static class RecordWriter extends StructWriter<IndexedRecord> {
    private RecordWriter(List<ValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(IndexedRecord struct, int pos) {
      return struct.get(pos);
    }
  }
}
| 2,003 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/UUIDConversion.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.UUID;
/**
 * Avro logical-type conversion between {@link UUID} values and 16-byte big-endian
 * fixed representations (most-significant bits first).
 */
public class UUIDConversion extends Conversion<UUID> {
  @Override
  public Class<UUID> getConvertedType() {
    return UUID.class;
  }

  @Override
  public String getLogicalTypeName() {
    return LogicalTypes.uuid().getName();
  }

  @Override
  public UUID fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    // argument order is guaranteed left-to-right: first read is the most-significant half
    ByteBuffer wrapped = ByteBuffer.wrap(value.bytes()).order(ByteOrder.BIG_ENDIAN);
    return new UUID(wrapped.getLong(), wrapped.getLong());
  }

  @Override
  public GenericFixed toFixed(UUID value, Schema schema, LogicalType type) {
    ByteBuffer encoded = ByteBuffer.allocate(16)
        .order(ByteOrder.BIG_ENDIAN)
        .putLong(value.getMostSignificantBits())
        .putLong(value.getLeastSignificantBits());
    return new GenericData.Fixed(schema, encoded.array());
  }
}
| 2,004 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/LogicalMap.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import java.util.Collection;
import java.util.Map;
import static org.apache.avro.Schema.Type.ARRAY;
/**
 * Avro {@link LogicalType} used to mark an array-of-key/value-records schema
 * as representing a map.
 */
public class LogicalMap extends LogicalType {
  static final String NAME = "map";
  // stateless, so a single shared instance is sufficient
  private static final LogicalMap INSTANCE = new LogicalMap();
  static LogicalMap get() {
    return INSTANCE;
  }
  private LogicalMap() {
    super(NAME);
  }
  @Override
  public void validate(Schema schema) {
    super.validate(schema);
    // the underlying representation must be an Avro array whose elements are
    // key/value records, not an Avro map (which only supports string keys)
    Preconditions.checkArgument(schema.getType() == ARRAY,
        "Invalid type for map, must be an array: %s", schema);
    Preconditions.checkArgument(AvroSchemaUtil.isKeyValueSchema(schema.getElementType()),
        "Invalid key-value record: %s", schema.getElementType());
  }
}
| 2,005 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopFileIO.java | package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.FileIO;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * {@link FileIO} implementation that delegates to the Hadoop {@link FileSystem} API.
 */
public class HadoopFileIO implements FileIO {
  // wrapped so that this FileIO can be Java-serialized despite holding a Configuration
  private final SerializableConfiguration hadoopConf;

  public HadoopFileIO(Configuration hadoopConf) {
    this.hadoopConf = new SerializableConfiguration(hadoopConf);
  }

  @Override
  public InputFile newInputFile(String path) {
    return HadoopInputFile.fromLocation(path, hadoopConf.get());
  }

  @Override
  public OutputFile newOutputFile(String path) {
    return HadoopOutputFile.fromPath(new Path(path), hadoopConf.get());
  }

  @Override
  public void deleteFile(String path) {
    Path toDelete = new Path(path);
    try {
      // non-recursive: this API deletes a single file, not a directory tree
      Util.getFS(toDelete, hadoopConf.get()).delete(toDelete, false);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to delete file: %s", path);
    }
  }
}
| 2,006 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopStreams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.io.DelegatingInputStream;
import com.netflix.iceberg.io.DelegatingOutputStream;
import com.netflix.iceberg.io.PositionOutputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
 * Convenience methods to get Parquet abstractions for Hadoop data streams.
 *
 * This class is based on Parquet's HadoopStreams.
 */
class HadoopStreams {
  private static final Logger LOG = LoggerFactory.getLogger(HadoopStreams.class);

  /**
   * Wraps a {@link FSDataInputStream} in a {@link SeekableInputStream} implementation for readers.
   *
   * @param stream a Hadoop FSDataInputStream
   * @return a SeekableInputStream
   */
  static SeekableInputStream wrap(FSDataInputStream stream) {
    return new HadoopSeekableInputStream(stream);
  }

  /**
   * Wraps a {@link FSDataOutputStream} in a {@link PositionOutputStream} implementation for
   * writers.
   *
   * @param stream a Hadoop FSDataOutputStream
   * @return a PositionOutputStream
   */
  static PositionOutputStream wrap(FSDataOutputStream stream) {
    return new HadoopPositionOutputStream(stream);
  }

  /**
   * SeekableInputStream that forwards every call to an FSDataInputStream, which implements
   * ByteBufferReadable in Hadoop 2.
   */
  private static class HadoopSeekableInputStream extends SeekableInputStream
      implements DelegatingInputStream {
    private final FSDataInputStream wrapped;

    HadoopSeekableInputStream(FSDataInputStream wrapped) {
      this.wrapped = wrapped;
    }

    @Override
    public InputStream getDelegate() {
      return wrapped;
    }

    @Override
    public long getPos() throws IOException {
      return wrapped.getPos();
    }

    @Override
    public void seek(long newPos) throws IOException {
      wrapped.seek(newPos);
    }

    @Override
    public int read() throws IOException {
      return wrapped.read();
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
      return wrapped.read(b, off, len);
    }

    // not an @Override of SeekableInputStream: delegates ByteBuffer reads to Hadoop
    public int read(ByteBuffer buf) throws IOException {
      return wrapped.read(buf);
    }

    @Override
    public void close() throws IOException {
      wrapped.close();
    }
  }

  /**
   * PositionOutputStream that forwards every call to an FSDataOutputStream.
   */
  private static class HadoopPositionOutputStream extends PositionOutputStream
      implements DelegatingOutputStream {
    private final FSDataOutputStream wrapped;

    HadoopPositionOutputStream(FSDataOutputStream wrapped) {
      this.wrapped = wrapped;
    }

    @Override
    public OutputStream getDelegate() {
      return wrapped;
    }

    @Override
    public long getPos() throws IOException {
      return wrapped.getPos();
    }

    @Override
    public void write(int b) throws IOException {
      wrapped.write(b);
    }

    @Override
    public void write(byte[] b) throws IOException {
      wrapped.write(b);
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      wrapped.write(b, off, len);
    }

    @Override
    public void flush() throws IOException {
      wrapped.flush();
    }

    @Override
    public void close() throws IOException {
      wrapped.close();
    }
  }
}
| 2,007 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/Util.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * Hadoop file system helpers.
 */
class Util {
  private Util() {
  }
  /**
   * Resolves the {@link FileSystem} that serves the given path.
   *
   * @param path a Hadoop path
   * @param conf the Hadoop configuration used for resolution
   * @return the file system for the path
   * @throws RuntimeIOException if the file system cannot be resolved
   */
  public static FileSystem getFS(Path path, Configuration conf) {
    try {
      return path.getFileSystem(conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }
}
| 2,008 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopInputFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * {@link InputFile} implementation using the Hadoop {@link FileSystem} API.
 * <p>
 * This class is based on Parquet's HadoopInputFile.
 */
public class HadoopInputFile implements InputFile {
  private final FileSystem fs;
  private final Path path;
  private final Configuration conf;
  // both are resolved lazily unless supplied by the caller
  private FileStatus stat = null;
  private Long length = null;

  public static HadoopInputFile fromLocation(CharSequence location, Configuration conf) {
    return fromPath(new Path(location.toString()), conf);
  }

  public static HadoopInputFile fromLocation(CharSequence location, long length,
                                             Configuration conf) {
    return fromPath(new Path(location.toString()), length, conf);
  }

  public static HadoopInputFile fromPath(Path path, Configuration conf) {
    try {
      return new HadoopInputFile(path.getFileSystem(conf), path, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }

  public static HadoopInputFile fromPath(Path path, long length, Configuration conf) {
    try {
      return new HadoopInputFile(path.getFileSystem(conf), path, length, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }

  public static HadoopInputFile fromStatus(FileStatus stat, Configuration conf) {
    try {
      FileSystem fs = stat.getPath().getFileSystem(conf);
      return new HadoopInputFile(fs, stat, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", stat.getPath());
    }
  }

  private HadoopInputFile(FileSystem fs, Path path, Configuration conf) {
    this.fs = fs;
    this.path = path;
    this.conf = conf;
  }

  private HadoopInputFile(FileSystem fs, Path path, long length, Configuration conf) {
    this(fs, path, conf);
    // a known length avoids a getFileStatus round trip in getLength()
    this.length = length;
  }

  private HadoopInputFile(FileSystem fs, FileStatus stat, Configuration conf) {
    this(fs, stat.getPath(), stat.getLen(), conf);
    this.stat = stat;
  }

  // fetches and caches the FileStatus on first use
  private FileStatus lazyStat() {
    if (stat == null) {
      try {
        this.stat = fs.getFileStatus(path);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to get status for file: %s", path);
      }
    }
    return stat;
  }

  @Override
  public long getLength() {
    if (length == null) {
      this.length = lazyStat().getLen();
    }
    return length;
  }

  @Override
  public SeekableInputStream newStream() {
    try {
      return HadoopStreams.wrap(fs.open(path));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to open input stream for file: %s", path);
    }
  }

  public Configuration getConf() {
    return conf;
  }

  public FileStatus getStat() {
    return lazyStat();
  }

  @Override
  public String location() {
    return path.toString();
  }

  @Override
  public String toString() {
    return path.toString();
  }
}
| 2,009 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopOutputFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.io.PositionOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * {@link OutputFile} implementation using the Hadoop {@link FileSystem} API.
 */
public class HadoopOutputFile implements OutputFile {
  public static OutputFile fromPath(Path path, Configuration conf) {
    return new HadoopOutputFile(path, conf);
  }

  private final Path path;
  private final Configuration conf;

  private HadoopOutputFile(Path path, Configuration conf) {
    this.path = path;
    this.conf = conf;
  }

  @Override
  public PositionOutputStream create() {
    FileSystem fs = Util.getFS(path, conf);
    try {
      // overwrite=false: creation fails if the path already exists
      return HadoopStreams.wrap(fs.create(path, false));
    } catch (FileAlreadyExistsException e) {
      throw new AlreadyExistsException(e, "Path already exists: %s", path);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create file: %s", path);
    }
  }

  @Override
  public PositionOutputStream createOrOverwrite() {
    FileSystem fs = Util.getFS(path, conf);
    try {
      // overwrite=true: any existing file at the path is replaced
      return HadoopStreams.wrap(fs.create(path, true));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create file: %s", path);
    }
  }

  public Path getPath() {
    return path;
  }

  public Configuration getConf() {
    return conf;
  }

  @Override
  public String location() {
    return path.toString();
  }

  @Override
  public InputFile toInputFile() {
    return HadoopInputFile.fromPath(path, conf);
  }

  @Override
  public String toString() {
    return location();
  }
}
| 2,010 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.FileIO;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.exceptions.ValidationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.UUID;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
/**
 * TableOperations implementation for file systems that support atomic rename.
 * <p>
 * This maintains metadata in a "metadata" folder under the table location. Each commit
 * writes a new metadata file (v1, v2, ...) and updates a best-effort
 * "version-hint.text" pointer to the latest version.
 */
public class HadoopTableOperations implements TableOperations {
  private static final Logger LOG = LoggerFactory.getLogger(HadoopTableOperations.class);

  private final Configuration conf;
  private final Path location;
  private TableMetadata currentMetadata = null;
  private Integer version = null;
  private boolean shouldRefresh = true;
  private HadoopFileIO defaultFileIo = null;

  protected HadoopTableOperations(Path location, Configuration conf) {
    this.conf = conf;
    this.location = location;
  }

  @Override
  public TableMetadata current() {
    if (shouldRefresh) {
      return refresh();
    }
    return currentMetadata;
  }

  @Override
  public TableMetadata refresh() {
    // start from the cached version, falling back to the hint file
    int ver = version != null ? version : readVersionHint();
    Path metadataFile = metadataFile(ver);
    FileSystem fs = getFS(metadataFile, conf);
    try {
      // don't check if the file exists if version is non-null because it was already checked
      if (version == null && !fs.exists(metadataFile)) {
        if (ver == 0) {
          // no v0 metadata means the table doesn't exist yet
          return null;
        }
        throw new ValidationException("Metadata file is missing: %s", metadataFile);
      }
      // the hint is best-effort, so scan forward for any newer committed versions
      while (fs.exists(metadataFile(ver + 1))) {
        ver += 1;
        metadataFile = metadataFile(ver);
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", metadataFile);
    }
    this.version = ver;
    this.currentMetadata = TableMetadataParser.read(this,
        io().newInputFile(metadataFile.toString()));
    this.shouldRefresh = false;
    return currentMetadata;
  }

  @Override
  public void commit(TableMetadata base, TableMetadata metadata) {
    if (base != current()) {
      throw new CommitFailedException("Cannot commit changes based on stale table metadata");
    }

    if (base == metadata) {
      LOG.info("Nothing to commit.");
      return;
    }

    // write the new metadata to a temp file, then atomically rename it into place
    Path tempMetadataFile = metadataPath(UUID.randomUUID().toString() + getFileExtension(conf));
    TableMetadataParser.write(metadata, io().newOutputFile(tempMetadataFile.toString()));

    int nextVersion = (version != null ? version : 0) + 1;
    Path finalMetadataFile = metadataFile(nextVersion);
    FileSystem fs = getFS(tempMetadataFile, conf);

    try {
      if (fs.exists(finalMetadataFile)) {
        throw new CommitFailedException(
            "Version %d already exists: %s", nextVersion, finalMetadataFile);
      }
    } catch (IOException e) {
      // use the same %s-formatted message style as the other failures here
      throw new RuntimeIOException(e,
          "Failed to check if next version exists: %s", finalMetadataFile);
    }

    try {
      // this rename operation is the atomic commit operation
      if (!fs.rename(tempMetadataFile, finalMetadataFile)) {
        throw new CommitFailedException(
            "Failed to commit changes using rename: %s", finalMetadataFile);
      }
    } catch (IOException e) {
      // NOTE(review): the temp metadata file is not cleaned up when the rename
      // fails; confirm whether orphaned temp files are acceptable here
      throw new CommitFailedException(e,
          "Failed to commit changes using rename: %s", finalMetadataFile);
    }

    // update the best-effort version pointer
    writeVersionHint(nextVersion);
    this.shouldRefresh = true;
  }

  @Override
  public FileIO io() {
    if (defaultFileIo == null) {
      defaultFileIo = new HadoopFileIO(conf);
    }
    return defaultFileIo;
  }

  @Override
  public String metadataFileLocation(String fileName) {
    return metadataPath(fileName).toString();
  }

  @Override
  public long newSnapshotId() {
    return System.currentTimeMillis();
  }

  // parameter renamed so it does not shadow the cached version field
  private Path metadataFile(int metadataVersion) {
    return metadataPath("v" + metadataVersion + getFileExtension(conf));
  }

  private Path metadataPath(String filename) {
    return new Path(new Path(location, "metadata"), filename);
  }

  private Path versionHintFile() {
    return metadataPath("version-hint.text");
  }

  // best-effort: failures are logged rather than thrown because refresh() can
  // recover by scanning for metadata files
  private void writeVersionHint(int versionToWrite) {
    Path versionHintFile = versionHintFile();
    FileSystem fs = getFS(versionHintFile, conf);

    try (FSDataOutputStream out = fs.create(versionHintFile, true /* overwrite */)) {
      out.write(String.valueOf(versionToWrite).getBytes("UTF-8"));
    } catch (IOException e) {
      LOG.warn("Failed to update version hint", e);
    }
  }

  private int readVersionHint() {
    Path versionHintFile = versionHintFile();
    try {
      FileSystem fs = Util.getFS(versionHintFile, conf);
      if (!fs.exists(versionHintFile)) {
        return 0;
      }

      try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(versionHintFile)))) {
        String line = in.readLine();
        if (line == null) {
          // guard against an empty hint file: readLine() returns null at EOF,
          // which previously caused a NullPointerException
          return 0;
        }
        return Integer.parseInt(line.trim());
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", versionHintFile);
    }
  }

  protected FileSystem getFS(Path path, Configuration conf) {
    return Util.getFS(path, conf);
  }
}
| 2,011 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/SerializableConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.hadoop;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import org.apache.hadoop.conf.Configuration;
/**
 * Wraps a {@link Configuration} object in a {@link Serializable} layer.
 */
public class SerializableConfiguration implements Serializable {
  // transient because Hadoop's Configuration is not Serializable;
  // it is written and restored via write()/readFields() below
  private transient Configuration hadoopConf;

  public SerializableConfiguration(Configuration hadoopConf) {
    // parameter renamed from the original typo "hadoopCOnf"
    this.hadoopConf = hadoopConf;
  }

  private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
    hadoopConf.write(out);
  }

  private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException {
    in.defaultReadObject();
    // loadDefaults=false: the serialized fields fully describe the configuration
    hadoopConf = new Configuration(false);
    hadoopConf.readFields(in);
  }

  /** Returns the wrapped Hadoop configuration. */
  public Configuration get() {
    return hadoopConf;
  }
}
| 2,012 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.BaseTable;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.Tables;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
 * Implementation of Iceberg tables that uses the Hadoop FileSystem
 * to store metadata and manifests.
 */
public class HadoopTables implements Tables, Configurable {
  private Configuration conf;

  public HadoopTables() {
    this(new Configuration());
  }

  public HadoopTables(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Loads the table location from a FileSystem path location.
   *
   * @param location a path URI (e.g. hdfs:///warehouse/my_table/)
   * @return table implementation
   */
  @Override
  public Table load(String location) {
    TableOperations ops = newTableOps(location);
    if (ops.current() != null) {
      return new BaseTable(ops, location);
    }
    throw new NoSuchTableException("Table does not exist at location: " + location);
  }

  /**
   * Create a table using the FileSystem implementation resolved from
   * location.
   *
   * @param schema iceberg schema used to create the table
   * @param spec partition specification
   * @param properties table properties for the new table
   * @param location a path URI (e.g. hdfs:///warehouse/my_table)
   * @return newly created table implementation
   */
  @Override
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
                      String location) {
    TableOperations ops = newTableOps(location);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table already exists at location: " + location);
    }
    // base metadata is null because this is the table's first commit
    ops.commit(null, newTableMetadata(ops, schema, spec, location, properties));
    return new BaseTable(ops, location);
  }

  private TableOperations newTableOps(String location) {
    return new HadoopTableOperations(new Path(location), conf);
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}
| 2,013 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynFields.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Sets;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Set;
public class DynFields {
/**
* Convenience wrapper class around {@link java.lang.reflect.Field}.
*
* Allows callers to invoke the wrapped method with all Exceptions wrapped by
* RuntimeException, or with a single Exception catch block.
*/
public static class UnboundField<T> {
private final Field field;
private final String name;
private UnboundField(Field field, String name) {
this.field = field;
this.name = name;
}
@SuppressWarnings("unchecked")
public T get(Object target) {
try {
return (T) field.get(target);
} catch (IllegalAccessException e) {
throw Throwables.propagate(e);
}
}
public void set(Object target, T value) {
try {
field.set(target, value);
} catch (IllegalAccessException e) {
throw Throwables.propagate(e);
}
}
public String toString() {
return Objects.toStringHelper(this)
.add("class", field.getDeclaringClass().toString())
.add("name", name)
.add("type", field.getType())
.toString();
}
/**
* Returns this method as a BoundMethod for the given receiver.
*
* @param target an Object on which to get or set this field
* @return a {@link BoundField} for this field and the target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
*/
public BoundField<T> bind(Object target) {
Preconditions.checkState(!isStatic() || this == AlwaysNull.INSTANCE,
"Cannot bind static field " + name);
Preconditions.checkArgument(
field.getDeclaringClass().isAssignableFrom(target.getClass()),
"Cannot bind field " + name + " to instance of " +
target.getClass());
return new BoundField<>(this, target);
}
/**
* Returns this field as a StaticField.
*
* @return a {@link StaticField} for this field
* @throws IllegalStateException if the method is not static
*/
public StaticField<T> asStatic() {
Preconditions.checkState(isStatic(), "Field " + name + " is not static");
return new StaticField<>(this);
}
/**
* @return whether the field is a static field
*/
public boolean isStatic() {
return Modifier.isStatic(field.getModifiers());
}
/**
* @return whether the field is always null
*/
public boolean isAlwaysNull() {
return this == AlwaysNull.INSTANCE;
}
}
private static class AlwaysNull extends UnboundField<Void> {
private static final AlwaysNull INSTANCE = new AlwaysNull();
private AlwaysNull() {
super(null, "AlwaysNull");
}
@Override
public Void get(Object target) {
return null;
}
@Override
public void set(Object target, Void value) {
}
public String toString() {
return "Field(AlwaysNull)";
}
@Override
public boolean isStatic() {
return true;
}
@Override
public boolean isAlwaysNull() {
return true;
}
}
  /**
   * Wrapper around an {@link UnboundField} for a static field; get and set do
   * not require a target instance.
   *
   * @param <T> the Java type of values stored in the field
   */
  public static class StaticField<T> {
    private final UnboundField<T> field;
    private StaticField(UnboundField<T> field) {
      this.field = field;
    }
    // static fields are accessed with a null target
    public T get() {
      return field.get(null);
    }
    public void set(T value) {
      field.set(null, value);
    }
  }
  /**
   * Wrapper around an {@link UnboundField} bound to a specific target object,
   * so get and set no longer take a receiver.
   *
   * @param <T> the Java type of values stored in the field
   */
  public static class BoundField<T> {
    private final UnboundField<T> field;
    private final Object target;
    private BoundField(UnboundField<T> field, Object target) {
      this.field = field;
      this.target = target;
    }
    public T get() {
      return field.get(target);
    }
    public void set(T value) {
      field.set(target, value);
    }
  }
  /**
   * Returns a new Builder used to locate a field implementation by class and
   * field name.
   */
  public static Builder builder() {
    return new Builder();
  }
public static class Builder {
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private UnboundField<?> field = null;
private final Set<String> candidates = Sets.newHashSet();
private boolean defaultAlwaysNull = false;
/**
* Set the {@link ClassLoader} used to lookup classes by name.
* <p>
* If not set, the current thread's ClassLoader is used.
*
* @param loader a ClassLoader
* @return this Builder for method chaining
*/
public Builder loader(ClassLoader loader) {
this.loader = loader;
return this;
}
/**
* Instructs this builder to return AlwaysNull if no implementation is
* found.
*
* @return this Builder for method chaining
*/
public Builder defaultAlwaysNull() {
this.defaultAlwaysNull = true;
return this;
}
/**
* Checks for an implementation, first finding the class by name.
*
* @param className name of a class
* @param fieldName name of the field
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder impl(String className, String fieldName) {
// don't do any work if an implementation has been found
if (field != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
impl(targetClass, fieldName);
} catch (ClassNotFoundException e) {
// not the right implementation
candidates.add(className + "." + fieldName);
}
return this;
}
/**
* Checks for an implementation.
*
* @param targetClass a class instance
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder impl(Class<?> targetClass, String fieldName) {
// don't do any work if an implementation has been found
if (field != null || targetClass == null) {
return this;
}
try {
this.field = new UnboundField<>(
targetClass.getField(fieldName), fieldName);
} catch (NoSuchFieldException e) {
// not the right implementation
candidates.add(targetClass.getName() + "." + fieldName);
}
return this;
}
/**
* Checks for a hidden implementation, first finding the class by name.
*
* @param className name of a class
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder hiddenImpl(String className, String fieldName) {
// don't do any work if an implementation has been found
if (field != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
hiddenImpl(targetClass, fieldName);
} catch (ClassNotFoundException e) {
// not the right implementation
candidates.add(className + "." + fieldName);
}
return this;
}
/**
* Checks for a hidden implementation.
*
* @param targetClass a class instance
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder hiddenImpl(Class<?> targetClass, String fieldName) {
// don't do any work if an implementation has been found
if (field != null || targetClass == null) {
return this;
}
try {
Field hidden = targetClass.getDeclaredField(fieldName);
AccessController.doPrivileged(new MakeFieldAccessible(hidden));
this.field = new UnboundField(hidden, fieldName);
} catch (SecurityException | NoSuchFieldException e) {
// unusable
candidates.add(targetClass.getName() + "." + fieldName);
}
return this;
}
/**
* Returns the first valid implementation as a UnboundField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link UnboundField} with a valid implementation
* @throws NoSuchFieldException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <T> UnboundField<T> buildChecked() throws NoSuchFieldException {
if (field != null) {
return (UnboundField<T>) field;
} else if (defaultAlwaysNull) {
return (UnboundField<T>) AlwaysNull.INSTANCE;
} else {
throw new NoSuchFieldException("Cannot find field from candidates: " +
Joiner.on(", ").join(candidates));
}
}
/**
* Returns the first valid implementation as a UnboundField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link UnboundField} with a valid implementation
* @throws RuntimeException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <T> UnboundField<T> build() {
if (field != null) {
return (UnboundField<T>) field;
} else if (defaultAlwaysNull) {
return (UnboundField<T>) AlwaysNull.INSTANCE;
} else {
throw new RuntimeException("Cannot find field from candidates: " +
Joiner.on(", ").join(candidates));
}
}
/**
* Returns the first valid implementation as a BoundMethod or throws a
* NoSuchMethodException if there is none.
*
* @param target an Object on which to get and set the field
* @param <T> Java class stored in the field
* @return a {@link BoundField} with a valid implementation and target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
* @throws NoSuchFieldException if no implementation was found
*/
public <T> BoundField<T> buildChecked(Object target) throws NoSuchFieldException {
return this.<T>buildChecked().bind(target);
}
/**
* Returns the first valid implementation as a BoundMethod or throws a
* RuntimeException if there is none.
*
* @param target an Object on which to get and set the field
* @param <T> Java class stored in the field
* @return a {@link BoundField} with a valid implementation and target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
* @throws RuntimeException if no implementation was found
*/
public <T> BoundField<T> build(Object target) {
return this.<T>build().bind(target);
}
/**
* Returns the first valid implementation as a StaticField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link StaticField} with a valid implementation
* @throws IllegalStateException if the method is not static
* @throws NoSuchFieldException if no implementation was found
*/
public <T> StaticField<T> buildStaticChecked() throws NoSuchFieldException {
return this.<T>buildChecked().asStatic();
}
/**
* Returns the first valid implementation as a StaticField or throws a
* RuntimeException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link StaticField} with a valid implementation
* @throws IllegalStateException if the method is not static
* @throws RuntimeException if no implementation was found
*/
public <T> StaticField<T> buildStatic() {
return this.<T>build().asStatic();
}
}
private static class MakeFieldAccessible implements PrivilegedAction<Void> {
private Field hidden;
public MakeFieldAccessible(Field hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
}
| 2,014 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynMethods.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
/**
* Copied from parquet-common
*/
public class DynMethods {
/**
* Convenience wrapper class around {@link java.lang.reflect.Method}.
*
* Allows callers to invoke the wrapped method with all Exceptions wrapped by
* RuntimeException, or with a single Exception catch block.
*/
public static class UnboundMethod {
private final Method method;
private final String name;
private final int argLength;
UnboundMethod(Method method, String name) {
this.method = method;
this.name = name;
this.argLength = (method == null || method.isVarArgs()) ? -1 :
method.getParameterTypes().length;
}
@SuppressWarnings("unchecked")
public <R> R invokeChecked(Object target, Object... args) throws Exception {
try {
if (argLength < 0) {
return (R) method.invoke(target, args);
} else {
return (R) method.invoke(target, Arrays.copyOfRange(args, 0, argLength));
}
} catch (InvocationTargetException e) {
Throwables.propagateIfInstanceOf(e.getCause(), Exception.class);
Throwables.propagateIfInstanceOf(e.getCause(), RuntimeException.class);
throw Throwables.propagate(e.getCause());
}
}
public <R> R invoke(Object target, Object... args) {
try {
return this.invokeChecked(target, args);
} catch (Exception e) {
Throwables.propagateIfInstanceOf(e, RuntimeException.class);
throw Throwables.propagate(e);
}
}
/**
* Returns this method as a BoundMethod for the given receiver.
*
* @param receiver an Object to receive the method invocation
* @return a {@link BoundMethod} for this method and the receiver
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
*/
public BoundMethod bind(Object receiver) {
Preconditions.checkState(!isStatic(),
"Cannot bind static method " + method.toGenericString());
Preconditions.checkArgument(
method.getDeclaringClass().isAssignableFrom(receiver.getClass()),
"Cannot bind " + method.toGenericString() + " to instance of " +
receiver.getClass());
return new BoundMethod(this, receiver);
}
/**
* @return whether the method is a static method
*/
public boolean isStatic() {
return Modifier.isStatic(method.getModifiers());
}
/**
* @return whether the method is a noop
*/
public boolean isNoop() {
return this == NOOP;
}
/**
* Returns this method as a StaticMethod.
*
* @return a {@link StaticMethod} for this method
* @throws IllegalStateException if the method is not static
*/
public StaticMethod asStatic() {
Preconditions.checkState(isStatic(), "Method is not static");
return new StaticMethod(this);
}
public String toString() {
return "DynMethods.UnboundMethod(name=" + name +" method=" +
method.toGenericString() + ")";
}
/**
* Singleton {@link UnboundMethod}, performs no operation and returns null.
*/
private static UnboundMethod NOOP = new UnboundMethod(null, "NOOP") {
@Override
public <R> R invokeChecked(Object target, Object... args) throws Exception {
return null;
}
@Override
public BoundMethod bind(Object receiver) {
return new BoundMethod(this, receiver);
}
@Override
public StaticMethod asStatic() {
return new StaticMethod(this);
}
@Override
public boolean isStatic() {
return true;
}
@Override
public String toString() {
return "DynMethods.UnboundMethod(NOOP)";
}
};
}
public static class BoundMethod {
private final UnboundMethod method;
private final Object receiver;
private BoundMethod(UnboundMethod method, Object receiver) {
this.method = method;
this.receiver = receiver;
}
public <R> R invokeChecked(Object... args) throws Exception {
return method.invokeChecked(receiver, args);
}
public <R> R invoke(Object... args) {
return method.invoke(receiver, args);
}
}
public static class StaticMethod {
private final UnboundMethod method;
private StaticMethod(UnboundMethod method) {
this.method = method;
}
public <R> R invokeChecked(Object... args) throws Exception {
return method.invokeChecked(null, args);
}
public <R> R invoke(Object... args) {
return method.invoke(null, args);
}
}
  /**
   * Constructs a new builder for calling methods dynamically.
   * <p>
   * Candidate implementations registered on the builder are tried in the
   * order given; the first match wins.
   *
   * @param methodName name of the method the builder will locate
   * @return a Builder for finding a method
   */
  public static Builder builder(String methodName) {
    return new Builder(methodName);
  }
  /**
   * Builder that locates a method from an ordered list of candidate
   * implementations; the first candidate that resolves is kept and all
   * later candidates are ignored.
   */
  public static class Builder {
    // the method name used by the name-less impl/hiddenImpl overloads
    private final String name;
    private ClassLoader loader = Thread.currentThread().getContextClassLoader();
    // the first implementation found, or null until a candidate matches
    private UnboundMethod method = null;
    public Builder(String methodName) {
      this.name = methodName;
    }
    /**
     * Set the {@link ClassLoader} used to lookup classes by name.
     * <p>
     * If not set, the current thread's ClassLoader is used.
     *
     * @param loader a ClassLoader
     * @return this Builder for method chaining
     */
    public Builder loader(ClassLoader loader) {
      this.loader = loader;
      return this;
    }
    /**
     * If no implementation has been found, adds a NOOP method.
     *
     * Note: calls to impl will not match after this method is called!
     *
     * @return this Builder for method chaining
     */
    public Builder orNoop() {
      if (method == null) {
        this.method = UnboundMethod.NOOP;
      }
      return this;
    }
    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * @param className name of a class
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(String className, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        Class<?> targetClass = Class.forName(className, true, loader);
        impl(targetClass, methodName, argClasses);
      } catch (ClassNotFoundException e) {
        // not the right implementation
      }
      return this;
    }
    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param className name of a class
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(String className, Class<?>... argClasses) {
      impl(className, name, argClasses);
      return this;
    }
    /**
     * Checks for a method implementation.
     *
     * @param targetClass a class instance
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(Class<?> targetClass, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        this.method = new UnboundMethod(
            targetClass.getMethod(methodName, argClasses), name);
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }
    /**
     * Checks for a method implementation.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(Class<?> targetClass, Class<?>... argClasses) {
      impl(targetClass, name, argClasses);
      return this;
    }
    /**
     * Checks for a public constructor on the given class, wrapping it as a
     * method named "newInstance".
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the constructor
     * @return this Builder for method chaining
     */
    public Builder ctorImpl(Class<?> targetClass, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        this.method = new DynConstructors.Builder()
            .impl(targetClass, argClasses)
            .buildChecked();
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }
    /**
     * Checks for a public constructor, first finding the class by name,
     * wrapping it as a method named "newInstance".
     *
     * @param className name of a class
     * @param argClasses argument classes for the constructor
     * @return this Builder for method chaining
     */
    public Builder ctorImpl(String className, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        this.method = new DynConstructors.Builder()
            .impl(className, argClasses)
            .buildChecked();
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }
    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * @param className name of a class
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(String className, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        Class<?> targetClass = Class.forName(className, true, loader);
        hiddenImpl(targetClass, methodName, argClasses);
      } catch (ClassNotFoundException e) {
        // not the right implementation
      }
      return this;
    }
    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param className name of a class
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(String className, Class<?>... argClasses) {
      hiddenImpl(className, name, argClasses);
      return this;
    }
    /**
     * Checks for a method implementation.
     *
     * @param targetClass a class instance
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(Class<?> targetClass, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }
      try {
        Method hidden = targetClass.getDeclaredMethod(methodName, argClasses);
        AccessController.doPrivileged(new MakeAccessible(hidden));
        this.method = new UnboundMethod(hidden, name);
      } catch (SecurityException | NoSuchMethodException e) {
        // unusable or not the right implementation
      }
      return this;
    }
    /**
     * Checks for a method implementation.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(Class<?> targetClass, Class<?>... argClasses) {
      hiddenImpl(targetClass, name, argClasses);
      return this;
    }
    /**
     * Returns the first valid implementation as a UnboundMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @return a {@link UnboundMethod} with a valid implementation
     * @throws NoSuchMethodException if no implementation was found
     */
    public UnboundMethod buildChecked() throws NoSuchMethodException {
      if (method != null) {
        return method;
      } else {
        throw new NoSuchMethodException("Cannot find method: " + name);
      }
    }
    /**
     * Returns the first valid implementation as a UnboundMethod or throws a
     * RuntimeError if there is none.
     *
     * @return a {@link UnboundMethod} with a valid implementation
     * @throws RuntimeException if no implementation was found
     */
    public UnboundMethod build() {
      if (method != null) {
        return method;
      } else {
        throw new RuntimeException("Cannot find method: " + name);
      }
    }
    /**
     * Returns the first valid implementation as a BoundMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @param receiver an Object to receive the method invocation
     * @return a {@link BoundMethod} with a valid implementation and receiver
     * @throws IllegalStateException if the method is static
     * @throws IllegalArgumentException if the receiver's class is incompatible
     * @throws NoSuchMethodException if no implementation was found
     */
    public BoundMethod buildChecked(Object receiver) throws NoSuchMethodException {
      return buildChecked().bind(receiver);
    }
    /**
     * Returns the first valid implementation as a BoundMethod or throws a
     * RuntimeError if there is none.
     *
     * @param receiver an Object to receive the method invocation
     * @return a {@link BoundMethod} with a valid implementation and receiver
     * @throws IllegalStateException if the method is static
     * @throws IllegalArgumentException if the receiver's class is incompatible
     * @throws RuntimeException if no implementation was found
     */
    public BoundMethod build(Object receiver) {
      return build().bind(receiver);
    }
    /**
     * Returns the first valid implementation as a StaticMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @return a {@link StaticMethod} with a valid implementation
     * @throws IllegalStateException if the method is not static
     * @throws NoSuchMethodException if no implementation was found
     */
    public StaticMethod buildStaticChecked() throws NoSuchMethodException {
      return buildChecked().asStatic();
    }
    /**
     * Returns the first valid implementation as a StaticMethod or throws a
     * RuntimeException if there is none.
     *
     * @return a {@link StaticMethod} with a valid implementation
     * @throws IllegalStateException if the method is not static
     * @throws RuntimeException if no implementation was found
     */
    public StaticMethod buildStatic() {
      return build().asStatic();
    }
  }
private static class MakeAccessible implements PrivilegedAction<Void> {
private Method hidden;
public MakeAccessible(Method hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
}
| 2,015 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynClasses.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Joiner;
import java.util.LinkedHashSet;
import java.util.Set;
/**
 * Utility for loading the first available class from an ordered list of
 * candidate class names.
 */
public class DynClasses {

  private DynClasses() {
    // static utility class; not meant to be instantiated
  }

  public static Builder builder() {
    return new Builder();
  }

  public static class Builder {
    private ClassLoader loader = Thread.currentThread().getContextClassLoader();
    // the first class found, or null until a candidate loads
    private Class<?> foundClass = null;
    private boolean nullOk = false;
    // every class name tried, in order, for error reporting
    private final Set<String> classNames = new LinkedHashSet<>();

    /**
     * Set the {@link ClassLoader} used to lookup classes by name.
     * <p>
     * If not set, the current thread's ClassLoader is used.
     *
     * @param loader a ClassLoader
     * @return this Builder for method chaining
     */
    public Builder loader(ClassLoader loader) {
      this.loader = loader;
      return this;
    }

    /**
     * Checks for an implementation of the class by name.
     *
     * @param className name of a class
     * @return this Builder for method chaining
     */
    public Builder impl(String className) {
      classNames.add(className);
      if (foundClass != null) {
        return this;
      }
      try {
        this.foundClass = Class.forName(className, true, loader);
      } catch (ClassNotFoundException e) {
        // not the right implementation; keep looking
      }
      return this;
    }

    /**
     * Instructs this builder to return null if no class is found, rather than
     * throwing an Exception.
     *
     * @return this Builder for method chaining
     */
    public Builder orNull() {
      this.nullOk = true;
      return this;
    }

    /**
     * Returns the first implementation or throws ClassNotFoundException if
     * one was not found.
     *
     * @param <S> Java superclass
     * @return a {@link Class} for the first implementation found
     * @throws ClassNotFoundException if no implementation was found
     */
    @SuppressWarnings("unchecked")
    public <S> Class<? extends S> buildChecked() throws ClassNotFoundException {
      if (!nullOk && foundClass == null) {
        throw new ClassNotFoundException("Cannot find class; alternatives: " +
            Joiner.on(", ").join(classNames));
      }
      return (Class<? extends S>) foundClass;
    }

    /**
     * Returns the first implementation or throws RuntimeException if one was
     * not found.
     *
     * @param <S> Java superclass
     * @return a {@link Class} for the first implementation found
     * @throws RuntimeException if no implementation was found
     */
    @SuppressWarnings("unchecked")
    public <S> Class<? extends S> build() {
      if (!nullOk && foundClass == null) {
        throw new RuntimeException("Cannot find class; alternatives: " +
            Joiner.on(", ").join(classNames));
      }
      return (Class<? extends S>) foundClass;
    }
  }
}
| 2,016 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynConstructors.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Copied from parquet-common
*/
public class DynConstructors {
  /**
   * Wraps a {@link Constructor} as an {@link DynMethods.UnboundMethod} named
   * "newInstance", so constructors and factory methods can be used
   * interchangeably.
   *
   * @param <C> the constructed type
   */
  public static class Ctor<C> extends DynMethods.UnboundMethod {
    private final Constructor<C> ctor;
    private final Class<? extends C> constructed;
    private Ctor(Constructor<C> constructor, Class<? extends C> constructed) {
      // no backing Method; invocation is routed through newInstance below
      super(null, "newInstance");
      this.ctor = constructor;
      this.constructed = constructed;
    }
    public Class<? extends C> getConstructedClass() {
      return constructed;
    }
    /**
     * Invokes the constructor, rethrowing the original cause of any
     * {@link InvocationTargetException}.
     *
     * @param args constructor arguments; extra trailing arguments are ignored
     * @return a new instance of C
     * @throws Exception reflection failures or the cause thrown by the constructor
     */
    public C newInstanceChecked(Object... args) throws Exception {
      try {
        if (args.length > ctor.getParameterCount()) {
          // trim the argument list so callers can pass extra, unused arguments
          return ctor.newInstance(Arrays.copyOfRange(args, 0, ctor.getParameterCount()));
        } else {
          return ctor.newInstance(args);
        }
      } catch (InstantiationException | IllegalAccessException e) {
        // rethrow reflection failures unchanged; only unwrap constructor exceptions below
        throw e;
      } catch (InvocationTargetException e) {
        Throwables.propagateIfInstanceOf(e.getCause(), Exception.class);
        Throwables.propagateIfInstanceOf(e.getCause(), RuntimeException.class);
        throw Throwables.propagate(e.getCause());
      }
    }
    /**
     * Invokes the constructor, wrapping checked exceptions in RuntimeException.
     *
     * @param args constructor arguments; extra trailing arguments are ignored
     * @return a new instance of C
     */
    public C newInstance(Object... args) {
      try {
        return newInstanceChecked(args);
      } catch (Exception e) {
        Throwables.propagateIfInstanceOf(e, RuntimeException.class);
        throw Throwables.propagate(e);
      }
    }
    @Override
    @SuppressWarnings("unchecked")
    public <R> R invoke(Object target, Object... args) {
      // constructors have no receiver; a non-null target indicates a caller bug
      Preconditions.checkArgument(target == null,
          "Invalid call to constructor: target must be null");
      return (R) newInstance(args);
    }
    @Override
    @SuppressWarnings("unchecked")
    public <R> R invokeChecked(Object target, Object... args) throws Exception {
      Preconditions.checkArgument(target == null,
          "Invalid call to constructor: target must be null");
      return (R) newInstanceChecked(args);
    }
    @Override
    public DynMethods.BoundMethod bind(Object receiver) {
      // binding makes no sense for constructors
      throw new IllegalStateException("Cannot bind constructors");
    }
    @Override
    public boolean isStatic() {
      return true;
    }
    @Override
    public String toString() {
      return getClass().getSimpleName() +
          "(constructor=" + ctor + ", class=" + constructed + ")";
    }
  }
  /**
   * Constructs a new builder for finding constructors dynamically, with no
   * base class named in error messages.
   *
   * @return a Builder for finding a constructor
   */
  public static Builder builder() {
    return new Builder();
  }
  /**
   * Constructs a new builder for finding constructors dynamically.
   *
   * @param baseClass the class named in error messages when no constructor is found
   * @return a Builder for finding a constructor
   */
  public static Builder builder(Class<?> baseClass) {
    return new Builder(baseClass);
  }
public static class Builder {
private final Class<?> baseClass;
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private Ctor ctor = null;
private Map<String, Throwable> problems = new HashMap<String, Throwable>();
public Builder(Class<?> baseClass) {
this.baseClass = baseClass;
}
public Builder() {
this.baseClass = null;
}
/**
* Set the {@link ClassLoader} used to lookup classes by name.
* <p>
* If not set, the current thread's ClassLoader is used.
*
* @param loader a ClassLoader
* @return this Builder for method chaining
*/
public Builder loader(ClassLoader loader) {
this.loader = loader;
return this;
}
public Builder impl(String className, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
impl(targetClass, types);
} catch (NoClassDefFoundError | ClassNotFoundException e) {
// cannot load this implementation
problems.put(className, e);
}
return this;
}
public <T> Builder impl(Class<T> targetClass, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
ctor = new Ctor<T>(targetClass.getConstructor(types), targetClass);
} catch (NoSuchMethodException e) {
// not the right implementation
problems.put(methodName(targetClass, types), e);
}
return this;
}
public Builder hiddenImpl(Class<?>... types) {
hiddenImpl(baseClass, types);
return this;
}
@SuppressWarnings("unchecked")
public Builder hiddenImpl(String className, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Class targetClass = Class.forName(className, true, loader);
hiddenImpl(targetClass, types);
} catch (NoClassDefFoundError | ClassNotFoundException e) {
// cannot load this implementation
problems.put(className, e);
}
return this;
}
public <T> Builder hiddenImpl(Class<T> targetClass, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Constructor<T> hidden = targetClass.getDeclaredConstructor(types);
AccessController.doPrivileged(new MakeAccessible(hidden));
ctor = new Ctor<T>(hidden, targetClass);
} catch (SecurityException e) {
// unusable
problems.put(methodName(targetClass, types), e);
} catch (NoSuchMethodException e) {
// not the right implementation
problems.put(methodName(targetClass, types), e);
}
return this;
}
@SuppressWarnings("unchecked")
public <C> Ctor<C> buildChecked() throws NoSuchMethodException {
if (ctor != null) {
return ctor;
}
throw new NoSuchMethodException("Cannot find constructor for " +
baseClass + "\n" + formatProblems(problems));
}
@SuppressWarnings("unchecked")
public <C> Ctor<C> build() {
if (ctor != null) {
return ctor;
}
throw new RuntimeException("Cannot find constructor for " +
baseClass + "\n" + formatProblems(problems));
}
}
private static class MakeAccessible implements PrivilegedAction<Void> {
private Constructor<?> hidden;
public MakeAccessible(Constructor<?> hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
private static String formatProblems(Map<String, Throwable> problems) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (Map.Entry<String, Throwable> problem : problems.entrySet()) {
if (first) {
first = false;
} else {
sb.append("\n");
}
sb.append("\tMissing ").append(problem.getKey()).append(" [")
.append(problem.getValue().getClass().getName()).append(": ")
.append(problem.getValue().getMessage()).append("]");
}
return sb.toString();
}
private static String methodName(Class<?> targetClass, Class<?>... types) {
StringBuilder sb = new StringBuilder();
sb.append(targetClass.getName()).append("(");
boolean first = true;
for (Class<?> type : types) {
if (first) {
first = false;
} else {
sb.append(",");
}
sb.append(type.getName());
}
sb.append(")");
return sb.toString();
}
}
| 2,017 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/HiveTableBaseTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.TServerSocketKeepAlive;
import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportException;
import org.apache.thrift.transport.TTransportFactory;
import org.junit.After;
import org.junit.Before;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import static com.netflix.iceberg.PartitionSpec.builderFor;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
import static java.nio.file.Files.createTempDirectory;
import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.AUTO_CREATE_ALL;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_WORKER_THREADS;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CONNECT_URL_KEY;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_TXN_MANAGER;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.SCHEMA_VERIFICATION;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.THRIFT_URIS;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.WAREHOUSE;
class HiveTableBaseTest {
  static final String DB_NAME = "hivedb";
  static final String TABLE_NAME = "tbl";

  // Initial single-column schema the test table is created with.
  static final Schema schema = new Schema(Types.StructType.of(
      required(1, "id", Types.LongType.get())).fields());

  // Schema expected after tests add the optional "data" column.
  static final Schema altered = new Schema(Types.StructType.of(
      required(1, "id", Types.LongType.get()),
      optional(2, "data", Types.LongType.get())).fields());

  private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build();

  Configuration hiveConf;
  HiveMetaStoreClient metastoreClient;

  private File hiveLocalDir;
  private ExecutorService executorService;
  private TServer server;

  /**
   * Starts an embedded, Derby-backed Hive metastore on a free port, connects a
   * client to it and creates the {@code hivedb.tbl} Iceberg table used by tests.
   */
  @Before
  public void setup() throws IOException,
                             TException,
                             InvocationTargetException,
                             NoSuchMethodException,
                             IllegalAccessException,
                             NoSuchFieldException, SQLException {
    this.executorService = Executors.newSingleThreadExecutor();
    hiveLocalDir = createTempDirectory("hive", asFileAttribute(fromString("rwxrwxrwx"))).toFile();
    setupDB("jdbc:derby:" + getDerbyPath() + ";create=true");

    this.server = thriftServer();
    executorService.submit(() -> server.serve());

    this.metastoreClient = new HiveMetaStoreClient(this.hiveConf);
    createIfNotExistsCatalog("hive");
    this.metastoreClient.createDatabase(new Database(DB_NAME, "description", getDBPath(), new HashMap<>()));
    new HiveTables(this.hiveConf).create(schema, partitionSpec, DB_NAME, TABLE_NAME);
  }

  /** Stops the metastore server and removes all on-disk state. */
  @After
  public void cleanup() {
    if (server != null) {
      server.stop();
    }
    if (executorService != null) {
      executorService.shutdown();
    }
    if (hiveLocalDir != null) {
      // File#delete only removes empty directories; delete contents first so the
      // temp warehouse directory does not leak between test runs.
      deleteRecursively(hiveLocalDir);
    }
  }

  // Depth-first delete; File#delete on a non-empty directory is a silent no-op.
  private static void deleteRecursively(File file) {
    File[] children = file.listFiles();
    if (children != null) {
      for (File child : children) {
        deleteRecursively(child);
      }
    }
    file.delete();
  }

  private HiveConf hiveConf(Configuration conf, int port) throws IOException {
    // Redirect Derby logging into the per-test temp directory.
    File derbyLogFile = new File(hiveLocalDir, "derby.log");
    derbyLogFile.createNewFile();
    System.setProperty("derby.stream.error.file", derbyLogFile.getPath());

    final HiveConf newHiveConf = new HiveConf(conf, this.getClass());
    // Setting AUTO_CREATE_ALL in hadoop config somehow still reverts to false.
    newHiveConf.set(SCHEMA_VERIFICATION.getVarname(), "false");
    newHiveConf.set(THRIFT_URIS.getVarname(), "thrift://localhost:" + port);
    newHiveConf.set(WAREHOUSE.getVarname(), "file:" + hiveLocalDir.getAbsolutePath());
    newHiveConf.set(WAREHOUSE.getHiveName(), "file:" + hiveLocalDir.getAbsolutePath());
    newHiveConf.set(CONNECT_URL_KEY.getVarname(), "jdbc:derby:" + getDerbyPath() + ";create=true");
    newHiveConf.set(AUTO_CREATE_ALL.getVarname(), "true");
    newHiveConf.set(HIVE_TXN_MANAGER.getVarname(), "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
    newHiveConf.set(COMPACTOR_INITIATOR_ON.getVarname(), "true");
    newHiveConf.set(COMPACTOR_WORKER_THREADS.getVarname(), "1");
    newHiveConf.set(HIVE_SUPPORT_CONCURRENCY.getVarname(), "true");
    return newHiveConf;
  }

  private String getDerbyPath() {
    return new File(hiveLocalDir, "metastore_db").getPath();
  }

  /** Builds a thrift server hosting the metastore handler on an ephemeral port. */
  private TServer thriftServer() throws IOException,
                                        TTransportException,
                                        MetaException,
                                        InvocationTargetException,
                                        NoSuchMethodException,
                                        IllegalAccessException,
                                        NoSuchFieldException {
    final TServerSocketKeepAlive socket = new TServerSocketKeepAlive(new TServerSocket(0));
    this.hiveConf = hiveConf(new Configuration(), socket.getServerSocket().getLocalPort());
    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", hiveConf);
    IHMSHandler handler = RetryingHMSHandler.getProxy(hiveConf, baseHandler, true);
    final TTransportFactory transportFactory = new TTransportFactory();
    final TSetIpAddressProcessor<IHMSHandler> processor = new TSetIpAddressProcessor<>(handler);
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
        .processor(processor)
        .transportFactory(transportFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(3)
        .maxWorkerThreads(5);
    return new TThreadPoolServer(args);
  }

  /** Initializes the Derby database with the Hive 3.1.0 metastore schema. */
  private void setupDB(String dbURL) throws SQLException, IOException {
    URL hiveSqlScript = getClass().getClassLoader().getResource("hive-schema-3.1.0.derby.sql");
    // try-with-resources: the previous version leaked both the connection and the reader.
    try (Connection connection = DriverManager.getConnection(dbURL);
         Reader reader = new BufferedReader(new FileReader(new File(hiveSqlScript.getFile())))) {
      ScriptRunner scriptRunner = new ScriptRunner(connection, true, true);
      scriptRunner.runScript(reader);
    }
  }

  private void createIfNotExistsCatalog(String catalogName) throws TException {
    try {
      metastoreClient.getCatalog(catalogName);
    } catch (NoSuchObjectException e) {
      String catalogPath = Paths.get(hiveLocalDir.getAbsolutePath(), catalogName + ".catalog").toString();
      metastoreClient.createCatalog(new Catalog(catalogName, catalogPath));
    }
  }

  private String getDBPath() {
    return Paths.get(hiveLocalDir.getAbsolutePath(), DB_NAME + ".db").toAbsolutePath().toString();
  }

  String getTableBasePath(String tableName) {
    return Paths.get(getDBPath(), tableName).toAbsolutePath().toString();
  }

  String getTableLocation(String tableName) {
    return new Path("file", null, Paths.get(getTableBasePath(tableName), "empty").toString()).toString();
  }

  String metadataLocation(String tableName) {
    return Paths.get(getTableBasePath(tableName), "metadata").toString();
  }

  private List<String> metadataFiles(String tableName) {
    return Arrays.stream(new File(metadataLocation(tableName)).listFiles())
        .map(File::getAbsolutePath)
        .collect(Collectors.toList());
  }

  List<String> metadataVersionFiles(String tableName) {
    return filterByExtension(tableName, getFileExtension(hiveConf));
  }

  List<String> manifestFiles(String tableName) {
    return filterByExtension(tableName, ".avro");
  }

  private List<String> filterByExtension(String tableName, String extension) {
    return metadataFiles(tableName)
        .stream()
        .filter(f -> f.endsWith(extension))
        .collect(Collectors.toList());
  }
}
| 2,018 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.netflix.iceberg.BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE;
import static com.netflix.iceberg.BaseMetastoreTableOperations.METADATA_LOCATION_PROP;
import static com.netflix.iceberg.BaseMetastoreTableOperations.TABLE_TYPE_PROP;
public class HiveTablesTest extends HiveTableBaseTest {
@Test
public void testCreate() throws TException {
// Table should be created in hive metastore
final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
// check parameters are in expected state
final Map<String, String> parameters = table.getParameters();
Assert.assertNotNull(parameters);
Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(parameters.get(TABLE_TYPE_PROP)));
Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(table.getTableType()));
// Ensure the table is pointing to empty location
Assert.assertEquals(getTableLocation(TABLE_NAME) , table.getSd().getLocation());
// Ensure it is stored as unpartitioned table in hive.
Assert.assertEquals(0 , table.getPartitionKeysSize());
// Only 1 snapshotFile Should exist and no manifests should exist
Assert.assertEquals(1, metadataVersionFiles(TABLE_NAME).size());
Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
final com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
// Iceberg schema should match the loaded table
Assert.assertEquals(schema.asStruct(), icebergTable.schema().asStruct());
}
@Test
public void testExistingTableUpdate() throws TException {
com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
// add a column
icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit();
icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
// Only 2 snapshotFile Should exist and no manifests should exist
Assert.assertEquals(2, metadataVersionFiles(TABLE_NAME).size());
Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
Assert.assertEquals(altered.asStruct(), icebergTable.schema().asStruct());
final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
final List<String> hiveColumns = table.getSd().getCols().stream().map(f -> f.getName()).collect(Collectors.toList());
final List<String> icebergColumns = altered.columns().stream().map(f -> f.name()).collect(Collectors.toList());
Assert.assertEquals(icebergColumns, hiveColumns);
}
@Test(expected = CommitFailedException.class)
public void testFailure() throws TException {
com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
final String dummyLocation = "dummylocation";
table.getParameters().put(METADATA_LOCATION_PROP, dummyLocation);
metastoreClient.alter_table(DB_NAME, TABLE_NAME, table);
icebergTable.updateSchema()
.addColumn("data", Types.LongType.get())
.commit();
}
}
| 2,019 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/ScriptRunner.java | /*
*
* Copyright 2004 Clinton Begin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Slightly modified version of the com.ibatis.common.jdbc.ScriptRunner class
* from the iBATIS Apache project. Only removed dependency on Resource class
* and a constructor.
*/
package com.netflix.iceberg.hive;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.sql.Connection;
import java.sql.Statement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.ResultSetMetaData;
/**
* Tool to run database scripts
*/
public class ScriptRunner {
  private static final String DEFAULT_DELIMITER = ";";

  private Connection connection;
  private boolean stopOnError;
  private boolean autoCommit;
  private PrintWriter logWriter = new PrintWriter(System.out);
  private PrintWriter errorLogWriter = new PrintWriter(System.err);
  private String delimiter = DEFAULT_DELIMITER;
  private boolean fullLineDelimiter = false;

  /**
   * Default constructor
   *
   * @param connection the connection to execute statements on
   * @param autoCommit whether to commit after each statement
   * @param stopOnError whether a SQLException aborts the script or is only logged
   */
  public ScriptRunner(Connection connection, boolean autoCommit,
                      boolean stopOnError) {
    this.connection = connection;
    this.autoCommit = autoCommit;
    this.stopOnError = stopOnError;
  }

  public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
    this.delimiter = delimiter;
    this.fullLineDelimiter = fullLineDelimiter;
  }

  /**
   * Setter for logWriter property
   *
   * @param logWriter
   *          - the new value of the logWriter property
   */
  public void setLogWriter(PrintWriter logWriter) {
    this.logWriter = logWriter;
  }

  /**
   * Setter for errorLogWriter property
   *
   * @param errorLogWriter
   *          - the new value of the errorLogWriter property
   */
  public void setErrorLogWriter(PrintWriter errorLogWriter) {
    this.errorLogWriter = errorLogWriter;
  }

  /**
   * Runs an SQL script (read in using the Reader parameter)
   *
   * @param reader
   *          - the source of the script
   */
  public void runScript(Reader reader) throws IOException, SQLException {
    try {
      boolean originalAutoCommit = connection.getAutoCommit();
      try {
        if (originalAutoCommit != this.autoCommit) {
          connection.setAutoCommit(this.autoCommit);
        }
        runScript(connection, reader);
      } finally {
        // Restore the connection's auto-commit setting no matter what happened.
        connection.setAutoCommit(originalAutoCommit);
      }
    } catch (IOException | SQLException e) {
      throw e;
    } catch (Exception e) {
      throw new RuntimeException("Error running script. Cause: " + e, e);
    }
  }

  /**
   * Runs an SQL script (read in using the Reader parameter) using the
   * connection passed in
   *
   * @param conn
   *          - the connection to use for the script
   * @param reader
   *          - the source of the script
   * @throws SQLException
   *           if any SQL errors occur
   * @throws IOException
   *           if there is an error reading from the Reader
   */
  private void runScript(Connection conn, Reader reader) throws IOException,
      SQLException {
    StringBuilder command = null;
    try {
      LineNumberReader lineReader = new LineNumberReader(reader);
      String line;
      while ((line = lineReader.readLine()) != null) {
        if (command == null) {
          command = new StringBuilder();
        }
        String trimmedLine = line.trim();
        if (trimmedLine.startsWith("--")) {
          // SQL comment: echo it to the log, but do not execute it.
          println(trimmedLine);
        } else if (trimmedLine.length() < 1
            || trimmedLine.startsWith("//")) {
          // Blank lines and C++-style comments are skipped.
        } else if (!fullLineDelimiter
            && trimmedLine.endsWith(getDelimiter())
            || fullLineDelimiter
            && trimmedLine.equals(getDelimiter())) {
          // Statement is complete: strip the delimiter and execute it.
          command.append(line.substring(0, line
              .lastIndexOf(getDelimiter())));
          command.append(" ");
          Statement statement = conn.createStatement();

          println(command);

          boolean hasResults = false;
          if (stopOnError) {
            hasResults = statement.execute(command.toString());
          } else {
            // Best-effort mode: log the failure and continue with the next statement.
            try {
              statement.execute(command.toString());
            } catch (SQLException e) {
              e.fillInStackTrace();
              printlnError("Error executing: " + command);
              printlnError(e);
            }
          }

          if (autoCommit && !conn.getAutoCommit()) {
            conn.commit();
          }

          ResultSet rs = statement.getResultSet();
          if (hasResults && rs != null) {
            // JDBC column indexes are 1-based; using 0 would throw for every row.
            ResultSetMetaData md = rs.getMetaData();
            int cols = md.getColumnCount();
            for (int i = 1; i <= cols; i++) {
              String name = md.getColumnLabel(i);
              print(name + "\t");
            }
            println("");
            while (rs.next()) {
              for (int i = 1; i <= cols; i++) {
                String value = rs.getString(i);
                print(value + "\t");
              }
              println("");
            }
          }

          command = null;
          try {
            statement.close();
          } catch (Exception e) {
            // Ignore to workaround a bug in Jakarta DBCP
          }
          Thread.yield();
        } else {
          // Statement continues on the next line; accumulate it.
          command.append(line);
          command.append(" ");
        }
      }
      if (!autoCommit) {
        conn.commit();
      }
    } catch (SQLException | IOException e) {
      e.fillInStackTrace();
      printlnError("Error executing: " + command);
      printlnError(e);
      throw e;
    } finally {
      conn.rollback();
      flush();
    }
  }

  private String getDelimiter() {
    return delimiter;
  }

  private void print(Object o) {
    if (logWriter != null) {
      // Was System.out.print(o), which ignored setLogWriter; honor the configured writer.
      logWriter.print(o);
    }
  }

  private void println(Object o) {
    if (logWriter != null) {
      logWriter.println(o);
    }
  }

  private void printlnError(Object o) {
    if (errorLogWriter != null) {
      errorLogWriter.println(o);
    }
  }

  private void flush() {
    if (logWriter != null) {
      logWriter.flush();
    }
    if (errorLogWriter != null) {
      errorLogWriter.flush();
    }
  }
}
| 2,020 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.google.common.base.Splitter;
import com.netflix.iceberg.BaseMetastoreTableOperations;
import com.netflix.iceberg.BaseMetastoreTables;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.THRIFT_URIS;
public class HiveTables extends BaseMetastoreTables {
  // Splits "database.table"; limit(2) keeps any further dots inside the table name.
  private static final Splitter DOT = Splitter.on('.').limit(2);

  private Configuration conf;

  public HiveTables(Configuration conf) {
    super(conf);
    this.conf = conf;
  }

  /** Creates an unpartitioned table from a dotted {@code database.table} identifier. */
  @Override
  public Table create(Schema schema, String tableIdentifier) {
    return create(schema, PartitionSpec.unpartitioned(), tableIdentifier);
  }

  /**
   * Creates a table from a dotted {@code database.table} identifier.
   *
   * @throws UnsupportedOperationException if the identifier is not of the form db.table
   */
  @Override
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties, String tableIdentifier) {
    List<String> parts = parseIdentifier(tableIdentifier);
    return create(schema, spec, properties, parts.get(0), parts.get(1));
  }

  /**
   * Loads a table from a dotted {@code database.table} identifier.
   *
   * @throws UnsupportedOperationException if the identifier is not of the form db.table
   */
  @Override
  public Table load(String tableIdentifier) {
    List<String> parts = parseIdentifier(tableIdentifier);
    return load(parts.get(0), parts.get(1));
  }

  // Shared identifier parsing, previously duplicated in create and load.
  private static List<String> parseIdentifier(String tableIdentifier) {
    List<String> parts = DOT.splitToList(tableIdentifier);
    if (parts.size() != 2) {
      throw new UnsupportedOperationException("Could not parse table identifier: " + tableIdentifier);
    }
    return parts;
  }

  @Override
  public BaseMetastoreTableOperations newTableOps(Configuration conf, String database, String table) {
    return new HiveTableOperations(conf, getClient(), database, table);
  }

  /** Opens a raw thrift client against the metastore configured by THRIFT_URIS. */
  private ThriftHiveMetastore.Client getClient() {
    final URI metastoreUri = URI.create(MetastoreConf.getAsString(conf, THRIFT_URIS));
    final int socketTimeOut = (int) MetastoreConf.getTimeVar(conf, CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
    TTransport transport = new TSocket(metastoreUri.getHost(), metastoreUri.getPort(), socketTimeOut);
    try {
      transport.open();
    } catch (TTransportException e) {
      // Keep the transport exception as the cause; it was previously discarded.
      throw new RuntimeException(
          "failed to open socket for " + metastoreUri + " with timeoutMillis " + socketTimeOut, e);
    }
    return new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
  }
}
| 2,021 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.google.common.collect.Lists;
import com.netflix.iceberg.BaseMetastoreTableOperations;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SerdeType;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import static java.lang.String.format;
/**
* TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to
* avoid code duplication between this class and Metacat Tables.
*/
public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final Logger LOG = LoggerFactory.getLogger(HiveTableOperations.class);
private final ThriftHiveMetastore.Client metaStoreClient;
private final String database;
private final String tableName;
protected HiveTableOperations(Configuration conf, ThriftHiveMetastore.Client metaStoreClient, String database, String table) {
super(conf);
this.metaStoreClient = metaStoreClient;
this.database = database;
this.tableName = table;
}
@Override
public TableMetadata refresh() {
String metadataLocation = null;
try {
final Table table = metaStoreClient.get_table(database, tableName);
String tableType = table.getParameters().get(TABLE_TYPE_PROP);
if (tableType == null || !tableType.equalsIgnoreCase(ICEBERG_TABLE_TYPE_VALUE)) {
throw new IllegalArgumentException(format("Invalid tableName, not Iceberg: %s.%s", database, table));
}
metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
if (metadataLocation == null) {
throw new IllegalArgumentException(format("%s.%s is missing %s property", database, tableName, METADATA_LOCATION_PROP));
}
} catch (NoSuchObjectException e) {
if (currentMetadataLocation() != null) {
throw new NoSuchTableException(format("No such table: %s.%s", database, tableName));
}
} catch (TException e) {
throw new RuntimeException(format("Failed to get table info from metastore %s.%s", database, tableName));
}
refreshFromMetadataLocation(metadataLocation);
return current();
}
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
// if the metadata is already out of date, reject it
if (base != current()) {
throw new CommitFailedException(format("stale table metadata for %s.%s", database, tableName));
}
// if the metadata is not changed, return early
if (base == metadata) {
LOG.info("Nothing to commit.");
return;
}
String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
boolean threw = true;
Optional<Long> lockId = Optional.empty();
try {
lockId = Optional.of(acquireLock());
// TODO add lock heart beating for cases where default lock timeout is too low.
Table tbl;
if (base != null) {
tbl = metaStoreClient.get_table(database, tableName);
} else {
final long currentTimeMillis = System.currentTimeMillis();
tbl = new Table(tableName,
database,
System.getProperty("user.name"),
(int) currentTimeMillis / 1000,
(int) currentTimeMillis / 1000,
Integer.MAX_VALUE,
storageDescriptor(metadata.schema()),
Collections.emptyList(),
new HashMap<>(),
null,
null,
ICEBERG_TABLE_TYPE_VALUE);
}
tbl.setSd(storageDescriptor(metadata.schema())); // set to pickup any schema changes
final String metadataLocation = tbl.getParameters().get(METADATA_LOCATION_PROP);
if (!Objects.equals(currentMetadataLocation(), metadataLocation)) {
throw new CommitFailedException(format("metadataLocation = %s is not same as table metadataLocation %s for %s.%s",
currentMetadataLocation(), metadataLocation, database, tableName));
}
setParameters(newMetadataLocation, tbl);
if (base != null) {
metaStoreClient.alter_table(database, tableName, tbl);
} else {
metaStoreClient.create_table(tbl);
}
threw = false;
} catch (TException | UnknownHostException e) {
throw new RuntimeException(format("Metastore operation failed for %s.%s", database, tableName), e);
} finally {
if (threw) {
// if anything went wrong, clean up the uncommitted metadata file
io().deleteFile(newMetadataLocation);
}
unlock(lockId);
}
requestRefresh();
}
private void setParameters(String newMetadataLocation, Table tbl) {
Map<String, String> parameters = tbl.getParameters();
if (parameters == null) {
parameters = new HashMap<>();
}
parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
}
tbl.setParameters(parameters);
}
private StorageDescriptor storageDescriptor(Schema schema) {
final StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(columns(schema));
storageDescriptor.setLocation(hiveTableLocation());
storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileInputFormat");
storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileOutputFormat");
SerDeInfo serDeInfo = new SerDeInfo();
serDeInfo.setSerdeType(SerdeType.HIVE);
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
storageDescriptor.setSerdeInfo(serDeInfo);
return storageDescriptor;
}
private final List<FieldSchema> columns(Schema schema) {
return schema.columns().stream().map(col -> new FieldSchema(col.name(), HiveTypeConverter.convert(col.type()), "")).collect(Collectors.toList());
}
private long acquireLock() throws UnknownHostException, TException {
final LockComponent lockComponent = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, database);
lockComponent.setTablename(tableName);
final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent),
System.getProperty("user.name"),
InetAddress.getLocalHost().getHostName());
LockResponse lockResponse = metaStoreClient.lock(lockRequest);
LockState state = lockResponse.getState();
long lockId = lockResponse.getLockid();
//TODO add timeout
while (state.equals(LockState.WAITING)) {
lockResponse = metaStoreClient.check_lock(new CheckLockRequest(lockResponse.getLockid()));
state = lockResponse.getState();
}
if (!state.equals(LockState.ACQUIRED)) {
throw new CommitFailedException(format("Could not acquire the lock on %s.%s, " +
"lock request ended in state %s", database, tableName, state));
}
return lockId;
}
private void unlock(Optional<Long> lockId) {
if (lockId.isPresent()) {
try {
metaStoreClient.unlock(new UnlockRequest(lockId.get()));
} catch (TException e) {
throw new RuntimeException(format("Failed to unlock %s.%s", database, tableName) , e);
}
}
}
}
| 2,022 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTypeConverter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.util.stream.Collectors;
import static java.lang.String.format;
/** Maps Iceberg types to their Hive DDL type names. */
public final class HiveTypeConverter {
  private HiveTypeConverter() {
  }

  /**
   * Returns the Hive type name corresponding to the given Iceberg type.
   *
   * @throws UnsupportedOperationException for types Hive cannot represent
   */
  public static String convert(Type type) {
    switch (type.typeId()) {
      case BOOLEAN:
        return "boolean";
      case INTEGER:
        return "int";
      case LONG:
        return "bigint";
      case FLOAT:
        return "float";
      case DOUBLE:
        return "double";
      case DATE:
        return "date";
      case TIME:
        throw new UnsupportedOperationException("Hive does not support time fields");
      case TIMESTAMP:
        return "timestamp";
      case STRING:
      case UUID:
        // UUIDs are surfaced to Hive as plain strings.
        return "string";
      case FIXED:
      case BINARY:
        // Fixed- and variable-width byte arrays both map to Hive binary.
        return "binary";
      case DECIMAL: {
        final Types.DecimalType decimal = (Types.DecimalType) type;
        return format("decimal(%s,%s)", decimal.precision(), decimal.scale()); //TODO may be just decimal?
      }
      case STRUCT: {
        // Render each field as name:type, comma-joined inside struct<...>.
        final String fieldList = type.asStructType().fields().stream()
            .map(field -> format("%s:%s", field.name(), convert(field.type())))
            .collect(Collectors.joining(","));
        return format("struct<%s>", fieldList);
      }
      case LIST:
        return format("array<%s>", convert(type.asListType().elementType()));
      case MAP: {
        final Types.MapType map = type.asMapType();
        return format("map<%s,%s>", convert(map.keyType()), convert(map.valueType()));
      }
      default:
        throw new UnsupportedOperationException(type + " is not supported");
    }
  }
}
| 2,023 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/TestHelpers.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.UnboundPredicate;
import org.junit.Assert;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
public class TestHelpers {
public static <T> T assertAndUnwrap(Expression expr, Class<T> expected) {
Assert.assertTrue("Expression should have expected type: " + expected,
expected.isInstance(expr));
return expected.cast(expr);
}
@SuppressWarnings("unchecked")
public static <T> BoundPredicate<T> assertAndUnwrap(Expression expr) {
Assert.assertTrue("Expression should be a bound predicate: " + expr,
expr instanceof BoundPredicate);
return (BoundPredicate<T>) expr;
}
@SuppressWarnings("unchecked")
public static <T> UnboundPredicate<T> assertAndUnwrapUnbound(Expression expr) {
Assert.assertTrue("Expression should be an unbound predicate: " + expr,
expr instanceof UnboundPredicate);
return (UnboundPredicate<T>) expr;
}
public static void assertAllReferencesBound(String message, Expression expr) {
ExpressionVisitors.visit(expr, new CheckReferencesBound(message));
}
@SuppressWarnings("unchecked")
public static <T> T roundTripSerialize(T type) throws IOException, ClassNotFoundException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
out.writeObject(type);
}
try (ObjectInputStream in = new ObjectInputStream(
new ByteArrayInputStream(bytes.toByteArray()))) {
return (T) in.readObject();
}
}
private static class CheckReferencesBound extends ExpressionVisitors.ExpressionVisitor<Void> {
private final String message;
public CheckReferencesBound(String message) {
this.message = message;
}
@Override
public <T> Void predicate(UnboundPredicate<T> pred) {
Assert.fail(message + ": Found unbound predicate: " + pred);
return null;
}
}
/**
* Implements {@link StructLike#get} for passing data in tests.
*/
public static class Row implements StructLike {
public static Row of(Object... values) {
return new Row(values);
}
private final Object[] values;
private Row(Object... values) {
this.values = values;
}
@Override
public int size() {
return values.length;
}
@Override
@SuppressWarnings("unchecked")
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(values[pos]);
}
@Override
public <T> void set(int pos, T value) {
throw new UnsupportedOperationException("Setting values is not supported");
}
}
/**
* A convenience method to avoid a large number of @Test(expected=...) tests
* @param message A String message to describe this assertion
* @param expected An Exception class that the Runnable should throw
* @param containedInMessage A String that should be contained by the thrown
* exception's message
* @param callable A Callable that is expected to throw the exception
*/
public static void assertThrows(String message,
Class<? extends Exception> expected,
String containedInMessage,
Callable callable) {
try {
callable.call();
Assert.fail("No exception was thrown (" + message + "), expected: " +
expected.getName());
} catch (Exception actual) {
handleException(message, expected, containedInMessage, actual);
}
}
/**
* A convenience method to avoid a large number of @Test(expected=...) tests
* @param message A String message to describe this assertion
* @param expected An Exception class that the Runnable should throw
* @param containedInMessage A String that should be contained by the thrown
* exception's message
* @param runnable A Runnable that is expected to throw the runtime exception
*/
public static void assertThrows(String message,
Class<? extends Exception> expected,
String containedInMessage,
Runnable runnable) {
try {
runnable.run();
Assert.fail("No exception was thrown (" + message + "), expected: " +
expected.getName());
} catch (Exception actual) {
handleException(message, expected, containedInMessage, actual);
}
}
private static void handleException(String message,
Class<? extends Exception> expected,
String containedInMessage,
Exception actual) {
try {
Assert.assertEquals(message, expected, actual.getClass());
Assert.assertTrue(
"Expected exception message (" + containedInMessage + ") missing: " +
actual.getMessage(),
actual.getMessage().contains(containedInMessage)
);
} catch (AssertionError e) {
e.addSuppressed(actual);
throw e;
}
}
public static class TestFieldSummary implements ManifestFile.PartitionFieldSummary {
private final boolean containsNull;
private final ByteBuffer lowerBound;
private final ByteBuffer upperBound;
public TestFieldSummary(boolean containsNull, ByteBuffer lowerBound, ByteBuffer upperBound) {
this.containsNull = containsNull;
this.lowerBound = lowerBound;
this.upperBound = upperBound;
}
@Override
public boolean containsNull() {
return containsNull;
}
@Override
public ByteBuffer lowerBound() {
return lowerBound;
}
@Override
public ByteBuffer upperBound() {
return upperBound;
}
@Override
public ManifestFile.PartitionFieldSummary copy() {
return this;
}
}
public static class TestManifestFile implements ManifestFile {
private final String path;
private final long length;
private final int specId;
private final Long snapshotId;
private final Integer addedFiles;
private final Integer existingFiles;
private final Integer deletedFiles;
private final List<PartitionFieldSummary> partitions;
public TestManifestFile(String path, long length, int specId, Long snapshotId,
Integer addedFiles, Integer existingFiles, Integer deletedFiles,
List<PartitionFieldSummary> partitions) {
this.path = path;
this.length = length;
this.specId = specId;
this.snapshotId = snapshotId;
this.addedFiles = addedFiles;
this.existingFiles = existingFiles;
this.deletedFiles = deletedFiles;
this.partitions = partitions;
}
@Override
public String path() {
return path;
}
@Override
public long length() {
return length;
}
@Override
public int partitionSpecId() {
return specId;
}
@Override
public Long snapshotId() {
return snapshotId;
}
@Override
public Integer addedFilesCount() {
return addedFiles;
}
@Override
public Integer existingFilesCount() {
return existingFiles;
}
@Override
public Integer deletedFilesCount() {
return deletedFiles;
}
@Override
public List<PartitionFieldSummary> partitions() {
return partitions;
}
@Override
public ManifestFile copy() {
return this;
}
}
public static class TestDataFile implements DataFile {
private final String path;
private final StructLike partition;
private final long recordCount;
private final Map<Integer, Long> valueCounts;
private final Map<Integer, Long> nullValueCounts;
private final Map<Integer, ByteBuffer> lowerBounds;
private final Map<Integer, ByteBuffer> upperBounds;
public TestDataFile(String path, StructLike partition, long recordCount) {
this(path, partition, recordCount, null, null, null, null);
}
public TestDataFile(String path, StructLike partition, long recordCount,
Map<Integer, Long> valueCounts,
Map<Integer, Long> nullValueCounts,
Map<Integer, ByteBuffer> lowerBounds,
Map<Integer, ByteBuffer> upperBounds) {
this.path = path;
this.partition = partition;
this.recordCount = recordCount;
this.valueCounts = valueCounts;
this.nullValueCounts = nullValueCounts;
this.lowerBounds = lowerBounds;
this.upperBounds = upperBounds;
}
@Override
public CharSequence path() {
return path;
}
@Override
public FileFormat format() {
return FileFormat.fromFileName(path());
}
@Override
public StructLike partition() {
return partition;
}
@Override
public long recordCount() {
return recordCount;
}
@Override
public long fileSizeInBytes() {
return 0;
}
@Override
public long blockSizeInBytes() {
return 0;
}
@Override
public Integer fileOrdinal() {
return null;
}
@Override
public List<Integer> sortColumns() {
return null;
}
@Override
public Map<Integer, Long> columnSizes() {
return null;
}
@Override
public Map<Integer, Long> valueCounts() {
return valueCounts;
}
@Override
public Map<Integer, Long> nullValueCounts() {
return nullValueCounts;
}
@Override
public Map<Integer, ByteBuffer> lowerBounds() {
return lowerBounds;
}
@Override
public Map<Integer, ByteBuffer> upperBounds() {
return upperBounds;
}
@Override
public DataFile copy() {
return this;
}
}
}
| 2,024 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/TestPartitionPaths.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.TestHelpers.Row;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.transforms.Transform;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests partition path construction: applying spec transforms to produce
 * partition values and rendering them as escaped path segments.
 */
public class TestPartitionPaths {
  private static final Schema SCHEMA = new Schema(
      Types.NestedField.required(1, "id", Types.IntegerType.get()),
      Types.NestedField.optional(2, "data", Types.StringType.get()),
      Types.NestedField.optional(3, "ts", Types.TimestampType.withoutZone())
  );

  @Test
  @SuppressWarnings("unchecked")
  public void testPartitionPath() {
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA)
        .hour("ts")
        .bucket("id", 10)
        .build();

    // fetch the transforms by their source column ids: ts (3) -> hour, id (1) -> bucket
    Transform hourTransform = spec.getFieldBySourceId(3).transform();
    Transform bucketTransform = spec.getFieldBySourceId(1).transform();

    Literal<Long> timestamp =
        Literal.of("2017-12-01T10:12:55.038194").to(Types.TimestampType.withoutZone());
    Object hourValue = hourTransform.apply(timestamp.value());
    Object bucketValue = bucketTransform.apply(1);

    Row partitionData = Row.of(hourValue, bucketValue);

    Assert.assertEquals("Should produce expected partition key",
        "ts_hour=2017-12-01-10/id_bucket=" + bucketValue, spec.partitionToPath(partitionData));
  }

  @Test
  public void testEscapedStrings() {
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA)
        .identity("data")
        .truncate("data", 10)
        .build();

    // slashes inside values must be percent-encoded so they do not split path segments
    Assert.assertEquals("Should escape / as %2F",
        "data=a%2Fb%2Fc%2Fd/data_trunc=a%2Fb%2Fc%2Fd",
        spec.partitionToPath(Row.of("a/b/c/d", "a/b/c/d")));
  }
}
| 2,025 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestReadabilityChecks.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.Schema;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests schema compatibility checks: whether data written with one schema can
 * be read with another, covering primitive type promotion, required/optional
 * handling, field ordering, and nested struct, list, and map types.
 */
public class TestReadabilityChecks {
  private static final Type.PrimitiveType[] PRIMITIVES = new Type.PrimitiveType[] {
      Types.BooleanType.get(),
      Types.IntegerType.get(),
      Types.LongType.get(),
      Types.FloatType.get(),
      Types.DoubleType.get(),
      Types.DateType.get(),
      Types.TimeType.get(),
      Types.TimestampType.withoutZone(),
      Types.TimestampType.withZone(),
      Types.StringType.get(),
      Types.UUIDType.get(),
      Types.FixedType.ofLength(3),
      Types.FixedType.ofLength(4),
      Types.BinaryType.get(),
      Types.DecimalType.of(9, 2),
      Types.DecimalType.of(11, 2),
      Types.DecimalType.of(9, 3)
  };

  @Test
  public void testPrimitiveTypes() {
    // every primitive-to-primitive pair must either be an allowed promotion or
    // produce exactly one error; primitives can never be read as nested types
    for (Type.PrimitiveType from : PRIMITIVES) {
      Schema fromSchema = new Schema(required(1, "from_field", from));
      for (Type.PrimitiveType to : PRIMITIVES) {
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(
            new Schema(required(1, "to_field", to)), fromSchema);
        if (TypeUtil.isPromotionAllowed(from, to)) {
          Assert.assertEquals("Should produce 0 error messages", 0, errors.size());
        } else {
          Assert.assertEquals("Should produce 1 error message", 1, errors.size());
          Assert.assertTrue("Should complain that promotion is not allowed",
              errors.get(0).contains("cannot be promoted to"));
        }
      }
      {
        Schema structSchema = new Schema(required(1, "struct_field", Types.StructType.of(
            required(2, "from", from))
        ));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(structSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to struct is not allowed",
            errors.get(0).contains("cannot be read as a struct"));
      }
      {
        Schema listSchema = new Schema(required(1, "list_field", Types.ListType.ofRequired(2, from)));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(listSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to list is not allowed",
            errors.get(0).contains("cannot be read as a list"));
      }
      {
        Schema mapSchema = new Schema(required(1, "map_field",
            Types.MapType.ofRequired(2, 3, Types.StringType.get(), from)));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(mapSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to map is not allowed",
            errors.get(0).contains("cannot be read as a map"));
      }
      {
        Schema mapSchema = new Schema(required(1, "map_field",
            Types.MapType.ofRequired(2, 3, from, Types.StringType.get())));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(mapSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to map is not allowed",
            errors.get(0).contains("cannot be read as a map"));
      }
    }
  }

  @Test
  public void testRequiredSchemaField() {
    Schema write = new Schema(optional(1, "from_field", Types.IntegerType.get()));
    Schema read = new Schema(required(1, "to_field", Types.IntegerType.get()));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required column is optional",
        errors.get(0).contains("should be required, but is optional"));
  }

  @Test
  public void testMissingSchemaField() {
    Schema write = new Schema(required(0, "other_field", Types.IntegerType.get()));
    Schema read = new Schema(required(1, "to_field", Types.IntegerType.get()));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required column is missing",
        errors.get(0).contains("is required, but is missing"));
  }

  @Test
  public void testRequiredStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.IntegerType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required field is optional",
        errors.get(0).contains("should be required, but is optional"));
  }

  @Test
  public void testMissingRequiredStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(2, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.IntegerType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required field is missing",
        errors.get(0).contains("is required, but is missing"));
  }

  @Test
  public void testMissingOptionalStructField() {
    // a missing optional field is readable: it is simply null on read
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "to_field", Types.IntegerType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce no error messages", 0, errors.size());
  }

  @Test
  public void testIncompatibleStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.FloatType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to float"));
  }

  @Test
  public void testIncompatibleStructAndPrimitive() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "from_field", Types.StringType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StringType.get()));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("struct cannot be read as a string"));
  }

  @Test
  public void testMultipleErrors() {
    // required field is optional and cannot be promoted to the read type
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.FloatType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 2 error messages", 2, errors.size());
    Assert.assertTrue("Should complain that a required field is optional",
        errors.get(0).contains("should be required, but is optional"));
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(1).contains("cannot be promoted to float"));
  }

  @Test
  public void testRequiredMapValue() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofRequired(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that values are optional",
        errors.get(0).contains("values should be required, but are optional"));
  }

  @Test
  public void testIncompatibleMapKey() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.IntegerType.get(), Types.StringType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.DoubleType.get(), Types.StringType.get()
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to double"));
  }

  @Test
  public void testIncompatibleMapValue() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.DoubleType.get()
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to double"));
  }

  @Test
  public void testIncompatibleMapAndPrimitive() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.StringType.get()));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("map cannot be read as a string"));
  }

  @Test
  public void testRequiredListElement() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.ListType.ofRequired(
        1, Types.IntegerType.get()
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that elements are optional",
        errors.get(0).contains("elements should be required, but are optional"));
  }

  @Test
  public void testIncompatibleListElement() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.StringType.get()
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to string"));
  }

  @Test
  public void testIncompatibleListAndPrimitive() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.StringType.get()));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("list cannot be read as a string"));
  }

  @Test
  public void testStructWriteReordering() {
    // writes should not reorder fields
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "field_a", Types.IntegerType.get()),
        required(2, "field_b", Types.IntegerType.get())
    )));
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "field_b", Types.IntegerType.get()),
        required(1, "field_a", Types.IntegerType.get())
    )));

    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about field_b before field_a",
        errors.get(0).contains("field_b is out of order, before field_a"));
  }

  @Test
  public void testStructReadReordering() {
    // reads should allow reordering
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "field_a", Types.IntegerType.get()),
        required(2, "field_b", Types.IntegerType.get())
    )));
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "field_b", Types.IntegerType.get()),
        required(1, "field_a", Types.IntegerType.get())
    )));

    List<String> errors = CheckCompatibility.readCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce no error messages", 0, errors.size());
  }
}
| 2,026 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestComparableComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.junit.Assert;
import org.junit.Test;
import java.util.Comparator;
/**
* This tests the Comparator returned by ComparableLiteral, which is used for most types.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
/**
 * This tests the Comparator returned by ComparableLiteral, which is used for most types.
 * <p>
 * The tests use assertTrue instead of assertEquals because the return value is not necessarily one
 * of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
 * can be used: a < b is equivalent to compare(a, b) < 0.
 */
public class TestComparableComparator {
  @Test
  public void testNaturalOrder() {
    // any comparable literal works here; use a long to also check signed ordering
    Comparator<Long> comparator = Literal.of(34L).comparator();

    Assert.assertTrue("Should use the natural order for non-null values",
        comparator.compare(33L, 34L) < 0);
    Assert.assertTrue("Should use signed ordering",
        comparator.compare(33L, -34L) > 0);
  }

  @Test
  public void testNullHandling() {
    // nulls are ordered first, and two nulls compare as equal
    Comparator<Long> comparator = Literal.of(34L).comparator();

    Assert.assertTrue("null comes before non-null", comparator.compare(null, 34L) < 0);
    Assert.assertTrue("null comes before non-null", comparator.compare(34L, null) > 0);
    Assert.assertEquals("null equals null", 0, comparator.compare(null, null));
  }
}
| 2,027 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestSerializableTypes.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests that type objects and schemas survive Java serialization: singleton
 * types keep their identity, parameterized types stay equal, and nested
 * structures round-trip completely.
 */
public class TestSerializableTypes {
  @Test
  public void testIdentityTypes() throws Exception {
    // these types make a strong guarantee than equality, instances are identical
    Type[] identityPrimitives = new Type[] {
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withoutZone(),
        Types.TimestampType.withZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.BinaryType.get()
    };

    for (Type type : identityPrimitives) {
      Assert.assertSame("Serialization result should be identical to starting type",
          type, TestHelpers.roundTripSerialize(type));
    }
  }

  @Test
  public void testEqualTypes() throws Exception {
    // parameterized types are not singletons, so only equality is guaranteed
    Type[] equalityPrimitives = new Type[] {
        Types.DecimalType.of(9, 3),
        Types.DecimalType.of(11, 0),
        Types.FixedType.ofLength(4),
        Types.FixedType.ofLength(34)
    };

    for (Type type : equalityPrimitives) {
      Assert.assertEquals("Serialization result should be equal to starting type",
          type, TestHelpers.roundTripSerialize(type));
    }
  }

  @Test
  public void testStructs() throws Exception {
    Types.StructType struct = Types.StructType.of(
        Types.NestedField.required(34, "Name!", Types.StringType.get()),
        Types.NestedField.optional(35, "col", Types.DecimalType.of(38, 2)));

    Type copy = TestHelpers.roundTripSerialize(struct);
    Assert.assertEquals("Struct serialization should be equal to starting type", struct, copy);

    Type stringType = copy.asNestedType().asStructType().fieldType("Name!");
    Assert.assertSame("Struct serialization should preserve identity type",
        Types.StringType.get(), stringType);

    Type decimalType = copy.asNestedType().asStructType().field(35).type();
    Assert.assertEquals("Struct serialization should support id lookup",
        Types.DecimalType.of(38, 2), decimalType);
  }

  @Test
  public void testMaps() throws Exception {
    Type[] maps = new Type[] {
        Types.MapType.ofOptional(1, 2, Types.StringType.get(), Types.LongType.get()),
        Types.MapType.ofRequired(4, 5, Types.StringType.get(), Types.LongType.get())
    };

    for (Type map : maps) {
      Type copy = TestHelpers.roundTripSerialize(map);
      Assert.assertEquals("Map serialization should be equal to starting type", map, copy);
      // check the deserialized copy, not the original, so the assertion is meaningful
      Assert.assertSame("Map serialization should preserve identity type",
          Types.LongType.get(), copy.asNestedType().asMapType().valueType());
    }
  }

  @Test
  public void testLists() throws Exception {
    Type[] lists = new Type[] {
        Types.ListType.ofOptional(2, Types.DoubleType.get()),
        Types.ListType.ofRequired(5, Types.DoubleType.get())
    };

    for (Type list : lists) {
      Type copy = TestHelpers.roundTripSerialize(list);
      Assert.assertEquals("List serialization should be equal to starting type", list, copy);
      // check the deserialized copy, not the original, so the assertion is meaningful
      Assert.assertSame("List serialization should preserve identity type",
          Types.DoubleType.get(), copy.asNestedType().asListType().elementType());
    }
  }

  @Test
  public void testSchema() throws Exception {
    Schema schema = new Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "data", Types.StringType.get()),
        optional(3, "preferences", Types.StructType.of(
            required(8, "feature1", Types.BooleanType.get()),
            optional(9, "feature2", Types.BooleanType.get())
        )),
        required(4, "locations", Types.MapType.ofRequired(10, 11,
            Types.StringType.get(),
            Types.StructType.of(
                required(12, "lat", Types.FloatType.get()),
                required(13, "long", Types.FloatType.get())
            ))),
        optional(5, "points", Types.ListType.ofOptional(14,
            Types.StructType.of(
                required(15, "x", Types.LongType.get()),
                required(16, "y", Types.LongType.get())
            ))),
        required(6, "doubles", Types.ListType.ofRequired(17,
            Types.DoubleType.get()
        )),
        optional(7, "properties", Types.MapType.ofOptional(18, 19,
            Types.StringType.get(),
            Types.StringType.get()
        )),
        // use id 24 here: the previous id, 8, collided with the "feature1" field above
        required(24, "complex_key_map", Types.MapType.ofOptional(20, 21,
            Types.StructType.of(
                required(22, "x", Types.LongType.get()),
                optional(23, "y", Types.LongType.get())),
            Types.StringType.get()))
    );

    Assert.assertEquals("Schema serialization should be equal to starting schema",
        schema.asStruct(), TestHelpers.roundTripSerialize(schema).asStruct());
  }
}
| 2,028 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestCharSeqComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.apache.avro.util.Utf8;
import org.junit.Assert;
import org.junit.Test;
import java.util.Comparator;
/**
* Tests the comparator returned by CharSequence literals.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
public class TestCharSeqComparator {
  @Test
  public void testStringAndUtf8() {
    String javaString = "abc";
    Utf8 avroString = new Utf8("abc");

    // A comparator obtained from either literal form must treat the two
    // CharSequence implementations as interchangeable.
    Comparator<CharSequence> fromString = Literal.of(javaString).comparator();
    Assert.assertEquals("Should consider String and Utf8 equal",
        0, fromString.compare(javaString, avroString));

    Comparator<CharSequence> fromUtf8 = Literal.of(avroString).comparator();
    Assert.assertEquals("Should consider String and Utf8 equal",
        0, fromUtf8.compare(javaString, avroString));
  }

  @Test
  public void testSeqLength() {
    String shorter = "abc";
    String longer = "abcd";
    Comparator<CharSequence> cmp = Literal.of(shorter).comparator();

    // Sanity check that String.compareTo gives the same result
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        shorter.compareTo(longer) < 0);
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        longer.compareTo(shorter) > 0);

    // The comparator must agree with String.compareTo in both directions
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        cmp.compare(shorter, longer) < 0);
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        cmp.compare(longer, shorter) > 0);
  }

  @Test
  public void testCharOrderBeforeLength() {
    // "adc" sorts after "abcd" because the first differing character wins,
    // even though "abcd" is longer
    String laterFirstDiff = "adc";
    String longerButEarlier = "abcd";
    Comparator<CharSequence> cmp = Literal.of(laterFirstDiff).comparator();

    // Sanity check that String.compareTo gives the same result
    Assert.assertTrue("First difference takes precedence over length",
        laterFirstDiff.compareTo(longerButEarlier) > 0);
    Assert.assertTrue("First difference takes precedence over length",
        longerButEarlier.compareTo(laterFirstDiff) < 0);

    // The comparator must agree in both directions
    Assert.assertTrue("First difference takes precedence over length",
        cmp.compare(laterFirstDiff, longerButEarlier) > 0);
    Assert.assertTrue("First difference takes precedence over length",
        cmp.compare(longerButEarlier, laterFirstDiff) < 0);
  }

  @Test
  public void testNullHandling() {
    String nonNull = "abc";
    Comparator<CharSequence> cmp = Literal.of(nonNull).comparator();

    // nulls sort first and are equal to each other
    Assert.assertTrue("null comes before non-null", cmp.compare(null, nonNull) < 0);
    Assert.assertTrue("null comes before non-null", cmp.compare(nonNull, null) > 0);
    Assert.assertEquals("null equals null", 0, cmp.compare(null, null));
  }
}
| 2,029 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestBinaryComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.junit.Assert;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.Comparator;
/**
* Tests the comparator returned by binary and fixed literals.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
public class TestBinaryComparator {
  /** Wraps the given bytes in a heap buffer for comparison tests. */
  private static ByteBuffer buffer(byte... bytes) {
    return ByteBuffer.wrap(bytes);
  }

  @Test
  public void testBinaryUnsignedComparator() {
    // unsigned comparison: -1 (0xFF) sorts after 1, so smaller < larger
    ByteBuffer smaller = buffer((byte) 1, (byte) 1, (byte) 2);
    ByteBuffer larger = buffer((byte) 1, (byte) -1, (byte) 2);

    Comparator<ByteBuffer> cmp = Literal.of(smaller).comparator();

    Assert.assertTrue("Negative bytes should sort after positive bytes",
        cmp.compare(smaller, larger) < 0);
  }

  @Test
  public void testFixedUnsignedComparator() {
    // unsigned comparison: -1 (0xFF) sorts after 1, so smaller < larger
    ByteBuffer smaller = buffer((byte) 1, (byte) 1, (byte) 2);
    ByteBuffer larger = buffer((byte) 1, (byte) -1, (byte) 2);

    // convert the binary literal to a fixed-length literal to get its comparator
    Literal<ByteBuffer> fixedLit = Literal.of(smaller).to(Types.FixedType.ofLength(3));
    Comparator<ByteBuffer> cmp = fixedLit.comparator();

    Assert.assertTrue("Negative bytes should sort after positive bytes",
        cmp.compare(smaller, larger) < 0);
  }

  @Test
  public void testNullHandling() {
    ByteBuffer empty = ByteBuffer.allocate(0);
    Comparator<ByteBuffer> cmp = Literal.of(empty).comparator();

    // nulls sort first and are equal to each other
    Assert.assertTrue("null comes before non-null", cmp.compare(null, empty) < 0);
    Assert.assertTrue("null comes before non-null", cmp.compare(empty, null) > 0);
    Assert.assertEquals("null equals null", 0, cmp.compare(null, null));
  }
}
| 2,030 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestBucketing.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.google.common.base.Charsets;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.apache.avro.util.Utf8;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Random;
import java.util.UUID;
public class TestBucketing {
  // Bucketing is defined by the Iceberg spec as murmur3_32 over a canonical
  // byte representation of each type; this is the reference implementation used
  // to validate hashes below.
  private static final HashFunction MURMUR3 = Hashing.murmur3_32();
  private static Constructor<UUID> uuidBytesConstructor;

  @BeforeClass
  public static void getUUIDConstrutor() {
    try {
      // UUID(byte[]) is private; reflection is the only way to construct a UUID
      // directly from 16 raw bytes without re-encoding them.
      uuidBytesConstructor = UUID.class.getDeclaredConstructor(byte[].class);
      uuidBytesConstructor.setAccessible(true);
    } catch (NoSuchMethodException e) {
      throw new RuntimeException(e);
    }
  }

  private Random testRandom = null;

  @Before
  public void initRandom() {
    // reinitialize random for each test to avoid dependence on run order
    this.testRandom = new Random(314358);
  }

  /**
   * Verifies the worked examples from the Iceberg spec's bucket transform appendix.
   */
  @Test
  public void testSpecValues() {
    // booleans hash as the equivalent int: true -> hash(1)
    Assert.assertEquals("Spec example: hash(true) = 1392991556",
        1392991556, Bucket.<Integer>get(Types.IntegerType.get(), 100).hash(1));
    Assert.assertEquals("Spec example: hash(34) = 2017239379",
        2017239379, Bucket.<Integer>get(Types.IntegerType.get(), 100).hash(34));
    // ints and longs with the same value must hash identically
    Assert.assertEquals("Spec example: hash(34L) = 2017239379",
        2017239379, Bucket.<Long>get(Types.LongType.get(), 100).hash(34L));
    // the value hashed below is 1.0, not 17.11; the original messages were wrong
    Assert.assertEquals("Spec example: hash(1.0F) = -142385009",
        -142385009, new Bucket.BucketFloat(100).hash(1.0F));
    Assert.assertEquals("Spec example: hash(1.0D) = -142385009",
        -142385009, new Bucket.BucketDouble(100).hash(1.0D));
    Assert.assertEquals("Spec example: hash(decimal2(14.20)) = -500754589",
        -500754589,
        Bucket.<BigDecimal>get(Types.DecimalType.of(9, 2), 100).hash(new BigDecimal("14.20")));
    // repeated to check the result is stable across calls
    Assert.assertEquals("Spec example: hash(decimal2(14.20)) = -500754589",
        -500754589,
        Bucket.<BigDecimal>get(Types.DecimalType.of(9, 2), 100).hash(new BigDecimal("14.20")));

    Literal<Integer> date = Literal.of("2017-11-16").to(Types.DateType.get());
    Assert.assertEquals("Spec example: hash(2017-11-16) = -653330422",
        -653330422,
        Bucket.<Integer>get(Types.DateType.get(), 100).hash(date.value()));

    Literal<Long> timeValue = Literal.of("22:31:08").to(Types.TimeType.get());
    Assert.assertEquals("Spec example: hash(22:31:08) = -662762989",
        -662762989,
        Bucket.<Long>get(Types.TimeType.get(), 100).hash(timeValue.value()));

    Literal<Long> timestampVal = Literal.of("2017-11-16T22:31:08")
        .to(Types.TimestampType.withoutZone());
    Assert.assertEquals("Spec example: hash(2017-11-16T22:31:08) = -2047944441",
        -2047944441,
        Bucket.<Long>get(Types.TimestampType.withoutZone(), 100).hash(timestampVal.value()));

    // the same instant with a zone offset must produce the same hash (stored as UTC)
    Literal<Long> timestamptzVal = Literal.of("2017-11-16T14:31:08-08:00")
        .to(Types.TimestampType.withZone());
    Assert.assertEquals("Spec example: hash(2017-11-16T14:31:08-08:00) = -2047944441",
        -2047944441,
        Bucket.<Long>get(Types.TimestampType.withZone(), 100).hash(timestamptzVal.value()));

    // String and Utf8 representations of the same text must hash identically
    Assert.assertEquals("Spec example: hash(\"iceberg\") = 1210000089",
        1210000089, Bucket.<String>get(Types.StringType.get(), 100).hash("iceberg"));
    Assert.assertEquals("Spec example: hash(\"iceberg\") = 1210000089",
        1210000089, Bucket.<Utf8>get(Types.StringType.get(), 100).hash(new Utf8("iceberg")));

    Literal<UUID> uuid = Literal.of("f79c3e09-677c-4bbd-a479-3f349cb785e7")
        .to(Types.UUIDType.get());
    Assert.assertEquals("Spec example: hash(f79c3e09-677c-4bbd-a479-3f349cb785e7) = 1488055340",
        1488055340, Bucket.<UUID>get(Types.UUIDType.get(), 100).hash(uuid.value()));

    ByteBuffer bytes = ByteBuffer.wrap(new byte[] {0, 1, 2, 3});
    Assert.assertEquals("Spec example: hash([00 01 02 03]) = -188683207",
        -188683207, Bucket.<ByteBuffer>get(Types.BinaryType.get(), 100).hash(bytes));
    // repeated with the same buffer to check hashing does not consume or alter it
    Assert.assertEquals("Spec example: hash([00 01 02 03]) = -188683207",
        -188683207, Bucket.<ByteBuffer>get(Types.BinaryType.get(), 100).hash(bytes));
  }

  @Test
  public void testInteger() {
    int num = testRandom.nextInt();
    // ints are widened to 8-byte little-endian longs before hashing
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putLong((long) num);

    Bucket<Integer> bucketFunc = Bucket.get(Types.IntegerType.get(), 100);

    Assert.assertEquals("Integer hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testLong() {
    long num = testRandom.nextLong();
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putLong(num);

    Bucket<Long> bucketFunc = Bucket.get(Types.LongType.get(), 100);

    Assert.assertEquals("Long hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testIntegerTypePromotion() {
    // widening int -> long must not change the bucket, so partition data written
    // before a type promotion stays valid
    Bucket<Integer> bucketInts = Bucket.get(Types.IntegerType.get(), 100);
    Bucket<Long> bucketLongs = Bucket.get(Types.LongType.get(), 100);

    int r = testRandom.nextInt();

    Assert.assertEquals("Integer and Long bucket results should match",
        bucketInts.apply(r), bucketLongs.apply((long) r));
  }

  @Test
  public void testFloat() {
    float num = testRandom.nextFloat();
    // floats are widened to doubles before hashing (see testFloatTypePromotion)
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putDouble((double) num);

    Bucket<Float> bucketFunc = new Bucket.BucketFloat(100);

    Assert.assertEquals("Float hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testDouble() {
    double num = testRandom.nextDouble();
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putDouble(num);

    Bucket<Double> bucketFunc = new Bucket.BucketDouble(100);

    Assert.assertEquals("Double hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testFloatTypePromotion() {
    // widening float -> double must not change the bucket
    Bucket<Float> bucketFloats = new Bucket.BucketFloat(100);
    Bucket<Double> bucketDoubles = new Bucket.BucketDouble(100);

    float r = testRandom.nextFloat();

    Assert.assertEquals("Float and Double bucket results should match",
        bucketFloats.apply(r), bucketDoubles.apply((double) r));
  }

  @Test
  public void testDecimal() {
    double num = testRandom.nextDouble();
    BigDecimal decimal = BigDecimal.valueOf(num);
    // decimals hash their unscaled value's minimal two's-complement bytes
    byte[] unscaledBytes = decimal.unscaledValue().toByteArray();

    Bucket<BigDecimal> bucketFunc = Bucket.get(Types.DecimalType.of(9, 2), 100);

    Assert.assertEquals("Decimal hash should match hash of backing bytes",
        hashBytes(unscaledBytes), bucketFunc.hash(decimal));
  }

  @Test
  public void testString() {
    String string = "string to test murmur3 hash";
    byte[] asBytes = string.getBytes(Charsets.UTF_8);

    Bucket<CharSequence> bucketFunc = Bucket.get(Types.StringType.get(), 100);

    Assert.assertEquals("String hash should match hash of UTF-8 bytes",
        hashBytes(asBytes), bucketFunc.hash(string));
  }

  @Test
  public void testUtf8() {
    Utf8 utf8 = new Utf8("string to test murmur3 hash");
    byte[] asBytes = utf8.toString().getBytes(Charsets.UTF_8);

    Bucket<CharSequence> bucketFunc = Bucket.get(Types.StringType.get(), 100);

    Assert.assertEquals("String hash should match hash of UTF-8 bytes",
        hashBytes(asBytes), bucketFunc.hash(utf8));
  }

  @Test
  public void testByteBufferOnHeap() {
    byte[] bytes = randomBytes(128);
    // a slice of a larger array: only [5, 105) should be hashed
    ByteBuffer buffer = ByteBuffer.wrap(bytes, 5, 100);

    Bucket<ByteBuffer> bucketFunc = Bucket.get(Types.BinaryType.get(), 100);

    Assert.assertEquals(
        "HeapByteBuffer hash should match hash for correct slice",
        hashBytes(bytes, 5, 100), bucketFunc.hash(buffer));

    // verify that the buffer was not modified
    Assert.assertEquals("Buffer position should not change", 5, buffer.position());
    Assert.assertEquals("Buffer limit should not change", 105, buffer.limit());
  }

  @Test
  public void testByteBufferOffHeap() {
    byte[] bytes = randomBytes(128);
    ByteBuffer buffer = ByteBuffer.allocateDirect(128);

    // copy to the middle of the off-heap buffer
    buffer.position(5);
    buffer.limit(105);
    buffer.mark();
    buffer.put(bytes, 5, 100);
    buffer.reset();

    Bucket<ByteBuffer> bucketFunc = Bucket.get(Types.BinaryType.get(), 100);

    Assert.assertEquals(
        "DirectByteBuffer hash should match hash for correct slice",
        hashBytes(bytes, 5, 100), bucketFunc.hash(buffer));

    // verify that the buffer was not modified
    Assert.assertEquals("Buffer position should not change", 5, buffer.position());
    Assert.assertEquals("Buffer limit should not change", 105, buffer.limit());
  }

  @Test
  public void testUUIDHash() {
    byte[] uuidBytes = randomBytes(16);
    UUID uuid = newUUID(uuidBytes);

    Bucket<UUID> bucketFunc = Bucket.get(Types.UUIDType.get(), 100);

    Assert.assertEquals("UUID hash should match hash of backing bytes",
        hashBytes(uuidBytes), bucketFunc.hash(uuid));
  }

  /** Returns {@code length} bytes from the per-test seeded random source. */
  private byte[] randomBytes(int length) {
    byte[] bytes = new byte[length];
    testRandom.nextBytes(bytes);
    return bytes;
  }

  private int hashBytes(byte[] bytes) {
    return hashBytes(bytes, 0, bytes.length);
  }

  /** Reference murmur3_32 hash of the given byte range, as an int. */
  private int hashBytes(byte[] bytes, int offset, int length) {
    return MURMUR3.hashBytes(bytes, offset, length).asInt();
  }

  /**
   * This method returns a UUID for the bytes in the array without modification.
   * @param bytes a 16-byte array
   * @return a UUID for the bytes
   */
  private static UUID newUUID(byte[] bytes) {
    try {
      return uuidBytesConstructor.newInstance((Object) bytes);
    } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
      throw new RuntimeException(e);
    }
  }
}
| 2,031 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTruncate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
public class TestTruncate {
  @Test
  public void testTruncateInteger() {
    Truncate<Integer> truncate = Truncate.get(Types.IntegerType.get(), 10);
    // {input, expected}: truncation rounds toward negative infinity
    int[][] cases = {
        {0, 0}, {1, 0}, {5, 0}, {9, 0},
        {10, 10}, {11, 10},
        {-1, -10}, {-5, -10}, {-10, -10}, {-11, -20}};
    for (int[] c : cases) {
      Assert.assertEquals(c[1], (int) truncate.apply(c[0]));
    }
  }

  @Test
  public void testTruncateLong() {
    Truncate<Long> truncate = Truncate.get(Types.LongType.get(), 10);
    // {input, expected}: truncation rounds toward negative infinity
    long[][] cases = {
        {0L, 0L}, {1L, 0L}, {5L, 0L}, {9L, 0L},
        {10L, 10L}, {11L, 10L},
        {-1L, -10L}, {-5L, -10L}, {-10L, -10L}, {-11L, -20L}};
    for (long[] c : cases) {
      Assert.assertEquals(c[1], (long) truncate.apply(c[0]));
    }
  }

  @Test
  public void testTruncateDecimal() {
    // decimal truncation works by applying the decimal scale to the width: 10 scale 2 = 0.10
    Truncate<BigDecimal> truncate = Truncate.get(Types.DecimalType.of(9, 2), 10);
    Assert.assertEquals(new BigDecimal("12.30"), truncate.apply(new BigDecimal("12.34")));
    Assert.assertEquals(new BigDecimal("12.30"), truncate.apply(new BigDecimal("12.30")));
    Assert.assertEquals(new BigDecimal("12.20"), truncate.apply(new BigDecimal("12.29")));
    Assert.assertEquals(new BigDecimal("0.00"), truncate.apply(new BigDecimal("0.05")));
    // negative values also truncate toward negative infinity
    Assert.assertEquals(new BigDecimal("-0.10"), truncate.apply(new BigDecimal("-0.05")));
  }

  @Test
  public void testTruncateString() {
    Truncate<String> truncate = Truncate.get(Types.StringType.get(), 5);
    Assert.assertEquals("Should truncate strings longer than length",
        "abcde", truncate.apply("abcdefg"));
    Assert.assertEquals("Should not pad strings shorter than length",
        "abc", truncate.apply("abc"));
  }

  @Test
  public void testTruncateByteBuffer() throws Exception {
    Truncate<ByteBuffer> truncate = Truncate.get(Types.BinaryType.get(), 4);
    ByteBuffer longInput = ByteBuffer.wrap("abcdefg".getBytes("UTF-8"));
    Assert.assertEquals("Should truncate binary longer than length",
        ByteBuffer.wrap("abcd".getBytes("UTF-8")), truncate.apply(longInput));
    ByteBuffer shortInput = ByteBuffer.wrap("abc".getBytes("UTF-8"));
    Assert.assertEquals("Should not pad binary shorter than length",
        ByteBuffer.wrap("abc".getBytes("UTF-8")), truncate.apply(shortInput));
  }
}
| 2,032 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTimestamps.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
public class TestTimestamps {
  @Test
  public void testTimestampWithoutZoneToHumanString() {
    Types.TimestampType type = Types.TimestampType.withoutZone();
    // NOTE(review): declared Literal<Integer> though timestamps are usually long
    // micros; this compiles via erasure and the value is only ever passed back
    // through erased generic parameters — confirm against Literal.to() semantics.
    Literal<Integer> date = Literal.of("2017-12-01T10:12:55.038194").to(type);

    // year granularity
    Transform<Integer, Integer> year = Transforms.year(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017", year.toHumanString(year.apply(date.value())));

    // month granularity
    Transform<Integer, Integer> month = Transforms.month(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12", month.toHumanString(month.apply(date.value())));

    // day granularity
    Transform<Integer, Integer> day = Transforms.day(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12-01", day.toHumanString(day.apply(date.value())));

    // hour granularity; without a zone, the hour is taken as written (10)
    Transform<Integer, Integer> hour = Transforms.hour(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12-01-10", hour.toHumanString(hour.apply(date.value())));
  }

  @Test
  public void testTimestampWithZoneToHumanString() {
    Types.TimestampType type = Types.TimestampType.withZone();
    // 14:31 at UTC-8 is 18:31 UTC; values with zones are stored normalized to UTC
    Literal<Integer> date = Literal.of("2017-12-01T10:12:55.038194-08:00").to(type);

    Transform<Integer, Integer> year = Transforms.year(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017", year.toHumanString(year.apply(date.value())));

    Transform<Integer, Integer> month = Transforms.month(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12", month.toHumanString(month.apply(date.value())));

    Transform<Integer, Integer> day = Transforms.day(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12-01", day.toHumanString(day.apply(date.value())));

    // the hour is 18 because the value is always UTC
    Transform<Integer, Integer> hour = Transforms.hour(type);
    Assert.assertEquals("Should produce the correct Human string",
        "2017-12-01-18", hour.toHumanString(hour.apply(date.value())));
  }

  @Test
  public void testNullHumanString() {
    Types.TimestampType type = Types.TimestampType.withZone();
    // every time-based transform renders null as the string "null"
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.year(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.month(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.day(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.hour(type).toHumanString(null));
  }
}
| 2,033 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestDates.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
public class TestDates {
@Test
public void testDateToHumanString() {
Types.DateType type = Types.DateType.get();
Literal<Integer> date = Literal.of("2017-12-01").to(type);
Transform<Integer, Integer> year = Transforms.year(type);
Assert.assertEquals("Should produce the correct Human string",
"2017", year.toHumanString(year.apply(date.value())));
Transform<Integer, Integer> month = Transforms.month(type);
Assert.assertEquals("Should produce the correct Human string",
"2017-12", month.toHumanString(month.apply(date.value())));
Transform<Integer, Integer> day = Transforms.day(type);
Assert.assertEquals("Should produce the correct Human string",
"2017-12-01", day.toHumanString(day.apply(date.value())));
}
@Test
public void testNullHumanString() {
Types.DateType type = Types.DateType.get();
Assert.assertEquals("Should produce \"null\" for null",
"null", Transforms.year(type).toHumanString(null));
Assert.assertEquals("Should produce \"null\" for null",
"null", Transforms.month(type).toHumanString(null));
Assert.assertEquals("Should produce \"null\" for null",
"null", Transforms.day(type).toHumanString(null));
}
}
| 2,034 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestIdentity.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
public class TestIdentity {
@Test
public void testNullHumanString() {
Types.LongType longType = Types.LongType.get();
Transform<Long, Long> identity = Transforms.identity(longType);
Assert.assertEquals("Should produce \"null\" for null",
"null", identity.toHumanString(null));
}
@Test
public void testBinaryHumanString() {
Types.BinaryType binary = Types.BinaryType.get();
Transform<ByteBuffer, ByteBuffer> identity = Transforms.identity(binary);
Assert.assertEquals("Should base64-encode binary",
"AQID", identity.toHumanString(ByteBuffer.wrap(new byte[] {1, 2, 3})));
}
@Test
public void testFixedHumanString() {
Types.FixedType fixed3 = Types.FixedType.ofLength(3);
Transform<byte[], byte[]> identity = Transforms.identity(fixed3);
Assert.assertEquals("Should base64-encode binary",
"AQID", identity.toHumanString(new byte[] {1, 2, 3}));
}
@Test
public void testDateHumanString() {
Types.DateType date = Types.DateType.get();
Transform<Integer, Integer> identity = Transforms.identity(date);
String dateString = "2017-12-01";
Literal<Integer> d = Literal.of(dateString).to(date);
Assert.assertEquals("Should produce identical date",
dateString, identity.toHumanString(d.value()));
}
@Test
public void testTimeHumanString() {
Types.TimeType time = Types.TimeType.get();
Transform<Long, Long> identity = Transforms.identity(time);
String timeString = "10:12:55.038194";
Literal<Long> d = Literal.of(timeString).to(time);
Assert.assertEquals("Should produce identical time",
timeString, identity.toHumanString(d.value()));
}
@Test
public void testTimestampWithZoneHumanString() {
Types.TimestampType timestamptz = Types.TimestampType.withZone();
Transform<Long, Long> identity = Transforms.identity(timestamptz);
Literal<Long> ts = Literal.of("2017-12-01T10:12:55.038194-08:00").to(timestamptz);
// value will always be in UTC
Assert.assertEquals("Should produce timestamp with time zone adjusted to UTC",
"2017-12-01T18:12:55.038194Z", identity.toHumanString(ts.value()));
}
@Test
public void testTimestampWithoutZoneHumanString() {
Types.TimestampType timestamp = Types.TimestampType.withoutZone();
Transform<Long, Long> identity = Transforms.identity(timestamp);
String tsString = "2017-12-01T10:12:55.038194";
Literal<Long> ts = Literal.of(tsString).to(timestamp);
// value is not changed
Assert.assertEquals("Should produce identical timestamp without time zone",
tsString, identity.toHumanString(ts.value()));
}
@Test
public void testLongToHumanString() {
Types.LongType longType = Types.LongType.get();
Transform<Long, Long> identity = Transforms.identity(longType);
Assert.assertEquals("Should use Long toString",
"-1234567890000", identity.toHumanString(-1234567890000L));
}
@Test
public void testStringToHumanString() {
Types.StringType string = Types.StringType.get();
Transform<String, String> identity = Transforms.identity(string);
String withSlash = "a/b/c=d";
Assert.assertEquals("Should not modify Strings", withSlash, identity.toHumanString(withSlash));
}
@Test
public void testBigDecimalToHumanString() {
Types.DecimalType decimal = Types.DecimalType.of(9, 2);
Transform<BigDecimal, BigDecimal> identity = Transforms.identity(decimal);
String decimalString = "-1.50";
BigDecimal d = new BigDecimal(decimalString);
Assert.assertEquals("Should not modify Strings", decimalString, identity.toHumanString(d));
}
}
| 2,035 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestResiduals.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers.Row;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ResidualEvaluator;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.TestHelpers.assertAndUnwrapUnbound;
import static com.netflix.iceberg.expressions.Expression.Operation.GT;
import static com.netflix.iceberg.expressions.Expression.Operation.LT;
import static com.netflix.iceberg.expressions.Expressions.alwaysFalse;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.or;
public class TestResiduals {
  @Test
  public void testIdentityTransformResiduals() {
    Schema schema = new Schema(
        Types.NestedField.optional(50, "dateint", Types.IntegerType.get()),
        Types.NestedField.optional(51, "hour", Types.IntegerType.get())
    );

    // partitioned by dateint only; hour is an unpartitioned column, so predicates
    // on hour can never be fully satisfied by partition pruning and must remain
    // in the residual
    PartitionSpec spec = PartitionSpec.builderFor(schema)
        .identity("dateint")
        .build();

    // filter: (20170801 < dateint < 20170815)
    //       OR (dateint == 20170815 AND hour < 12)
    //       OR (dateint == 20170801 AND hour > 11)
    ResidualEvaluator resEval = new ResidualEvaluator(spec, or(or(
        and(lessThan("dateint", 20170815), greaterThan("dateint", 20170801)),
        and(equal("dateint", 20170815), lessThan("hour", 12))),
        and(equal("dateint", 20170801), greaterThan("hour", 11)))
    );

    // equal to the upper date bound: only the (dateint == 20170815 AND hour < 12)
    // branch can match, so the residual is the remaining hour predicate
    Expression residual = resEval.residualFor(Row.of(20170815));
    UnboundPredicate<?> unbound = assertAndUnwrapUnbound(residual);
    Assert.assertEquals("Residual should be hour < 12", LT, unbound.op());
    Assert.assertEquals("Residual should be hour < 12", "hour", unbound.ref().name());
    Assert.assertEquals("Residual should be hour < 12", 12, unbound.literal().value());

    // equal to the lower date bound: only the (dateint == 20170801 AND hour > 11)
    // branch can match
    residual = resEval.residualFor(Row.of(20170801));
    unbound = assertAndUnwrapUnbound(residual);
    Assert.assertEquals("Residual should be hour > 11", GT, unbound.op());
    Assert.assertEquals("Residual should be hour > 11", "hour", unbound.ref().name());
    Assert.assertEquals("Residual should be hour > 11", 11, unbound.literal().value());

    // inside the date range: the open-interval branch is satisfied by the
    // partition value alone, so nothing remains to evaluate per row
    residual = resEval.residualFor(Row.of(20170812));
    Assert.assertEquals("Residual should be alwaysTrue", alwaysTrue(), residual);

    // outside the date range: no branch can match for this partition
    residual = resEval.residualFor(Row.of(20170817));
    Assert.assertEquals("Residual should be alwaysFalse", alwaysFalse(), residual);
  }
}
| 2,036 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Or;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import static com.netflix.iceberg.TestHelpers.assertAndUnwrap;
import static com.netflix.iceberg.TestHelpers.assertAndUnwrapUnbound;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestProjection {
  private static final Schema SCHEMA = new Schema(
      optional(16, "id", Types.LongType.get())
  );

  /**
   * Verifies that identity-partition projection preserves predicates on the "id" column: the
   * projected predicate must reference the same field, use the same operation, and carry the
   * same literal value as the predicate bound to the table schema.
   * <p>
   * Shared by the inclusive and strict test cases, which previously duplicated this loop.
   *
   * @param strict if true, project with the strict projection; otherwise the inclusive projection
   */
  private static void checkIdentityProjection(boolean strict) {
    List<UnboundPredicate<?>> predicates = Lists.newArrayList(
        Expressions.notNull("id"),
        Expressions.isNull("id"),
        Expressions.lessThan("id", 100),
        Expressions.lessThanOrEqual("id", 101),
        Expressions.greaterThan("id", 102),
        Expressions.greaterThanOrEqual("id", 103),
        Expressions.equal("id", 104),
        Expressions.notEqual("id", 105)
    );

    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA)
        .identity("id")
        .build();

    for (UnboundPredicate<?> predicate : predicates) {
      // get the projected predicate through the identity partition transform
      Expression expr = strict
          ? Projections.strict(spec).project(predicate)
          : Projections.inclusive(spec).project(predicate);
      UnboundPredicate<?> projected = assertAndUnwrapUnbound(expr);

      // bind the original predicate to the schema so typed values can be compared
      BoundPredicate<?> bound = assertAndUnwrap(predicate.bind(spec.schema().asStruct()));

      Assert.assertEquals("Field name should match partition struct field",
          "id", projected.ref().name());
      Assert.assertEquals("Operation should match", bound.op(), projected.op());

      if (bound.literal() != null) {
        Assert.assertEquals("Literal should be equal",
            bound.literal().value(), projected.literal().value());
      } else {
        Assert.assertNull("Literal should be null", projected.literal());
      }
    }
  }

  @Test
  public void testIdentityProjection() {
    checkIdentityProjection(false /* inclusive projection */);
  }

  @Test
  public void testStrictIdentityProjection() {
    checkIdentityProjection(true /* strict projection */);
  }

  @Test
  public void testBadSparkPartitionFilter() {
    // this tests a case that results in a full table scan in Spark with Hive tables. because the
    // hour field is not a partition, mixing it with partition columns in the filter expression
    // prevents the day/hour boundaries from being pushed to the metastore. this is an easy mistake
    // when tables are normally partitioned by both hour and dateint. the filter is:
    //
    // WHERE dateint = 20180416
    //   OR (dateint = 20180415 and hour >= 20)
    //   OR (dateint = 20180417 and hour <= 4)
    Schema schema = new Schema(
        required(1, "id", Types.LongType.get()),
        optional(2, "data", Types.StringType.get()),
        required(3, "hour", Types.IntegerType.get()),
        required(4, "dateint", Types.IntegerType.get()));

    PartitionSpec spec = PartitionSpec.builderFor(schema)
        .identity("dateint")
        .build();

    Expression filter = or(equal("dateint", 20180416), or(
        and(equal("dateint", 20180415), greaterThanOrEqual("hour", 20)),
        and(equal("dateint", 20180417), lessThanOrEqual("hour", 4))));

    // the inclusive projection should keep all three dateint predicates so partition boundaries
    // can still be pushed down, dropping only the non-partition hour predicates
    Expression projection = Projections.inclusive(spec).project(filter);

    Assert.assertTrue(projection instanceof Or);
    Or or1 = (Or) projection;
    UnboundPredicate<?> dateint1 = assertAndUnwrapUnbound(or1.left());
    Assert.assertEquals("Should be a dateint predicate", "dateint", dateint1.ref().name());
    Assert.assertEquals("Should be dateint=20180416", 20180416, dateint1.literal().value());

    Assert.assertTrue(or1.right() instanceof Or);
    Or or2 = (Or) or1.right();
    UnboundPredicate<?> dateint2 = assertAndUnwrapUnbound(or2.left());
    Assert.assertEquals("Should be a dateint predicate", "dateint", dateint2.ref().name());
    Assert.assertEquals("Should be dateint=20180415", 20180415, dateint2.literal().value());

    UnboundPredicate<?> dateint3 = assertAndUnwrapUnbound(or2.right());
    Assert.assertEquals("Should be a dateint predicate", "dateint", dateint3.ref().name());
    Assert.assertEquals("Should be dateint=20180417", 20180417, dateint3.literal().value());
  }
}
| 2,037 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTransformSerialization.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
public class TestTransformSerialization {
  @Test
  public void testTransforms() throws Exception {
    // one column of every type that partition transforms can be applied to
    Schema testSchema = new Schema(
        Types.NestedField.required(1, "i", Types.IntegerType.get()),
        Types.NestedField.required(2, "l", Types.LongType.get()),
        Types.NestedField.required(3, "d", Types.DateType.get()),
        Types.NestedField.required(4, "t", Types.TimeType.get()),
        Types.NestedField.required(5, "ts", Types.TimestampType.withoutZone()),
        Types.NestedField.required(6, "dec", Types.DecimalType.of(9, 2)),
        Types.NestedField.required(7, "s", Types.StringType.get()),
        Types.NestedField.required(8, "u", Types.UUIDType.get()),
        Types.NestedField.required(9, "f", Types.FixedType.ofLength(3)),
        Types.NestedField.required(10, "b", Types.BinaryType.get())
    );

    // build a spec covering every allowed transform/type combination; field order matters for
    // spec equality, so this chain must stay stable
    PartitionSpec original = PartitionSpec.builderFor(testSchema)
        .identity("i")
        .identity("l")
        .identity("d")
        .identity("t")
        .identity("ts")
        .identity("dec")
        .identity("s")
        .identity("u")
        .identity("f")
        .identity("b")
        .bucket("i", 128)
        .bucket("l", 128)
        .bucket("d", 128)
        .bucket("t", 128)
        .bucket("ts", 128)
        .bucket("dec", 128)
        .bucket("s", 128)
        .bucket("u", 128)
        .bucket("f", 128)
        .bucket("b", 128)
        .year("d")
        .month("d")
        .day("d")
        .year("ts")
        .month("ts")
        .day("ts")
        .hour("ts")
        .truncate("i", 10)
        .truncate("l", 10)
        .truncate("dec", 10)
        .truncate("s", 10)
        .build();

    // serialize and deserialize, then compare against the original spec
    PartitionSpec roundTripped = TestHelpers.roundTripSerialize(original);
    Assert.assertEquals("Deserialization should produce equal partition spec",
        original, roundTripped);
  }
}
| 2,038 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestLiteralSerialization.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.util.UUID;
public class TestLiteralSerialization {
  @Test
  public void testLiterals() throws Exception {
    // one literal of each supported type, including conversions from strings and bytes
    Literal[] fixtures = new Literal[] {
        Literal.of(false),
        Literal.of(34),
        Literal.of(35L),
        Literal.of(36.75F),
        Literal.of(8.75D),
        Literal.of("2017-11-29").to(Types.DateType.get()),
        Literal.of("11:30:07").to(Types.TimeType.get()),
        Literal.of("2017-11-29T11:30:07.123").to(Types.TimestampType.withoutZone()),
        Literal.of("2017-11-29T11:30:07.123+01:00").to(Types.TimestampType.withZone()),
        Literal.of("abc"),
        Literal.of(UUID.randomUUID()),
        Literal.of(new byte[] { 1, 2, 3 }).to(Types.FixedType.ofLength(3)),
        Literal.of(new byte[] { 3, 4, 5, 6 }).to(Types.BinaryType.get()),
        Literal.of(new BigDecimal("122.50")),
    };

    for (int i = 0; i < fixtures.length; i += 1) {
      checkValue(fixtures[i]);
    }
  }

  // round-trips a literal through Java serialization and checks value equality using the
  // literal's own comparator (covers types without a usable equals, e.g. byte buffers)
  private <T> void checkValue(Literal<T> original) throws Exception {
    Literal<T> roundTripped = TestHelpers.roundTripSerialize(original);
    Assert.assertEquals("Literal's comparator should consider values equal",
        0, original.comparator().compare(original.value(), roundTripped.value()));
    Assert.assertEquals("Copy's comparator should consider values equal",
        0, roundTripped.comparator().compare(original.value(), roundTripped.value()));
  }
}
| 2,039 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestStringLiteralConversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.data.TimeConversions;
import org.joda.time.DateTimeZone;
import org.joda.time.LocalDate;
import org.joda.time.LocalDateTime;
import org.joda.time.LocalTime;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.time.DateTimeException;
import java.util.UUID;
public class TestStringLiteralConversions {
@Test
public void testStringToStringLiteral() {
Literal<CharSequence> string = Literal.of("abc");
Assert.assertSame("Should return same instance", string, string.to(Types.StringType.get()));
}
@Test
public void testStringToDateLiteral() {
Literal<CharSequence> dateStr = Literal.of("2017-08-18");
Literal<Integer> date = dateStr.to(Types.DateType.get());
// use Avro's date conversion to validate the result
Schema avroSchema = LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT));
TimeConversions.DateConversion avroConversion = new TimeConversions.DateConversion();
int avroValue = avroConversion.toInt(
new LocalDate(2017, 8, 18),
avroSchema, avroSchema.getLogicalType());
Assert.assertEquals("Date should match", avroValue, (int) date.value());
}
@Test
public void testStringToTimeLiteral() {
// use Avro's time conversion to validate the result
Schema avroSchema = LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG));
TimeConversions.LossyTimeMicrosConversion avroConversion =
new TimeConversions.LossyTimeMicrosConversion();
Literal<CharSequence> timeStr = Literal.of("14:21:01.919");
Literal<Long> time = timeStr.to(Types.TimeType.get());
long avroValue = avroConversion.toLong(
new LocalTime(14, 21, 1, 919),
avroSchema, avroSchema.getLogicalType());
Assert.assertEquals("Time should match", avroValue, (long) time.value());
}
@Test
public void testStringToTimestampLiteral() {
// use Avro's timestamp conversion to validate the result
Schema avroSchema = LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG));
TimeConversions.LossyTimestampMicrosConversion avroConversion =
new TimeConversions.LossyTimestampMicrosConversion();
// Timestamp with explicit UTC offset, +00:00
Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919+00:00");
Literal<Long> timestamp = timestampStr.to(Types.TimestampType.withZone());
long avroValue = avroConversion.toLong(
new LocalDateTime(2017, 8, 18, 14, 21, 1, 919).toDateTime(DateTimeZone.UTC),
avroSchema, avroSchema.getLogicalType());
Assert.assertEquals("Timestamp should match", avroValue, (long) timestamp.value());
// Timestamp without an explicit zone should be UTC (equal to the previous converted value)
timestampStr = Literal.of("2017-08-18T14:21:01.919");
timestamp = timestampStr.to(Types.TimestampType.withoutZone());
Assert.assertEquals("Timestamp without zone should match UTC",
avroValue, (long) timestamp.value());
// Timestamp with an explicit offset should be adjusted to UTC
timestampStr = Literal.of("2017-08-18T14:21:01.919-07:00");
timestamp = timestampStr.to(Types.TimestampType.withZone());
avroValue = avroConversion.toLong(
new LocalDateTime(2017, 8, 18, 21, 21, 1, 919).toDateTime(DateTimeZone.UTC),
avroSchema, avroSchema.getLogicalType());
Assert.assertEquals("Timestamp without zone should match UTC",
avroValue, (long) timestamp.value());
}
@Test(expected = DateTimeException.class)
public void testTimestampWithZoneWithoutZoneInLiteral() {
// Zone must be present in literals when converting to timestamp with zone
Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919");
timestampStr.to(Types.TimestampType.withZone());
}
@Test(expected = DateTimeException.class)
public void testTimestampWithoutZoneWithZoneInLiteral() {
// Zone must not be present in literals when converting to timestamp without zone
Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919+07:00");
timestampStr.to(Types.TimestampType.withoutZone());
}
@Test
public void testStringToUUIDLiteral() {
UUID expected = UUID.randomUUID();
Literal<CharSequence> uuidStr = Literal.of(expected.toString());
Literal<UUID> uuid = uuidStr.to(Types.UUIDType.get());
Assert.assertEquals("UUID should match", expected, uuid.value());
}
@Test
public void testStringToDecimalLiteral() {
BigDecimal expected = new BigDecimal("34.560");
Literal<CharSequence> decimalStr = Literal.of("34.560");
Literal<BigDecimal> decimal = decimalStr.to(Types.DecimalType.of(9, 3));
Assert.assertEquals("Decimal should have scale 3", 3, decimal.value().scale());
Assert.assertEquals("Decimal should match", expected, decimal.value());
Assert.assertNull("Wrong scale in conversion should return null",
decimalStr.to(Types.DecimalType.of(9, 2)));
Assert.assertNull("Wrong scale in conversion should return null",
decimalStr.to(Types.DecimalType.of(9, 4)));
}
}
| 2,040 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestPredicateBinding.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.util.Arrays;
import java.util.List;
import static com.netflix.iceberg.expressions.Expression.Operation.EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.GT;
import static com.netflix.iceberg.expressions.Expression.Operation.GT_EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.LT;
import static com.netflix.iceberg.expressions.Expression.Operation.LT_EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;
import static com.netflix.iceberg.expressions.Expressions.ref;
import static com.netflix.iceberg.TestHelpers.assertAndUnwrap;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestPredicateBinding {
  // binary comparison operations exercised by every comparison-binding test (constant: final)
  private static final List<Expression.Operation> COMPARISONS = Arrays.asList(
      LT, LT_EQ, GT, GT_EQ, EQ, NOT_EQ);

  @Test
  @SuppressWarnings("unchecked")
  public void testMultipleFields() {
    StructType struct = StructType.of(
        required(10, "x", Types.IntegerType.get()),
        required(11, "y", Types.IntegerType.get()),
        required(12, "z", Types.IntegerType.get())
    );

    UnboundPredicate<Integer> unbound = new UnboundPredicate<>(LT, ref("y"), 6);

    Expression expr = unbound.bind(struct);
    BoundPredicate<Integer> bound = assertAndUnwrap(expr);

    // binding resolves the named reference to the matching field's ID
    Assert.assertEquals("Should reference correct field ID", 11, bound.ref().fieldId());
    Assert.assertEquals("Should not change the comparison operation", LT, bound.op());
    Assert.assertEquals("Should not alter literal value",
        Integer.valueOf(6), bound.literal().value());
  }

  @Test
  public void testMissingField() {
    StructType struct = StructType.of(
        required(13, "x", Types.IntegerType.get())
    );

    UnboundPredicate<Integer> unbound = new UnboundPredicate<>(LT, ref("missing"), 6);
    try {
      unbound.bind(struct);
      Assert.fail("Binding a missing field should fail");
    } catch (ValidationException e) {
      Assert.assertTrue("Validation should complain about missing field",
          e.getMessage().contains("Cannot find field 'missing' in struct:"));
    }
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testComparisonPredicateBinding() {
    StructType struct = StructType.of(required(14, "x", Types.IntegerType.get()));

    for (Expression.Operation op : COMPARISONS) {
      UnboundPredicate<Integer> unbound = new UnboundPredicate<>(op, ref("x"), 5);

      Expression expr = unbound.bind(struct);
      BoundPredicate<Integer> bound = assertAndUnwrap(expr);

      Assert.assertEquals("Should not alter literal value",
          Integer.valueOf(5), bound.literal().value());
      Assert.assertEquals("Should reference correct field ID", 14, bound.ref().fieldId());
      Assert.assertEquals("Should not change the comparison operation", op, bound.op());
    }
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testLiteralConversion() {
    StructType struct = StructType.of(required(15, "d", Types.DecimalType.of(9, 2)));

    for (Expression.Operation op : COMPARISONS) {
      // binding should convert the string literal to the field's decimal type
      UnboundPredicate<String> unbound = new UnboundPredicate<>(op, ref("d"), "12.40");

      Expression expr = unbound.bind(struct);
      BoundPredicate<BigDecimal> bound = assertAndUnwrap(expr);
      Assert.assertEquals("Should convert literal value to decimal",
          new BigDecimal("12.40"), bound.literal().value());
      Assert.assertEquals("Should reference correct field ID", 15, bound.ref().fieldId());
      Assert.assertEquals("Should not change the comparison operation", op, bound.op());
    }
  }

  @Test
  public void testInvalidConversions() {
    StructType struct = StructType.of(required(16, "f", Types.FloatType.get()));

    for (Expression.Operation op : COMPARISONS) {
      UnboundPredicate<String> unbound = new UnboundPredicate<>(op, ref("f"), "12.40");

      try {
        unbound.bind(struct);
        Assert.fail("Should not convert string to float");
      } catch (ValidationException e) {
        // fixed: expected value goes first so a failure labels expected/actual correctly,
        // and the previously truncated message ("Should ") is now meaningful
        Assert.assertEquals("Should complain about an invalid conversion",
            "Invalid value for comparison inclusive type float: 12.40 (java.lang.String)",
            e.getMessage());
      }
    }
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testLongToIntegerConversion() {
    StructType struct = StructType.of(required(17, "i", Types.IntegerType.get()));

    // long literals beyond the int range cannot match any int value, so binding simplifies
    // the predicate to a constant expression

    // above Integer.MAX_VALUE: everything is less than the bound
    UnboundPredicate<Long> lt = new UnboundPredicate<>(
        LT, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Less than above max should be alwaysTrue",
        Expressions.alwaysTrue(), lt.bind(struct));

    UnboundPredicate<Long> lteq = new UnboundPredicate<>(
        LT_EQ, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Less than or equal above max should be alwaysTrue",
        Expressions.alwaysTrue(), lteq.bind(struct));

    // below Integer.MIN_VALUE: everything is greater than the bound
    UnboundPredicate<Long> gt = new UnboundPredicate<>(
        GT, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Greater than below min should be alwaysTrue",
        Expressions.alwaysTrue(), gt.bind(struct));

    UnboundPredicate<Long> gteq = new UnboundPredicate<>(
        GT_EQ, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Greater than or equal below min should be alwaysTrue",
        Expressions.alwaysTrue(), gteq.bind(struct));

    // out-of-range bounds in the other direction can never match
    UnboundPredicate<Long> gtMax = new UnboundPredicate<>(
        GT, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Greater than above max should be alwaysFalse",
        Expressions.alwaysFalse(), gtMax.bind(struct));

    UnboundPredicate<Long> gteqMax = new UnboundPredicate<>(
        GT_EQ, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Greater than or equal above max should be alwaysFalse",
        Expressions.alwaysFalse(), gteqMax.bind(struct));

    UnboundPredicate<Long> ltMin = new UnboundPredicate<>(
        LT, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Less than below min should be alwaysFalse",
        Expressions.alwaysFalse(), ltMin.bind(struct));

    UnboundPredicate<Long> lteqMin = new UnboundPredicate<>(
        LT_EQ, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Less than or equal below min should be alwaysFalse",
        Expressions.alwaysFalse(), lteqMin.bind(struct));

    // exactly at the int boundaries, the literal is narrowed to Integer instead
    Expression ltExpr = new UnboundPredicate<>(LT, ref("i"), (long) Integer.MAX_VALUE).bind(struct);
    BoundPredicate<Integer> ltMax = assertAndUnwrap(ltExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MAX_VALUE, ltMax.literal().value());

    Expression lteqExpr = new UnboundPredicate<>(LT_EQ, ref("i"), (long) Integer.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Integer> lteqMax = assertAndUnwrap(lteqExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MAX_VALUE, lteqMax.literal().value());

    Expression gtExpr = new UnboundPredicate<>(GT, ref("i"), (long) Integer.MIN_VALUE).bind(struct);
    BoundPredicate<Integer> gtMin = assertAndUnwrap(gtExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MIN_VALUE, gtMin.literal().value());

    Expression gteqExpr = new UnboundPredicate<>(GT_EQ, ref("i"), (long) Integer.MIN_VALUE)
        .bind(struct);
    BoundPredicate<Integer> gteqMin = assertAndUnwrap(gteqExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MIN_VALUE, gteqMin.literal().value());
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testDoubleToFloatConversion() {
    StructType struct = StructType.of(required(18, "f", Types.FloatType.get()));

    // double literals beyond the float range cannot match any float value, so binding
    // simplifies the predicate to a constant expression

    // above Float.MAX_VALUE: everything is less than the bound
    UnboundPredicate<Double> lt = new UnboundPredicate<>(
        LT, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Less than above max should be alwaysTrue",
        Expressions.alwaysTrue(), lt.bind(struct));

    UnboundPredicate<Double> lteq = new UnboundPredicate<>(
        LT_EQ, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Less than or equal above max should be alwaysTrue",
        Expressions.alwaysTrue(), lteq.bind(struct));

    // below -Float.MAX_VALUE: everything is greater than the bound
    UnboundPredicate<Double> gt = new UnboundPredicate<>(
        GT, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Greater than below min should be alwaysTrue",
        Expressions.alwaysTrue(), gt.bind(struct));

    UnboundPredicate<Double> gteq = new UnboundPredicate<>(
        GT_EQ, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Greater than or equal below min should be alwaysTrue",
        Expressions.alwaysTrue(), gteq.bind(struct));

    // out-of-range bounds in the other direction can never match
    UnboundPredicate<Double> gtMax = new UnboundPredicate<>(
        GT, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Greater than above max should be alwaysFalse",
        Expressions.alwaysFalse(), gtMax.bind(struct));

    UnboundPredicate<Double> gteqMax = new UnboundPredicate<>(
        GT_EQ, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Greater than or equal above max should be alwaysFalse",
        Expressions.alwaysFalse(), gteqMax.bind(struct));

    UnboundPredicate<Double> ltMin = new UnboundPredicate<>(
        LT, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Less than below min should be alwaysFalse",
        Expressions.alwaysFalse(), ltMin.bind(struct));

    UnboundPredicate<Double> lteqMin = new UnboundPredicate<>(
        LT_EQ, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Less than or equal below min should be alwaysFalse",
        Expressions.alwaysFalse(), lteqMin.bind(struct));

    // exactly at the float boundaries, the literal is narrowed to Float instead
    Expression ltExpr = new UnboundPredicate<>(LT, ref("f"), (double) Float.MAX_VALUE).bind(struct);
    BoundPredicate<Float> ltMax = assertAndUnwrap(ltExpr);
    Assert.assertEquals("Should translate bound to Float",
        (Float) Float.MAX_VALUE, ltMax.literal().value());

    Expression lteqExpr = new UnboundPredicate<>(LT_EQ, ref("f"), (double) Float.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Float> lteqMax = assertAndUnwrap(lteqExpr);
    Assert.assertEquals("Should translate bound to Float",
        (Float) Float.MAX_VALUE, lteqMax.literal().value());

    Expression gtExpr = new UnboundPredicate<>(GT, ref("f"), (double) -Float.MAX_VALUE).bind(struct);
    BoundPredicate<Float> gtMin = assertAndUnwrap(gtExpr);
    Assert.assertEquals("Should translate bound to Float",
        Float.valueOf(-Float.MAX_VALUE), gtMin.literal().value());

    Expression gteqExpr = new UnboundPredicate<>(GT_EQ, ref("f"), (double) -Float.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Float> gteqMin = assertAndUnwrap(gteqExpr);
    Assert.assertEquals("Should translate bound to Float",
        Float.valueOf(-Float.MAX_VALUE), gteqMin.literal().value());
  }

  @Test
  @SuppressWarnings("unchecked")
  public void testIsNull() {
    // renamed from "optional"/"required" to avoid confusion with the static-imported
    // NestedField factory methods of the same names
    StructType optionalStruct = StructType.of(optional(19, "s", Types.StringType.get()));
    UnboundPredicate<?> unbound = new UnboundPredicate<>(IS_NULL, ref("s"));
    Expression expr = unbound.bind(optionalStruct);
    BoundPredicate<?> bound = assertAndUnwrap(expr);

    Assert.assertEquals("Should use the same operation", IS_NULL, bound.op());
    Assert.assertEquals("Should use the correct field", 19, bound.ref().fieldId());
    Assert.assertNull("Should not have a literal value", bound.literal());

    // a required field can never be null, so the predicate simplifies to a constant
    StructType requiredStruct = StructType.of(required(20, "s", Types.StringType.get()));
    Assert.assertEquals("IsNull of a required field should be alwaysFalse",
        Expressions.alwaysFalse(), unbound.bind(requiredStruct));
  }

  @Test
  public void testNotNull() {
    StructType optionalStruct = StructType.of(optional(21, "s", Types.StringType.get()));
    UnboundPredicate<?> unbound = new UnboundPredicate<>(NOT_NULL, ref("s"));
    Expression expr = unbound.bind(optionalStruct);
    BoundPredicate<?> bound = assertAndUnwrap(expr);

    Assert.assertEquals("Should use the same operation", NOT_NULL, bound.op());
    Assert.assertEquals("Should use the correct field", 21, bound.ref().fieldId());
    Assert.assertNull("Should not have a literal value", bound.literal());

    // a required field is never null, so the predicate simplifies to a constant
    StructType requiredStruct = StructType.of(required(22, "s", Types.StringType.get()));
    Assert.assertEquals("NotNull of a required field should be alwaysTrue",
        Expressions.alwaysTrue(), unbound.bind(requiredStruct));
  }
}
| 2,041 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestStrictMetricsEvaluator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.TestHelpers.Row;
import com.netflix.iceberg.TestHelpers.TestDataFile;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.StringType;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Conversions.toByteBuffer;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for StrictMetricsEvaluator, which returns true only when a predicate is guaranteed
 * to be true for EVERY row in a data file, judging from the file's column-level metrics.
 */
public class TestStrictMetricsEvaluator {
  private static final Schema SCHEMA = new Schema(
      required(1, "id", IntegerType.get()),
      optional(2, "no_stats", IntegerType.get()),
      required(3, "required", StringType.get()),
      optional(4, "all_nulls", StringType.get()),
      optional(5, "some_nulls", StringType.get()),
      optional(6, "no_nulls", StringType.get()),
      required(7, "always_5", IntegerType.get())
  );

  // 50-record file: id spans [30, 79]; always_5 is constant 5; all_nulls is entirely null,
  // some_nulls has 10 nulls of 50 values, and no_nulls has none
  private static final DataFile FILE = new TestDataFile("file.avro", Row.of(), 50,
      // any value counts, including nulls
      ImmutableMap.of(
          4, 50L,
          5, 50L,
          6, 50L),
      // null value counts
      ImmutableMap.of(
          4, 50L,
          5, 10L,
          6, 0L),
      // lower bounds
      ImmutableMap.of(
          1, toByteBuffer(IntegerType.get(), 30),
          7, toByteBuffer(IntegerType.get(), 5)),
      // upper bounds
      ImmutableMap.of(
          1, toByteBuffer(IntegerType.get(), 79),
          7, toByteBuffer(IntegerType.get(), 5)));

  @Test
  public void testAllNulls() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("all_nulls")).eval(FILE);
    Assert.assertFalse("Should not match: no non-null value in all null column", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("some_nulls")).eval(FILE);
    Assert.assertFalse("Should not match: column with some nulls contains a non-null value", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("no_nulls")).eval(FILE);
    Assert.assertTrue("Should match: non-null column contains no null values", shouldRead);
  }

  @Test
  public void testNoNulls() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("all_nulls")).eval(FILE);
    Assert.assertTrue("Should match: all values are null", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("some_nulls")).eval(FILE);
    Assert.assertFalse("Should not match: not all values are null", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("no_nulls")).eval(FILE);
    Assert.assertFalse("Should not match: no values are null", shouldRead);
  }

  @Test
  public void testRequiredColumn() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("required")).eval(FILE);
    Assert.assertTrue("Should match: required columns are always non-null", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("required")).eval(FILE);
    Assert.assertFalse("Should not match: required columns never contain null", shouldRead);
  }

  @Test
  public void testMissingColumn() {
    // binding must fail for a column the schema does not define
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new StrictMetricsEvaluator(SCHEMA, lessThan("missing", 5)).eval(FILE));
  }

  @Test
  public void testMissingStats() {
    // without stats, a strict evaluator can never guarantee a predicate holds for all rows
    DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50);

    Expression[] exprs = new Expression[] {
        lessThan("no_stats", 5), lessThanOrEqual("no_stats", 30), equal("no_stats", 70),
        greaterThan("no_stats", 78), greaterThanOrEqual("no_stats", 90), notEqual("no_stats", 101),
        isNull("no_stats"), notNull("no_stats")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, expr).eval(missingStats);
      Assert.assertFalse("Should never match when stats are missing for expr: " + expr, shouldRead);
    }
  }

  @Test
  public void testZeroRecordFile() {
    // any predicate holds vacuously for a file with no rows
    DataFile empty = new TestDataFile("file.parquet", Row.of(), 0);

    Expression[] exprs = new Expression[] {
        lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78),
        greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"),
        notNull("some_nulls")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, expr).eval(empty);
      Assert.assertTrue("Should always match 0-record file: " + expr, shouldRead);
    }
  }

  @Test
  public void testNot() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, not(lessThan("id", 5))).eval(FILE);
    Assert.assertTrue("Should match: not(false)", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(greaterThan("id", 5))).eval(FILE);
    Assert.assertFalse("Should not match: not(true)", shouldRead);
  }

  @Test
  public void testAnd() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA,
        and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE);
    Assert.assertFalse("Should not match: range overlaps data", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA,
        and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE);
    Assert.assertFalse("Should not match: range does not include any data", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA,
        and(lessThan("id", 85), greaterThanOrEqual("id", 0))).eval(FILE);
    Assert.assertTrue("Should match: range includes all data", shouldRead);
  }

  @Test
  public void testOr() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE);
    Assert.assertFalse("Should not match: no matching values", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE);
    Assert.assertFalse("Should not match: some values do not match", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 30))).eval(FILE);
    Assert.assertTrue("Should match: all values match > 30", shouldRead);
  }

  @Test
  public void testIntegerLt() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 30)).eval(FILE);
    Assert.assertFalse("Should not match: always false", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 31)).eval(FILE);
    Assert.assertFalse("Should not match: 32 and greater not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 79)).eval(FILE);
    Assert.assertFalse("Should not match: 79 not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 80)).eval(FILE);
    Assert.assertTrue("Should match: all values in range", shouldRead);
  }

  @Test
  public void testIntegerLtEq() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 29)).eval(FILE);
    Assert.assertFalse("Should not match: always false", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 30)).eval(FILE);
    Assert.assertFalse("Should not match: 31 and greater not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 79)).eval(FILE);
    Assert.assertTrue("Should match: all values in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 80)).eval(FILE);
    Assert.assertTrue("Should match: all values in range", shouldRead);
  }

  @Test
  public void testIntegerGt() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 79)).eval(FILE);
    Assert.assertFalse("Should not match: always false", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 78)).eval(FILE);
    Assert.assertFalse("Should not match: 77 and less not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 30)).eval(FILE);
    Assert.assertFalse("Should not match: 30 not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 29)).eval(FILE);
    Assert.assertTrue("Should match: all values in range", shouldRead);
  }

  @Test
  public void testIntegerGtEq() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 80)).eval(FILE);
    Assert.assertFalse("Should not match: no values in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 79)).eval(FILE);
    Assert.assertFalse("Should not match: 78 and lower are not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 31)).eval(FILE);
    Assert.assertFalse("Should not match: 30 not in range", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 30)).eval(FILE);
    Assert.assertTrue("Should match: all values in range", shouldRead);
  }

  @Test
  public void testIntegerEq() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 5)).eval(FILE);
    Assert.assertFalse("Should not match: all values != 5", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 30)).eval(FILE);
    Assert.assertFalse("Should not match: some values != 30", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 75)).eval(FILE);
    Assert.assertFalse("Should not match: some values != 75", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 79)).eval(FILE);
    Assert.assertFalse("Should not match: some values != 79", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 80)).eval(FILE);
    Assert.assertFalse("Should not match: some values != 80", shouldRead);

    // always_5 has lower bound == upper bound == 5, so eq(5) holds strictly
    shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("always_5", 5)).eval(FILE);
    Assert.assertTrue("Should match: all values == 5", shouldRead);
  }

  @Test
  public void testIntegerNotEq() {
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 5)).eval(FILE);
    Assert.assertTrue("Should match: no values == 5", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 29)).eval(FILE);
    Assert.assertTrue("Should match: no values == 29", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 30)).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 30", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 75)).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 75", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 79)).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 79", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 80)).eval(FILE);
    Assert.assertTrue("Should match: no values == 80", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 85)).eval(FILE);
    Assert.assertTrue("Should read: no values == 85", shouldRead);
  }

  @Test
  public void testIntegerNotEqRewritten() {
    // not(eq(...)) should be rewritten to notEq(...) during binding and behave identically
    boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 5))).eval(FILE);
    Assert.assertTrue("Should match: no values == 5", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 29))).eval(FILE);
    Assert.assertTrue("Should match: no values == 29", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 30))).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 30", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 75))).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 75", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 79))).eval(FILE);
    Assert.assertFalse("Should not match: some value may be == 79", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 80))).eval(FILE);
    Assert.assertTrue("Should match: no values == 80", shouldRead);

    shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 85))).eval(FILE);
    Assert.assertTrue("Should read: no values == 85", shouldRead);
  }
}
| 2,042 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionHelpers.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.TestHelpers;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.expressions.Expressions.alwaysFalse;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.or;
/**
 * Tests the expression simplification helpers in Expressions: or, and, and not should
 * short-circuit around alwaysTrue()/alwaysFalse() and eliminate double negation.
 */
public class TestExpressionHelpers {
  private final UnboundPredicate<?> pred = lessThan("x", 7);

  @Test
  public void testSimplifyOr() {
    Assert.assertEquals("alwaysTrue or pred => alwaysTrue",
        alwaysTrue(), or(alwaysTrue(), pred));
    Assert.assertEquals("pred or alwaysTrue => alwaysTrue",
        alwaysTrue(), or(pred, alwaysTrue()));

    Assert.assertEquals("alwaysFalse or pred => pred",
        pred, or(alwaysFalse(), pred));
    Assert.assertEquals("pred or alwaysFalse => pred",
        pred, or(pred, alwaysFalse()));
  }

  @Test
  public void testSimplifyAnd() {
    Assert.assertEquals("alwaysTrue and pred => pred",
        pred, and(alwaysTrue(), pred));
    Assert.assertEquals("pred and alwaysTrue => pred",
        pred, and(pred, alwaysTrue()));

    Assert.assertEquals("alwaysFalse and pred => alwaysFalse",
        alwaysFalse(), and(alwaysFalse(), pred));
    Assert.assertEquals("pred and alwaysFalse => alwaysFalse",
        alwaysFalse(), and(pred, alwaysFalse()));
  }

  @Test
  public void testSimplifyNot() {
    Assert.assertEquals("not(alwaysTrue) => alwaysFalse",
        alwaysFalse(), not(alwaysTrue()));
    Assert.assertEquals("not(alwaysFalse) => alwaysTrue",
        alwaysTrue(), not(alwaysFalse()));

    Assert.assertEquals("not(not(pred)) => pred",
        pred, not(not(pred)));
  }

  @Test
  public void testNullName() {
    TestHelpers.assertThrows("Should catch null column names when creating expressions",
        NullPointerException.class, "Name cannot be null", () -> equal(null, 5));
  }
}
| 2,043 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestNumericLiteralConversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
/**
 * Tests widening and narrowing conversions between numeric literal types. Narrowing
 * conversions that overflow must produce Literals.aboveMax()/Literals.belowMin(), and
 * decimal conversions must round using HALF_UP to the target scale.
 */
public class TestNumericLiteralConversions {
  @Test
  public void testIntegerToLongConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Long> longLit = lit.to(Types.LongType.get());

    Assert.assertEquals("Value should match", 34L, (long) longLit.value());
  }

  @Test
  public void testIntegerToFloatConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.0F, floatLit.value(), 0.0000000001D);
  }

  @Test
  public void testIntegerToDoubleConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.0D, doubleLit.value(), 0.0000000001D);
  }

  @Test
  public void testIntegerToDecimalConversion() {
    Literal<Integer> lit = Literal.of(34);

    // scale of the target decimal type determines the number of trailing zeros
    Assert.assertEquals("Value should match",
        new BigDecimal("34"), lit.to(Types.DecimalType.of(9, 0)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.00"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.0000"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testLongToIntegerConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Integer> intLit = lit.to(Types.IntegerType.get());

    Assert.assertEquals("Value should match", 34, (int) intLit.value());

    // values outside the int range must convert to the above-max/below-min sentinels
    Assert.assertEquals("Values above Integer.MAX_VALUE should be Literals.aboveMax()",
        Literals.aboveMax(), Literal.of((long) Integer.MAX_VALUE + 1L).to(Types.IntegerType.get()));
    Assert.assertEquals("Values below Integer.MIN_VALUE should be Literals.belowMin()",
        Literals.belowMin(), Literal.of((long) Integer.MIN_VALUE - 1L).to(Types.IntegerType.get()));
  }

  @Test
  public void testLongToFloatConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.0F, floatLit.value(), 0.0000000001D);
  }

  @Test
  public void testLongToDoubleConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.0D, doubleLit.value(), 0.0000000001D);
  }

  @Test
  public void testLongToDecimalConversion() {
    Literal<Long> lit = Literal.of(34L);

    Assert.assertEquals("Value should match",
        new BigDecimal("34"), lit.to(Types.DecimalType.of(9, 0)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.00"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.0000"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testFloatToDoubleConversion() {
    Literal<Float> lit = Literal.of(34.56F);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.56D, doubleLit.value(), 0.001D);
  }

  @Test
  public void testFloatToDecimalConversion() {
    Literal<Float> lit = Literal.of(34.56F);

    Assert.assertEquals("Value should round using HALF_UP",
        new BigDecimal("34.6"), lit.to(Types.DecimalType.of(9, 1)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.56"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.5600"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testDoubleToFloatConversion() {
    Literal<Double> lit = Literal.of(34.56D);
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.56F, floatLit.value(), 0.001D);

    // this adjusts Float.MAX_VALUE using multipliers because most integer adjustments are lost by
    // floating point precision.
    Assert.assertEquals("Values above Float.MAX_VALUE should be Literals.aboveMax()",
        Literals.aboveMax(), Literal.of(2 * ((double) Float.MAX_VALUE)).to(Types.FloatType.get()));
    Assert.assertEquals("Values below -Float.MAX_VALUE should be Literals.belowMin()",
        Literals.belowMin(), Literal.of(-2 * ((double) Float.MAX_VALUE)).to(Types.FloatType.get()));
  }

  @Test
  public void testDoubleToDecimalConversion() {
    Literal<Double> lit = Literal.of(34.56D);

    Assert.assertEquals("Value should round using HALF_UP",
        new BigDecimal("34.6"), lit.to(Types.DecimalType.of(9, 1)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.56"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.5600"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testDecimalToDecimalConversion() {
    Literal<BigDecimal> lit = Literal.of(new BigDecimal("34.11"));

    // precision changes are allowed (same scale); scale changes are not
    Assert.assertSame("Should return identical object when converting to same scale",
        lit, lit.to(Types.DecimalType.of(9, 2)));
    Assert.assertSame("Should return identical object when converting to same scale",
        lit, lit.to(Types.DecimalType.of(11, 2)));

    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 0)));
    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 1)));
    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 3)));
  }
}
| 2,044 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionBinding.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.expressions.Expressions.alwaysFalse;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestExpressionBinding {
private static final StructType STRUCT = StructType.of(
required(0, "x", Types.IntegerType.get()),
required(1, "y", Types.IntegerType.get()),
required(2, "z", Types.IntegerType.get())
);
@Test
public void testMissingReference() {
Expression expr = and(equal("t", 5), equal("x", 7));
try {
Binder.bind(STRUCT, expr);
Assert.fail("Should not successfully bind to struct without field 't'");
} catch (ValidationException e) {
Assert.assertTrue("Should complain about missing field",
e.getMessage().contains("Cannot find field 't' in struct:"));
}
}
@Test(expected = IllegalStateException.class)
public void testBoundExpressionFails() {
Expression expr = not(equal("x", 7));
Binder.bind(STRUCT, Binder.bind(STRUCT, expr));
}
@Test
public void testSingleReference() {
Expression expr = not(equal("x", 7));
TestHelpers.assertAllReferencesBound("Single reference", Binder.bind(STRUCT, expr));
}
@Test
public void testMultipleReferences() {
Expression expr = or(and(equal("x", 7), lessThan("y", 100)), greaterThan("z", -100));
TestHelpers.assertAllReferencesBound("Multiple references", Binder.bind(STRUCT, expr));
}
@Test
public void testAnd() {
Expression expr = and(equal("x", 7), lessThan("y", 100));
Expression boundExpr = Binder.bind(STRUCT, expr);
TestHelpers.assertAllReferencesBound("And", boundExpr);
// make sure the result is an And
And and = TestHelpers.assertAndUnwrap(boundExpr, And.class);
// make sure the refs are for the right fields
BoundPredicate<?> left = TestHelpers.assertAndUnwrap(and.left());
Assert.assertEquals("Should bind x correctly", 0, left.ref().fieldId());
BoundPredicate<?> right = TestHelpers.assertAndUnwrap(and.right());
Assert.assertEquals("Should bind y correctly", 1, right.ref().fieldId());
}
@Test
public void testOr() {
Expression expr = or(greaterThan("z", -100), lessThan("y", 100));
Expression boundExpr = Binder.bind(STRUCT, expr);
TestHelpers.assertAllReferencesBound("Or", boundExpr);
// make sure the result is an Or
Or or = TestHelpers.assertAndUnwrap(boundExpr, Or.class);
// make sure the refs are for the right fields
BoundPredicate<?> left = TestHelpers.assertAndUnwrap(or.left());
Assert.assertEquals("Should bind z correctly", 2, left.ref().fieldId());
BoundPredicate<?> right = TestHelpers.assertAndUnwrap(or.right());
Assert.assertEquals("Should bind y correctly", 1, right.ref().fieldId());
}
@Test
public void testNot() {
Expression expr = not(equal("x", 7));
Expression boundExpr = Binder.bind(STRUCT, expr);
TestHelpers.assertAllReferencesBound("Not", boundExpr);
// make sure the result is a Not
Not not = TestHelpers.assertAndUnwrap(boundExpr, Not.class);
// make sure the refs are for the right fields
BoundPredicate<?> child = TestHelpers.assertAndUnwrap(not.child());
Assert.assertEquals("Should bind x correctly", 0, child.ref().fieldId());
}
@Test
public void testAlwaysTrue() {
Assert.assertEquals("Should not change alwaysTrue",
alwaysTrue(),
Binder.bind(STRUCT, alwaysTrue()));
}
@Test
public void testAlwaysFalse() {
Assert.assertEquals("Should not change alwaysFalse",
alwaysFalse(),
Binder.bind(STRUCT, alwaysFalse()));
}
@Test
public void testBasicSimplification() {
// this tests that a basic simplification is done by calling the helpers in Expressions. those
// are more thoroughly tested in TestExpressionHelpers.
// the second predicate is always true once it is bound because z is an integer and the literal
// is less than any 32-bit integer value
Assert.assertEquals("Should simplify or expression to alwaysTrue",
alwaysTrue(), Binder.bind(STRUCT, or(lessThan("y", 100), greaterThan("z", -9999999999L))));
// similarly, the second predicate is always false
Assert.assertEquals("Should simplify and expression to predicate",
alwaysFalse(), Binder.bind(STRUCT, and(lessThan("y", 100), lessThan("z", -9999999999L))));
Expression bound = Binder.bind(STRUCT, not(not(lessThan("y", 100))));
BoundPredicate<?> pred = TestHelpers.assertAndUnwrap(bound);
Assert.assertEquals("Should have the correct bound field", 1, pred.ref().fieldId());
}
}
| 2,045 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestInclusiveManifestEvaluator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.expressions;
import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.ManifestFile;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.nio.ByteBuffer;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Conversions.toByteBuffer;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestInclusiveManifestEvaluator {
  private static final Schema SCHEMA = new Schema(
      required(1, "id", Types.IntegerType.get()),
      optional(4, "all_nulls", Types.StringType.get()),
      optional(5, "some_nulls", Types.StringType.get()),
      optional(6, "no_nulls", Types.StringType.get())
  );
  // identity-partitioned on every column so the manifest's partition field summaries carry
  // per-column bounds and null flags for the evaluator to use
  private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
      .withSpecId(0)
      .identity("id")
      .identity("all_nulls")
      .identity("some_nulls")
      .identity("no_nulls")
      .build();
  private static final ByteBuffer INT_MIN = toByteBuffer(Types.IntegerType.get(), 30);
  private static final ByteBuffer INT_MAX = toByteBuffer(Types.IntegerType.get(), 79);
  private static final ByteBuffer STRING_MIN = toByteBuffer(Types.StringType.get(), "a");
  private static final ByteBuffer STRING_MAX = toByteBuffer(Types.StringType.get(), "z");
  // manifest with no partition field summaries: the evaluator must keep it
  private static final ManifestFile NO_STATS = new TestHelpers.TestManifestFile(
      "manifest-list.avro", 1024, 0, System.currentTimeMillis(), null, null, null, null);
  // manifest with summaries: id in [30, 79] with no nulls; all_nulls contains nulls with no
  // bounds; some_nulls contains nulls with bounds [a, z]; no_nulls has bounds and no nulls
  private static final ManifestFile FILE = new TestHelpers.TestManifestFile("manifest-list.avro",
      1024, 0, System.currentTimeMillis(), 5, 10, 0, ImmutableList.of(
      new TestHelpers.TestFieldSummary(false, INT_MIN, INT_MAX),
      new TestHelpers.TestFieldSummary(true, null, null),
      new TestHelpers.TestFieldSummary(true, STRING_MIN, STRING_MAX),
      new TestHelpers.TestFieldSummary(false, STRING_MIN, STRING_MAX)));
@Test
public void testAllNulls() {
boolean shouldRead = new InclusiveManifestEvaluator(SPEC, notNull("all_nulls")).eval(FILE);
Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, notNull("some_nulls")).eval(FILE);
Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, notNull("no_nulls")).eval(FILE);
Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead);
}
@Test
public void testNoNulls() {
boolean shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("all_nulls")).eval(FILE);
Assert.assertTrue("Should read: at least one null value in all null column", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("some_nulls")).eval(FILE);
Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("no_nulls")).eval(FILE);
Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead);
}
  @Test
  public void testMissingColumn() {
    // binding should fail for a column that does not exist in the spec's schema
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new InclusiveManifestEvaluator(SPEC, lessThan("missing", 5)).eval(FILE));
  }
@Test
public void testMissingStats() {
Expression[] exprs = new Expression[] {
lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70),
greaterThan("id", 78), greaterThanOrEqual("id", 90), notEqual("id", 101),
isNull("id"), notNull("id")
};
for (Expression expr : exprs) {
boolean shouldRead = new InclusiveManifestEvaluator(SPEC, expr).eval(NO_STATS);
Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead);
}
}
@Test
public void testNot() {
// this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
boolean shouldRead = new InclusiveManifestEvaluator(SPEC, not(lessThan("id", 5))).eval(FILE);
Assert.assertTrue("Should read: not(false)", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, not(greaterThan("id", 5))).eval(FILE);
Assert.assertFalse("Should skip: not(true)", shouldRead);
}
@Test
public void testAnd() {
// this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
boolean shouldRead = new InclusiveManifestEvaluator(
SPEC, and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE);
Assert.assertFalse("Should skip: and(false, false)", shouldRead);
shouldRead = new InclusiveManifestEvaluator(
SPEC, and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE);
Assert.assertTrue("Should read: and(true, true)", shouldRead);
}
@Test
public void testOr() {
// this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
boolean shouldRead = new InclusiveManifestEvaluator(
SPEC, or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE);
Assert.assertFalse("Should skip: or(false, false)", shouldRead);
shouldRead = new InclusiveManifestEvaluator(
SPEC, or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE);
Assert.assertTrue("Should read: or(false, true)", shouldRead);
}
@Test
public void testIntegerLt() {
boolean shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 5)).eval(FILE);
Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 30)).eval(FILE);
Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 31)).eval(FILE);
Assert.assertTrue("Should read: one possible id", shouldRead);
shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 79)).eval(FILE);
Assert.assertTrue("Should read: may possible ids", shouldRead);
}
@Test
public void testIntegerLtEq() {
  // below the lower bound: no row can satisfy id <= 5
  boolean matches = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 5)).eval(FILE);
  Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", matches);
  // just below the lower bound
  matches = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 29)).eval(FILE);
  Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", matches);
  // exactly the lower bound: a single id could match
  matches = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 30)).eval(FILE);
  Assert.assertTrue("Should read: one possible id", matches);
  // well inside the range
  matches = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 79)).eval(FILE);
  Assert.assertTrue("Should read: many possible ids", matches);
}
@Test
public void testIntegerGt() {
  boolean shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 85)).eval(FILE);
  // fixed message: 85 is ABOVE the upper bound, so the relation is 85 > 79, not 85 < 79
  Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 79)).eval(FILE);
  Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 78)).eval(FILE);
  Assert.assertTrue("Should read: one possible id", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 75)).eval(FILE);
  // fixed message typo "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
  Assert.assertTrue("Should read: many possible ids", shouldRead);
}
@Test
public void testIntegerGtEq() {
  boolean shouldRead = new InclusiveManifestEvaluator(
      SPEC, greaterThanOrEqual("id", 85)).eval(FILE);
  // fixed message: 85 is ABOVE the upper bound, so the relation is 85 > 79, not 85 < 79
  Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(
      SPEC, greaterThanOrEqual("id", 80)).eval(FILE);
  Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(
      SPEC, greaterThanOrEqual("id", 79)).eval(FILE);
  Assert.assertTrue("Should read: one possible id", shouldRead);
  shouldRead = new InclusiveManifestEvaluator(
      SPEC, greaterThanOrEqual("id", 75)).eval(FILE);
  // fixed message typo "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
  Assert.assertTrue("Should read: many possible ids", shouldRead);
}
@Test
public void testIntegerEq() {
  // values strictly below the lower bound cannot match
  boolean read = new InclusiveManifestEvaluator(SPEC, equal("id", 5)).eval(FILE);
  Assert.assertFalse("Should not read: id below lower bound", read);
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 29)).eval(FILE);
  Assert.assertFalse("Should not read: id below lower bound", read);
  // values inside the inclusive range [lower, upper] may match
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 30)).eval(FILE);
  Assert.assertTrue("Should read: id equal to lower bound", read);
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 75)).eval(FILE);
  Assert.assertTrue("Should read: id between lower and upper bounds", read);
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 79)).eval(FILE);
  Assert.assertTrue("Should read: id equal to upper bound", read);
  // values strictly above the upper bound cannot match
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 80)).eval(FILE);
  Assert.assertFalse("Should not read: id above upper bound", read);
  read = new InclusiveManifestEvaluator(SPEC, equal("id", 85)).eval(FILE);
  Assert.assertFalse("Should not read: id above upper bound", read);
}
@Test
public void testIntegerNotEq() {
  // notEqual can never prove that a manifest contains no matching rows, so every
  // case below must be readable regardless of where the value falls in the range
  int[] ids = {5, 29, 30, 75, 79, 80, 85};
  String[] cases = {
      "id below lower bound",
      "id below lower bound",
      "id equal to lower bound",
      "id between lower and upper bounds",
      "id equal to upper bound",
      "id above upper bound",
      "id above upper bound"
  };
  for (int i = 0; i < ids.length; i += 1) {
    boolean shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", ids[i])).eval(FILE);
    Assert.assertTrue("Should read: " + cases[i], shouldRead);
  }
}
@Test
public void testIntegerNotEqRewritten() {
  // not(equal(...)) should be rewritten to notEqual and therefore behave exactly like
  // testIntegerNotEq: the manifest can never be skipped
  int[] ids = {5, 29, 30, 75, 79, 80, 85};
  String[] cases = {
      "id below lower bound",
      "id below lower bound",
      "id equal to lower bound",
      "id between lower and upper bounds",
      "id equal to upper bound",
      "id above upper bound",
      "id above upper bound"
  };
  for (int i = 0; i < ids.length; i += 1) {
    boolean shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", ids[i]))).eval(FILE);
    Assert.assertTrue("Should read: " + cases[i], shouldRead);
  }
}
}
| 2,046 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionSerialization.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.expressions.Expression.Operation;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
/**
 * Verifies that every expression variety survives a Java-serialization round trip
 * (via {@code TestHelpers.roundTripSerialize}) with its structure intact.
 */
public class TestExpressionSerialization {
  @Test
  public void testExpressions() throws Exception {
    // schema used only to produce a bound predicate for the last test case
    Schema schema = new Schema(
        Types.NestedField.optional(34, "a", Types.IntegerType.get())
    );
    // one example of each expression node type, plus a bound predicate
    Expression[] expressions = new Expression[] {
        Expressions.alwaysFalse(),
        Expressions.alwaysTrue(),
        Expressions.lessThan("x", 5),
        Expressions.lessThanOrEqual("y", -3),
        Expressions.greaterThan("z", 0),
        Expressions.greaterThanOrEqual("t", 129),
        Expressions.equal("col", "data"),
        Expressions.notEqual("col", "abc"),
        Expressions.notNull("maybeNull"),
        Expressions.isNull("maybeNull2"),
        Expressions.not(Expressions.greaterThan("a", 10)),
        Expressions.and(Expressions.greaterThanOrEqual("a", 0), Expressions.lessThan("a", 3)),
        Expressions.or(Expressions.lessThan("a", 0), Expressions.greaterThan("a", 10)),
        Expressions.equal("a", 5).bind(schema.asStruct())
    };
    for (Expression expression : expressions) {
      Expression copy = TestHelpers.roundTripSerialize(expression);
      Assert.assertTrue(
          "Expression should equal the deserialized copy: " + expression + " != " + copy,
          equals(expression, copy));
    }
  }

  // You may be wondering why this isn't implemented as Expression.equals. The reason is that
  // expression equality implies equivalence, which is wider than structural equality. For example,
  // lessThan("a", 3) is equivalent to not(greaterThanOrEqual("a", 4)). To avoid confusion, equals
  // only guarantees object identity.
  private static boolean equals(Expression left, Expression right) {
    if (left.op() != right.op()) {
      return false;
    }

    // predicates are compared by reference and literal; everything else recurses by structure
    if (left instanceof Predicate) {
      // requires the same concrete predicate class (bound vs. unbound are not equal)
      if (!(left.getClass().isInstance(right))) {
        return false;
      }
      return equals((Predicate) left, (Predicate) right);
    }

    switch (left.op()) {
      case FALSE:
      case TRUE:
        return true;
      case NOT:
        return equals(((Not) left).child(), ((Not) right).child());
      case AND:
        return (
            equals(((And) left).left(), ((And) right).left()) &&
            equals(((And) left).right(), ((And) right).right())
        );
      case OR:
        return (
            equals(((Or) left).left(), ((Or) right).left()) &&
            equals(((Or) left).right(), ((Or) right).right())
        );
      default:
        // unknown node type: treat as not equal rather than guessing
        return false;
    }
  }

  // structural equality for predicates: same operation, same reference, and (for non-null
  // checks) literals that compare as equal under the literal's own comparator
  @SuppressWarnings("unchecked")
  private static boolean equals(Predicate left, Predicate right) {
    if (left.op() != right.op()) {
      return false;
    }

    if (!equals(left.ref(), right.ref())) {
      return false;
    }

    // IS_NULL / NOT_NULL carry no literal, so matching op and ref is sufficient
    if (left.op() == Operation.IS_NULL || left.op() == Operation.NOT_NULL) {
      return true;
    }

    return left.literal().comparator()
        .compare(left.literal().value(), right.literal().value()) == 0;
  }

  // references are equal when both are named with the same name, or both are bound to the
  // same field id and type; a named reference is never equal to a bound one
  private static boolean equals(Reference left, Reference right) {
    if (left instanceof NamedReference) {
      if (!(right instanceof NamedReference)) {
        return false;
      }
      NamedReference lref = (NamedReference) left;
      NamedReference rref = (NamedReference) right;
      return lref.name.equals(rref.name);
    } else if (left instanceof BoundReference) {
      if (!(right instanceof BoundReference)) {
        return false;
      }
      BoundReference lref = (BoundReference) left;
      BoundReference rref = (BoundReference) right;
      return (
          lref.fieldId() == rref.fieldId() &&
          lref.type().equals(rref.type())
      );
    }
    return false;
  }
}
| 2,047 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestInclusiveMetricsEvaluator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.TestHelpers.Row;
import com.netflix.iceberg.TestHelpers.TestDataFile;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.IntegerType;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Conversions.toByteBuffer;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for {@code InclusiveMetricsEvaluator}, which decides from file-level column metrics
 * (record count, value/null counts, and lower/upper bounds) whether a data file may contain
 * rows matching an expression.
 * <p>
 * {@code FILE} has 50 records and an id range of [30, 79]; bounds are inclusive.
 */
public class TestInclusiveMetricsEvaluator {
  private static final Schema SCHEMA = new Schema(
      required(1, "id", IntegerType.get()),
      optional(2, "no_stats", Types.IntegerType.get()),
      required(3, "required", Types.StringType.get()),
      optional(4, "all_nulls", Types.StringType.get()),
      optional(5, "some_nulls", Types.StringType.get()),
      optional(6, "no_nulls", Types.StringType.get())
  );

  private static final DataFile FILE = new TestDataFile("file.avro", Row.of(), 50,
      // any value counts, including nulls
      ImmutableMap.of(
          4, 50L,
          5, 50L,
          6, 50L),
      // null value counts
      ImmutableMap.of(
          4, 50L,
          5, 10L,
          6, 0L),
      // lower bounds
      ImmutableMap.of(
          1, toByteBuffer(IntegerType.get(), 30)),
      // upper bounds
      ImmutableMap.of(
          1, toByteBuffer(IntegerType.get(), 79)));

  @Test
  public void testAllNulls() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("all_nulls")).eval(FILE);
    Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("some_nulls")).eval(FILE);
    Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("no_nulls")).eval(FILE);
    Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead);
  }

  @Test
  public void testNoNulls() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("all_nulls")).eval(FILE);
    Assert.assertTrue("Should read: at least one null value in all null column", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("some_nulls")).eval(FILE);
    Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("no_nulls")).eval(FILE);
    Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead);
  }

  @Test
  public void testRequiredColumn() {
    // required columns can be pruned for null checks without consulting stats
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("required")).eval(FILE);
    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("required")).eval(FILE);
    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
  }

  @Test
  public void testMissingColumn() {
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new InclusiveMetricsEvaluator(SCHEMA, lessThan("missing", 5)).eval(FILE));
  }

  @Test
  public void testMissingStats() {
    // without stats the evaluator must be conservative and read the file
    DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50);

    Expression[] exprs = new Expression[] {
        lessThan("no_stats", 5), lessThanOrEqual("no_stats", 30), equal("no_stats", 70),
        greaterThan("no_stats", 78), greaterThanOrEqual("no_stats", 90), notEqual("no_stats", 101),
        isNull("no_stats"), notNull("no_stats")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, expr).eval(missingStats);
      Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead);
    }
  }

  @Test
  public void testZeroRecordFile() {
    // a file with no records can never match, whatever the expression
    DataFile empty = new TestDataFile("file.parquet", Row.of(), 0);

    Expression[] exprs = new Expression[] {
        lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78),
        greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"),
        notNull("some_nulls")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, expr).eval(empty);
      Assert.assertFalse("Should never read 0-record file: " + expr, shouldRead);
    }
  }

  @Test
  public void testNot() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(lessThan("id", 5))).eval(FILE);
    Assert.assertTrue("Should read: not(false)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(greaterThan("id", 5))).eval(FILE);
    Assert.assertFalse("Should skip: not(true)", shouldRead);
  }

  @Test
  public void testAnd() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA,
        and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE);
    Assert.assertFalse("Should skip: and(false, false)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA,
        and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE);
    Assert.assertTrue("Should read: and(true, true)", shouldRead);
  }

  @Test
  public void testOr() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE);
    Assert.assertFalse("Should skip: or(false, false)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE);
    Assert.assertTrue("Should read: or(false, true)", shouldRead);
  }

  @Test
  public void testIntegerLt() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 5)).eval(FILE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 30)).eval(FILE);
    Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 31)).eval(FILE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 79)).eval(FILE);
    // fixed message typo "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerLtEq() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 5)).eval(FILE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 29)).eval(FILE);
    Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 30)).eval(FILE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 79)).eval(FILE);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGt() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 85)).eval(FILE);
    // fixed message: 85 is ABOVE the upper bound, so the relation is 85 > 79, not 85 < 79
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 79)).eval(FILE);
    Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 78)).eval(FILE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 75)).eval(FILE);
    // fixed message typo "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGtEq() {
    boolean shouldRead = new InclusiveMetricsEvaluator(
        SCHEMA, greaterThanOrEqual("id", 85)).eval(FILE);
    // fixed message: 85 is ABOVE the upper bound, so the relation is 85 > 79, not 85 < 79
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 80)).eval(FILE);
    Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 79)).eval(FILE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 75)).eval(FILE);
    // fixed message typo "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerEq() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 5)).eval(FILE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 29)).eval(FILE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 30)).eval(FILE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 75)).eval(FILE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 79)).eval(FILE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 80)).eval(FILE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 85)).eval(FILE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEq() {
    // notEqual can never prove that a file contains no matching rows, so all cases read
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 5)).eval(FILE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 29)).eval(FILE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 30)).eval(FILE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 75)).eval(FILE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 79)).eval(FILE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 80)).eval(FILE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 85)).eval(FILE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEqRewritten() {
    // not(equal(...)) should be rewritten to notEqual and behave exactly like testIntegerNotEq
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 5))).eval(FILE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 29))).eval(FILE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 30))).eval(FILE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 75))).eval(FILE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 79))).eval(FILE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 80))).eval(FILE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 85))).eval(FILE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }
}
| 2,048 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestMiscLiteralConversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
public class TestMiscLiteralConversions {
@Test
public void testIdentityConversions() {
List<Pair<Literal<?>, Type>> pairs = Arrays.asList(
Pair.of(Literal.of(true), Types.BooleanType.get()),
Pair.of(Literal.of(34), Types.IntegerType.get()),
Pair.of(Literal.of(34L), Types.LongType.get()),
Pair.of(Literal.of(34.11F), Types.FloatType.get()),
Pair.of(Literal.of(34.55D), Types.DoubleType.get()),
Pair.of(Literal.of("34.55"), Types.DecimalType.of(9, 2)),
Pair.of(Literal.of("2017-08-18"), Types.DateType.get()),
Pair.of(Literal.of("14:21:01.919"), Types.TimeType.get()),
Pair.of(Literal.of("2017-08-18T14:21:01.919"), Types.TimestampType.withoutZone()),
Pair.of(Literal.of("abc"), Types.StringType.get()),
Pair.of(Literal.of(UUID.randomUUID()), Types.UUIDType.get()),
Pair.of(Literal.of(new byte[] {0, 1, 2}), Types.FixedType.ofLength(3)),
Pair.of(Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2})), Types.BinaryType.get())
);
for (Pair<Literal<?>, Type> pair : pairs) {
Literal<?> lit = pair.first();
Type type = pair.second();
// first, convert the literal to the target type (date/times start as strings)
Literal<?> expected = lit.to(type);
// then check that converting again to the same type results in an identical literal
Assert.assertSame("Converting twice should produce identical values",
expected, expected.to(type));
}
}
@Test
public void testBinaryToFixed() {
Literal<ByteBuffer> lit = Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2}));
Literal<ByteBuffer> fixedLit = lit.to(Types.FixedType.ofLength(3));
Assert.assertNotNull("Should allow conversion to correct fixed length", fixedLit);
Assert.assertEquals("Conversion should not change value",
lit.value().duplicate(), fixedLit.value().duplicate());
Assert.assertNull("Should not allow conversion to different fixed length",
lit.to(Types.FixedType.ofLength(4)));
Assert.assertNull("Should not allow conversion to different fixed length",
lit.to(Types.FixedType.ofLength(2)));
}
@Test
public void testFixedToBinary() {
Literal<ByteBuffer> lit = Literal.of(new byte[] {0, 1, 2});
Literal<ByteBuffer> binaryLit = lit.to(Types.BinaryType.get());
Assert.assertNotNull("Should allow conversion to binary", binaryLit);
Assert.assertEquals("Conversion should not change value",
lit.value().duplicate(), binaryLit.value().duplicate());
}
@Test
public void testInvalidBooleanConversions() {
testInvalidConversions(Literal.of(true),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 2),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidIntegerConversions() {
testInvalidConversions(Literal.of(34),
Types.BooleanType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidLongConversions() {
testInvalidConversions(Literal.of(34L),
Types.BooleanType.get(),
Types.DateType.get(),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidFloatConversions() {
testInvalidConversions(Literal.of(34.11F),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidDoubleConversions() {
testInvalidConversions(Literal.of(34.11D),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidDateConversions() {
testInvalidConversions(Literal.of("2017-08-18").to(Types.DateType.get()),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 4),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidTimeConversions() {
testInvalidConversions(
Literal.of("14:21:01.919").to(Types.TimeType.get()),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 4),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidTimestampConversions() {
testInvalidConversions(
Literal.of("2017-08-18T14:21:01.919").to(Types.TimestampType.withoutZone()),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.TimeType.get(),
Types.DecimalType.of(9, 4),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidDecimalConversions() {
testInvalidConversions(Literal.of(new BigDecimal("34.11")),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 4),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidStringConversions() {
// Strings can be used for types that are difficult to construct, like decimal or timestamp,
// but are not intended to support parsing strings to any type
testInvalidConversions(Literal.of("abc"),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidUUIDConversions() {
testInvalidConversions(Literal.of(UUID.randomUUID()),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 2),
Types.StringType.get(),
Types.FixedType.ofLength(1),
Types.BinaryType.get()
);
}
@Test
public void testInvalidFixedConversions() {
testInvalidConversions(Literal.of(new byte[] {0, 1, 2}),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 2),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1)
);
}
@Test
public void testInvalidBinaryConversions() {
testInvalidConversions(Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2})),
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withZone(),
Types.TimestampType.withoutZone(),
Types.DecimalType.of(9, 2),
Types.StringType.get(),
Types.UUIDType.get(),
Types.FixedType.ofLength(1)
);
}
private void testInvalidConversions(Literal<?> lit, Type... invalidTypes) {
for (Type type : invalidTypes) {
Assert.assertNull(
lit.value().getClass().getName() + " literal to " + type + " is not allowed",
lit.to(type));
}
}
private static class Pair<X, Y> {
public static <X, Y> Pair<X, Y> of(X x, Y y) {
return new Pair<>(x, y);
}
private final X x;
private final Y y;
private Pair(X x, Y y) {
this.x = x;
this.y = y;
}
public X first() {
return x;
}
public Y second() {
return y;
}
}
}
| 2,049 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestEvaluatior.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.expressions;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.avro.util.Utf8;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.expressions.Expressions.alwaysFalse;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for {@link Evaluator} over a simple three-column struct.
 * <p>
 * NOTE(review): the class name is misspelled ("Evaluatior"); renaming would require renaming
 * the file as well, so it is left unchanged here.
 */
public class TestEvaluatior {
  // x and y are required ints, z is an optional int so it can be null in test rows
  private static final StructType STRUCT = StructType.of(
      required(13, "x", Types.IntegerType.get()),
      required(14, "y", Types.IntegerType.get()),
      optional(15, "z", Types.IntegerType.get())
  );

  @Test
  public void testLessThan() {
    Evaluator evaluator = new Evaluator(STRUCT, lessThan("x", 7));
    Assert.assertFalse("7 < 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertTrue("6 < 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
  }

  @Test
  public void testLessThanOrEqual() {
    Evaluator evaluator = new Evaluator(STRUCT, lessThanOrEqual("x", 7));
    Assert.assertTrue("7 <= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertTrue("6 <= 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
    Assert.assertFalse("8 <= 7 => false", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
  }

  @Test
  public void testGreaterThan() {
    Evaluator evaluator = new Evaluator(STRUCT, greaterThan("x", 7));
    Assert.assertFalse("7 > 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertFalse("6 > 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
    Assert.assertTrue("8 > 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
  }

  @Test
  public void testGreaterThanOrEqual() {
    Evaluator evaluator = new Evaluator(STRUCT, greaterThanOrEqual("x", 7));
    Assert.assertTrue("7 >= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertFalse("6 >= 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
    Assert.assertTrue("8 >= 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null)));
  }

  @Test
  public void testEqual() {
    Evaluator evaluator = new Evaluator(STRUCT, equal("x", 7));
    Assert.assertTrue("7 == 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertFalse("6 == 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
  }

  @Test
  public void testNotEqual() {
    Evaluator evaluator = new Evaluator(STRUCT, notEqual("x", 7));
    Assert.assertFalse("7 != 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null)));
    Assert.assertTrue("6 != 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null)));
  }

  @Test
  public void testAlwaysTrue() {
    Evaluator evaluator = new Evaluator(STRUCT, alwaysTrue());
    Assert.assertTrue("always true", evaluator.eval(TestHelpers.Row.of()));
  }

  @Test
  public void testAlwaysFalse() {
    Evaluator evaluator = new Evaluator(STRUCT, alwaysFalse());
    Assert.assertFalse("always false", evaluator.eval(TestHelpers.Row.of()));
  }

  @Test
  public void testIsNull() {
    Evaluator evaluator = new Evaluator(STRUCT, isNull("z"));
    Assert.assertTrue("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null)));
    Assert.assertFalse("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3)));
  }

  @Test
  public void testNotNull() {
    Evaluator evaluator = new Evaluator(STRUCT, notNull("z"));
    Assert.assertFalse("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null)));
    Assert.assertTrue("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3)));
  }

  @Test
  public void testAnd() {
    Evaluator evaluator = new Evaluator(STRUCT, and(equal("x", 7), notNull("z")));
    Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3)));
    Assert.assertFalse("8, 3 => false", evaluator.eval(TestHelpers.Row.of(8, 0, 3)));
    Assert.assertFalse("7, null => false", evaluator.eval(TestHelpers.Row.of(7, 0, null)));
    Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null)));
  }

  @Test
  public void testOr() {
    Evaluator evaluator = new Evaluator(STRUCT, or(equal("x", 7), notNull("z")));
    Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3)));
    Assert.assertTrue("8, 3 => true", evaluator.eval(TestHelpers.Row.of(8, 0, 3)));
    Assert.assertTrue("7, null => true", evaluator.eval(TestHelpers.Row.of(7, 0, null)));
    Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null)));
  }

  @Test
  public void testNot() {
    Evaluator evaluator = new Evaluator(STRUCT, not(equal("x", 7)));
    Assert.assertFalse("not(7 == 7) => false", evaluator.eval(TestHelpers.Row.of(7)));
    // fixed: the message previously claimed "=> false" although the assertion expects true
    Assert.assertTrue("not(8 == 7) => true", evaluator.eval(TestHelpers.Row.of(8)));
  }

  @Test
  public void testCharSeqValue() {
    // Strings and Avro Utf8 values must compare by content, not by class.
    StructType struct = StructType.of(required(34, "s", Types.StringType.get()));
    Evaluator evaluator = new Evaluator(struct, equal("s", "abc"));
    Assert.assertTrue("string(abc) == utf8(abc) => true",
        evaluator.eval(TestHelpers.Row.of(new Utf8("abc"))));
    Assert.assertFalse("string(abc) == utf8(abcd) => false",
        evaluator.eval(TestHelpers.Row.of(new Utf8("abcd"))));
  }
}
| 2,050 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/events/TestListeners.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.events;
import org.junit.Assert;
import org.junit.Test;
/** Tests that {@link Listeners} dispatches events to all registered listeners by event type. */
public class TestListeners {
  static {
    // Register the shared listener once for both event types before any test runs.
    Listeners.register(TestListener.get()::event1, Event1.class);
    Listeners.register(TestListener.get()::event2, Event2.class);
  }

  public static class Event1 {
  }

  public static class Event2 {
  }

  public static class TestListener {
    private static final TestListener INSTANCE = new TestListener();

    public static TestListener get() {
      return INSTANCE;
    }

    // most recently received event of each type; null until one is delivered
    private Event1 lastEvent1 = null;
    private Event2 lastEvent2 = null;

    public void event1(Event1 e) {
      this.lastEvent1 = e;
    }

    public void event2(Event2 e) {
      this.lastEvent2 = e;
    }
  }

  @Test
  public void testEvent1() {
    Event1 event = new Event1();
    Listeners.notifyAll(event);
    Assert.assertEquals(event, TestListener.get().lastEvent1);
  }

  @Test
  public void testEvent2() {
    Event2 event = new Event2();
    Listeners.notifyAll(event);
    Assert.assertEquals(event, TestListener.get().lastEvent2);
  }

  @Test
  public void testMultipleListeners() {
    TestListener other = new TestListener();
    Listeners.register(other::event1, Event1.class);

    Event1 event = new Event1();
    Listeners.notifyAll(event);

    // both the shared listener and the newly registered one receive the event
    Assert.assertEquals(event, TestListener.get().lastEvent1);
    Assert.assertEquals(event, other.lastEvent1);
  }
}
| 2,051 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/FileFormat.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.types.Comparators;
/**
* Enum of supported file formats.
*/
public enum FileFormat {
  ORC("orc"),
  PARQUET("parquet"),
  AVRO("avro");

  // extension including the leading dot, e.g. ".parquet"
  private final String ext;

  FileFormat(String ext) {
    this.ext = "." + ext;
  }

  /**
   * Returns filename with this format's extension added, if necessary.
   *
   * @param filename a filename or path
   * @return if the ext is present, the filename, otherwise the filename with ext added
   */
  public String addExtension(String filename) {
    return filename.endsWith(ext) ? filename : filename + ext;
  }

  /**
   * Determines the file format from a filename's extension.
   *
   * @param filename a filename or path
   * @return the matching format, or null when the name has no dot or an unknown extension
   */
  public static FileFormat fromFileName(CharSequence filename) {
    int dotIndex = lastIndexOf('.', filename);
    if (dotIndex < 0) {
      return null;
    }

    // candidate extension, including the dot, to mirror the stored ext values
    CharSequence candidate = filename.subSequence(dotIndex, filename.length());
    for (FileFormat format : values()) {
      if (Comparators.charSequences().compare(format.ext, candidate) == 0) {
        return format;
      }
    }

    return null;
  }

  // scans backwards so the last dot wins; CharSequence has no built-in lastIndexOf
  private static int lastIndexOf(char c, CharSequence seq) {
    for (int i = seq.length() - 1; i >= 0; i--) {
      if (seq.charAt(i) == c) {
        return i;
      }
    }
    return -1;
  }
}
| 2,052 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/CombinedScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.Collection;
/**
* A scan task made of several ranges from files.
*/
public interface CombinedScanTask extends ScanTask {
  /**
   * Return the {@link FileScanTask tasks} in this combined task.
   * @return a Collection of FileScanTask instances.
   */
  Collection<FileScanTask> files();

  /** Returns this task, since it is already a combined scan task. */
  @Override
  default CombinedScanTask asCombinedScanTask() {
    return this;
  }
}
| 2,053 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Filterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.expressions.Expression;
import java.util.Collection;
/**
* Methods to filter files in a snapshot or manifest when reading.
*
* @param <T> Java class returned by filter methods, also filterable
*/
public interface Filterable<T extends Filterable<T>> extends Iterable<DataFile> {
  /**
   * Selects the columns of a file manifest to read.
   * <p>
   * If columns are set multiple times, the last set of columns will be read.
   * <p>
   * If the Filterable object has partition filters, they will be added to the returned partial.
   * <p>
   * For a list of column names, see the table format specification.
   *
   * @param columns String column names to load from the manifest file
   * @return a Filterable that will load only the given columns
   */
  default T select(String... columns) {
    // convenience overload: copy the varargs into a list and delegate to select(Collection)
    return select(Lists.newArrayList(columns));
  }

  /**
   * Selects the columns of a file manifest to read.
   * <p>
   * If columns are set multiple times, the last set of columns will be read.
   * <p>
   * If the Filterable object has partition filters, they will be added to the returned partial.
   * <p>
   * For a list of column names, see the table format specification.
   *
   * @param columns String column names to load from the manifest file
   * @return a Filterable that will load only the given columns
   */
  T select(Collection<String> columns);

  /**
   * Adds a filter expression on partition data for matching files.
   * <p>
   * If the Filterable object already has partition filters, the new filter will be added as an
   * additional requirement. The result filter expression will be the result of expr and any
   * existing filters.
   * <p>
   * If the Filterable object has columns selected, they will be added to the returned partial.
   *
   * @param expr An expression for filtering this Filterable using partition data
   * @return a Filterable that will load only rows that match expr
   */
  T filterPartitions(Expression expr);

  /**
   * Adds a filter expression on data rows for matching files.
   * <p>
   * Expressions passed to this function will be converted to partition expressions before they are
   * used to filter data files.
   * <p>
   * If the Filterable object already has partition filters, the new filter will be added as an
   * additional requirement. The result filter expression will be the result of expr and any
   * existing filters.
   * <p>
   * If the Filterable object has columns selected, they will be added to the returned partial.
   *
   * @param expr An expression for filtering this Filterable using row data
   * @return a Filterable that will load only rows that match expr
   */
  T filterRows(Expression expr);
}
| 2,054 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/UpdateProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.Map;
/**
* API for updating table properties.
* <p>
* Apply returns the updated table properties as a map for validation.
* <p>
* When committing, these changes will be applied to the current table metadata. Commit conflicts
* will be resolved by applying the pending changes to the new table metadata.
*/
public interface UpdateProperties extends PendingUpdate<Map<String, String>> {
  /**
   * Add a key/value property to the table.
   *
   * @param key a String key
   * @param value a String value
   * @return this for method chaining
   * @throws NullPointerException If either the key or value is null
   */
  UpdateProperties set(String key, String value);

  /**
   * Remove the given property key from the table.
   *
   * @param key a String key
   * @return this for method chaining
   * @throws NullPointerException If the key is null
   */
  UpdateProperties remove(String key);

  /**
   * Set the default file format for the table.
   *
   * @param format a file format
   * @return this for method chaining
   */
  UpdateProperties defaultFormat(FileFormat format);
}
| 2,055 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ReplacePartitions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
* Not recommended: API for overwriting files in a table by partition.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
* <p>
* This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
* all files in partitions with new data with the new additions. This operation is used to implement
* dynamic partition replacement.
* <p>
* When committing, these changes will be applied to the latest table snapshot. Commit conflicts
* will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
* This has no requirements for the latest snapshot and will not fail based on other snapshot
* changes.
*/
public interface ReplacePartitions extends PendingUpdate<Snapshot> {
  /**
   * Add a {@link DataFile} to the table.
   * <p>
   * All files in partitions touched by added files will be replaced when this update commits.
   *
   * @param file a data file
   * @return this for method chaining
   */
  ReplacePartitions addFile(DataFile file);

  /**
   * Validate that no partitions will be replaced and the operation is append-only.
   *
   * @return this for method chaining
   */
  ReplacePartitions validateAppendOnly();
}
| 2,056 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Schema.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.BiMap;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.Sets;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
/**
* The schema of a data table.
*/
/**
 * The schema of a data table.
 */
public class Schema implements Serializable {
  private static final Joiner NEWLINE = Joiner.on('\n');
  private static final String ALL_COLUMNS = "*";

  private final Types.StructType struct;
  // Lookup indexes are built lazily and are transient so they are rebuilt after deserialization.
  // NOTE(review): aliasToId is assigned in the constructor but declared transient, so aliases are
  // lost when a Schema is serialized — confirm whether that is intentional.
  private transient BiMap<String, Integer> aliasToId = null;
  private transient Map<Integer, Types.NestedField> idToField = null;
  private transient BiMap<String, Integer> nameToId = null;

  public Schema(List<Types.NestedField> columns, Map<String, Integer> aliases) {
    this.struct = Types.StructType.of(columns);
    this.aliasToId = aliases != null ? ImmutableBiMap.copyOf(aliases) : null;
  }

  public Schema(List<Types.NestedField> columns) {
    this.struct = Types.StructType.of(columns);
  }

  public Schema(Types.NestedField... columns) {
    this(Arrays.asList(columns));
  }

  private Map<Integer, Types.NestedField> lazyIdToField() {
    if (idToField == null) {
      this.idToField = TypeUtil.indexById(struct);
    }
    return idToField;
  }

  private BiMap<String, Integer> lazyNameToId() {
    if (nameToId == null) {
      this.nameToId = ImmutableBiMap.copyOf(TypeUtil.indexByName(struct));
    }
    return nameToId;
  }

  /**
   * Returns an alias map for this schema, if set.
   * <p>
   * Alias maps are created when translating an external schema, like an Avro Schema, to this
   * format. The original column names can be provided in a Map when constructing this Schema.
   *
   * @return a Map of column aliases to field ids
   */
  public Map<String, Integer> getAliases() {
    return aliasToId;
  }

  /**
   * Returns the underlying {@link Types.StructType struct type} for this schema.
   *
   * @return the StructType version of this schema.
   */
  public Types.StructType asStruct() {
    return struct;
  }

  /**
   * @return a List of the {@link Types.NestedField columns} in this Schema.
   */
  public List<Types.NestedField> columns() {
    return struct.fields();
  }

  /**
   * Returns the {@link Type} of a sub-field identified by the field name.
   *
   * @param name a full column name, which may be nested
   * @return a Type for the sub-field or null if it is not found
   */
  public Type findType(String name) {
    Preconditions.checkArgument(!name.isEmpty(), "Invalid column name: (empty)");
    Integer id = lazyNameToId().get(name);
    if (id == null) {
      // fixed: an unknown name used to unbox a null Integer and throw NullPointerException;
      // returning null matches findField(String)
      return null;
    }
    return findType(id);
  }

  /**
   * Returns the {@link Type} of a sub-field identified by the field id.
   *
   * @param id a field id
   * @return a Type for the sub-field or null if it is not found
   */
  public Type findType(int id) {
    Types.NestedField field = lazyIdToField().get(id);
    if (field != null) {
      return field.type();
    }
    return null;
  }

  /**
   * Returns the sub-field identified by the field id as a {@link Types.NestedField}.
   *
   * @param id a field id
   * @return the sub-field or null if it is not found
   */
  public Types.NestedField findField(int id) {
    return lazyIdToField().get(id);
  }

  /**
   * Returns a sub-field field by name as a {@link Types.NestedField}.
   * <p>
   * The result may be a nested field.
   *
   * @param name a String name
   * @return a Type for the sub-field or null if it is not found
   */
  public Types.NestedField findField(String name) {
    Preconditions.checkArgument(!name.isEmpty(), "Invalid column name: (empty)");
    Integer id = lazyNameToId().get(name);
    if (id != null) {
      return lazyIdToField().get(id);
    }
    return null;
  }

  /**
   * Returns the full column name for the given id.
   *
   * @param id a field id
   * @return the full column name in this schema that resolves to the id
   */
  public String findColumnName(int id) {
    return lazyNameToId().inverse().get(id);
  }

  /**
   * Returns the column id for the given column alias. Column aliases are set
   * by conversions from Parquet or Avro to this Schema type.
   *
   * @param alias a full column name in the unconverted data schema
   * @return the column id in this schema, or null if the column wasn't found
   */
  public Integer aliasToId(String alias) {
    if (aliasToId != null) {
      return aliasToId.get(alias);
    }
    return null;
  }

  /**
   * Returns the column alias for the given column id. Column aliases are set
   * by conversions from Parquet or Avro to this Schema type.
   *
   * @param fieldId a column id in this schema
   * @return the full column name in the unconverted data schema, or null if one wasn't found
   */
  public String idToAlias(Integer fieldId) {
    if (aliasToId != null) {
      return aliasToId.inverse().get(fieldId);
    }
    return null;
  }

  /**
   * Creates a projection schema for a subset of columns, selected by name.
   * <p>
   * Names that identify nested fields will select part or all of the field's top-level column.
   *
   * @param names String names for selected columns
   * @return a projection schema from this schema, by name
   */
  public Schema select(String... names) {
    return select(Arrays.asList(names));
  }

  /**
   * Creates a projection schema for a subset of columns, selected by name.
   * <p>
   * Names that identify nested fields will select part or all of the field's top-level column.
   *
   * @param names a List of String names for selected columns
   * @return a projection schema from this schema, by name
   */
  public Schema select(Collection<String> names) {
    if (names.contains(ALL_COLUMNS)) {
      return this;
    }

    // unknown names are silently ignored; only resolvable columns are projected
    Set<Integer> selected = Sets.newHashSet();
    for (String name : names) {
      Integer id = lazyNameToId().get(name);
      if (id != null) {
        selected.add(id);
      }
    }

    return TypeUtil.select(this, selected);
  }

  @Override
  public String toString() {
    return String.format("table {\n%s\n}",
        NEWLINE.join(struct.fields().stream()
            .map(f -> "  " + f)
            .collect(Collectors.toList())));
  }
}
| 2,057 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PendingUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;
/**
* API for table metadata changes.
*
* @param <T> Java class of changes from this update; returned by {@link #apply} for validation.
*/
public interface PendingUpdate<T> {
  /**
   * Apply the pending changes and return the uncommitted changes for validation.
   * <p>
   * This does not result in a permanent update: no table metadata is written.
   *
   * @return the uncommitted changes that would be committed by calling {@link #commit()}
   * @throws ValidationException If the pending changes cannot be applied to the current metadata
   * @throws IllegalArgumentException If the pending changes are conflicting or invalid
   */
  T apply();

  /**
   * Apply the pending changes and commit.
   * <p>
   * Changes are committed by calling the underlying table's commit method.
   * <p>
   * Once the commit is successful, the updated table will be refreshed.
   *
   * @throws ValidationException If the update cannot be applied to the current table metadata.
   * @throws CommitFailedException If the update cannot be committed due to conflicts.
   */
  void commit();
}
| 2,058 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Table.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.Map;
/**
* Represents a table.
*/
public interface Table {
  /**
   * Refresh the current table metadata.
   */
  void refresh();

  /**
   * Create a new {@link TableScan scan} for this table.
   * <p>
   * Once a table scan is created, it can be refined to project columns and filter data.
   *
   * @return a table scan for this table
   */
  TableScan newScan();

  /**
   * Return the {@link Schema schema} for this table.
   *
   * @return this table's schema
   */
  Schema schema();

  /**
   * Return the {@link PartitionSpec partition spec} for this table.
   *
   * @return this table's partition spec
   */
  PartitionSpec spec();

  /**
   * Return a map of string properties for this table.
   *
   * @return this table's properties map
   */
  Map<String, String> properties();

  /**
   * Return the table's base location.
   *
   * @return this table's location
   */
  String location();

  /**
   * Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
   *
   * @return the current table Snapshot.
   */
  Snapshot currentSnapshot();

  /**
   * Get the {@link Snapshot snapshots} of this table.
   *
   * @return an Iterable of snapshots of this table.
   */
  Iterable<Snapshot> snapshots();

  /**
   * Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
   *
   * @return a new {@link UpdateSchema}
   */
  UpdateSchema updateSchema();

  /**
   * Create a new {@link UpdateProperties} to update table properties and commit the changes.
   *
   * @return a new {@link UpdateProperties}
   */
  UpdateProperties updateProperties();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table and commit.
   *
   * @return a new {@link AppendFiles}
   */
  AppendFiles newAppend();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table and commit.
   * <p>
   * Using this method signals to the underlying implementation that the append should not perform
   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
   * because the fast commit may cause split planning to slow down over time.
   * <p>
   * Implementations may not support fast appends, in which case this will return the same appender
   * as {@link #newAppend()}.
   *
   * @return a new {@link AppendFiles}
   */
  default AppendFiles newFastAppend() {
    // by default, fast appends fall back to the normal append implementation
    return newAppend();
  }

  /**
   * Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
   *
   * @return a new {@link RewriteFiles}
   */
  RewriteFiles newRewrite();

  /**
   * Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
   *
   * @return a new {@link OverwriteFiles}
   */
  OverwriteFiles newOverwrite();

  /**
   * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
   * overwrite partitions in the table with new data.
   * <p>
   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
   *
   * @return a new {@link ReplacePartitions}
   */
  ReplacePartitions newReplacePartitions();

  /**
   * Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
   *
   * @return a new {@link DeleteFiles}
   */
  DeleteFiles newDelete();

  /**
   * Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
   *
   * @return a new {@link ExpireSnapshots}
   */
  ExpireSnapshots expireSnapshots();

  /**
   * Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
   *
   * @return a new {@link Rollback}
   */
  Rollback rollback();

  /**
   * Create a new {@link Transaction transaction API} to commit multiple table operations at once.
   *
   * @return a new {@link Transaction}
   */
  Transaction newTransaction();
}
| 2,059 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Files.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.io.PositionOutputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Paths;
public class Files {
public static OutputFile localOutput(File file) {
return new LocalOutputFile(file);
}
public static OutputFile localOutput(String file) {
return localOutput(Paths.get(file).toAbsolutePath().toFile());
}
private static class LocalOutputFile implements OutputFile {
private final File file;
private LocalOutputFile(File file) {
this.file = file;
}
@Override
public PositionOutputStream create() {
if (file.exists()) {
throw new AlreadyExistsException("File already exists: %s", file);
}
if (!file.getParentFile().isDirectory() && !file.getParentFile().mkdirs()) {
throw new RuntimeIOException(
String.format(
"Failed to create the file's directory at %s.",
file.getParentFile().getAbsolutePath()));
}
try {
return new PositionFileOutputStream(new RandomAccessFile(file, "rw"));
} catch (FileNotFoundException e) {
throw new RuntimeIOException(e, "Failed to create file: %s", file);
}
}
@Override
public PositionOutputStream createOrOverwrite() {
if (file.exists()) {
if (!file.delete()) {
throw new RuntimeIOException("Failed to delete: " + file);
}
}
return create();
}
@Override
public String location() {
return file.toString();
}
@Override
public InputFile toInputFile() {
return localInput(file);
}
@Override
public String toString() {
return location();
}
}
public static InputFile localInput(File file) {
return new LocalInputFile(file);
}
public static InputFile localInput(String file) {
if (file.startsWith("file:")) {
return localInput(new File(file.replaceFirst("file:", "")));
}
return localInput(new File(file));
}
private static class LocalInputFile implements InputFile {
private final File file;
private LocalInputFile(File file) {
this.file = file;
}
@Override
public long getLength() {
return file.length();
}
@Override
public SeekableInputStream newStream() {
try {
return new SeekableFileInputStream(new RandomAccessFile(file, "r"));
} catch (FileNotFoundException e) {
throw new RuntimeIOException(e, "Failed to read file: %s", file);
}
}
@Override
public String location() {
return file.toString();
}
@Override
public String toString() {
return location();
}
}
private static class SeekableFileInputStream extends SeekableInputStream {
private final RandomAccessFile stream;
private SeekableFileInputStream(RandomAccessFile stream) {
this.stream = stream;
}
@Override
public long getPos() throws IOException {
return stream.getFilePointer();
}
@Override
public void seek(long newPos) throws IOException {
stream.seek(newPos);
}
@Override
public int read() throws IOException {
return stream.read();
}
@Override
public int read(byte[] b) throws IOException {
return stream.read(b);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return stream.read(b, off, len);
}
@Override
public long skip(long n) throws IOException {
if (n > Integer.MAX_VALUE) {
return stream.skipBytes(Integer.MAX_VALUE);
} else {
return stream.skipBytes((int) n);
}
}
@Override
public void close() throws IOException {
stream.close();
}
}
private static class PositionFileOutputStream extends PositionOutputStream {
private final RandomAccessFile stream;
private PositionFileOutputStream(RandomAccessFile stream) {
this.stream = stream;
}
@Override
public long getPos() throws IOException {
return stream.getFilePointer();
}
@Override
public void write(byte[] b) throws IOException {
stream.write(b);
}
@Override
public void write(byte[] b, int off, int len) throws IOException {
stream.write(b, off, len);
}
@Override
public void close() throws IOException {
stream.close();
}
@Override
public void write(int b) throws IOException {
stream.write(b);
}
}
}
| 2,060 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/UpdateSchema.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Type;
/**
* API for schema evolution.
* <p>
* When committing, these changes will be applied to the current table metadata. Commit conflicts
* will not be resolved and will result in a {@link CommitFailedException}.
*/
public interface UpdateSchema extends PendingUpdate<Schema> {
/**
* Add a new top-level column.
* <p>
* Because "." may be interpreted as a column path separator or may be used in field names, it is
* not allowed in names passed to this method. To add to nested structures or to add fields with
* names that contain ".", use {@link #addColumn(String, String, Type)}.
* <p>
* If type is a nested type, its field IDs are reassigned when added to the existing schema.
*
* @param name name for the new column
* @param type type for the new column
* @return this for method chaining
* @throws IllegalArgumentException If name contains "."
*/
UpdateSchema addColumn(String name, Type type);
/**
* Add a new column to a nested struct.
* <p>
* The parent name is used to find the parent using {@link Schema#findField(String)}. If the
* parent name is null, the new column will be added to the root as a top-level column. If parent
* identifies a struct, a new column is added to that struct. If it identifies a list, the column
* is added to the list element struct, and if it identifies a map, the new column is added to
* the map's value struct.
* <p>
* The given name is used to name the new column and names containing "." are not handled
* differently.
* <p>
* If type is a nested type, its field IDs are reassigned when added to the existing schema.
*
* @param parent name of the parent struct to the column will be added to
* @param name name for the new column
* @param type type for the new column
* @return this for method chaining
* @throws IllegalArgumentException If parent doesn't identify a struct
*/
UpdateSchema addColumn(String parent, String name, Type type);
/**
* Rename a column in the schema.
* <p>
* The name is used to find the column to rename using {@link Schema#findField(String)}.
* <p>
* The new name may contain "." and such names are not parsed or handled differently.
* <p>
* Columns may be updated and renamed in the same schema update.
*
* @param name name of the column to rename
* @param newName replacement name for the column
* @return this for method chaining
* @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
* change conflicts with other additions, renames, or updates.
*/
UpdateSchema renameColumn(String name, String newName);
/**
* Update a column in the schema to a new primitive type.
* <p>
* The name is used to find the column to update using {@link Schema#findField(String)}.
* <p>
* Only updates that widen types are allowed.
* <p>
* Columns may be updated and renamed in the same schema update.
*
* @param name name of the column to rename
* @param newType replacement type for the column
* @return this for method chaining
* @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
* change introduces a type incompatibility or if it conflicts
* with other additions, renames, or updates.
*/
UpdateSchema updateColumn(String name, Type.PrimitiveType newType);
/**
* Delete a column in the schema.
* <p>
* The name is used to find the column to delete using {@link Schema#findField(String)}.
*
* @param name name of the column to delete
* @return this for method chaining
* @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
* change conflicts with other additions, renames, or updates.
*/
UpdateSchema deleteColumn(String name);
}
| 2,061 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Rollback.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
/**
* API for rolling table data back to the state at an older table {@link Snapshot snapshot}.
* <p>
* This API does not allow conflicting calls to {@link #toSnapshotId(long)} and
* {@link #toSnapshotAtTime(long)}.
* <p>
* When committing, these changes will be applied to the current table metadata. Commit conflicts
* will not be resolved and will result in a {@link CommitFailedException}.
*/
public interface Rollback extends PendingUpdate<Snapshot> {
/**
* Roll this table's data back to a specific {@link Snapshot} identified by id.
*
* @param snapshotId long id of the snapshot to roll back table data to
* @return this for method chaining
* @throws IllegalArgumentException If the table has no snapshot with the given id
*/
Rollback toSnapshotId(long snapshotId);
/**
* Roll this table's data back to the last {@link Snapshot} before the given timestamp.
*
* @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
* @return this for method chaining
* @throws IllegalArgumentException If the table has no old snapshot before the given timestamp
*/
Rollback toSnapshotAtTime(long timestampMillis);
}
| 2,062 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Snapshot.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.List;
/**
* A snapshot of the data in a table at a point in time.
* <p>
* A snapshot consist of one or more file manifests, and the complete table contents is the union
* of all the data files in those manifests.
* <p>
* Snapshots are created by table operations, like {@link AppendFiles} and {@link RewriteFiles}.
*/
public interface Snapshot {
/**
* Return this snapshot's ID.
*
* @return a long ID
*/
long snapshotId();
/**
* Return this snapshot's parent ID or null.
*
* @return a long ID for this snapshot's parent, or null if it has no parent
*/
Long parentId();
/**
* Return this snapshot's timestamp.
* <p>
* This timestamp is the same as those produced by {@link System#currentTimeMillis()}.
*
* @return a long timestamp in milliseconds
*/
long timestampMillis();
/**
* Return the location of all manifests in this snapshot.
* <p>
* The current table is made of the union of the data files in these manifests.
*
* @return a list of fully-qualified manifest locations
*/
List<ManifestFile> manifests();
/**
* Return all files added to the table in this snapshot.
* <p>
* The files returned include the following columns: file_path, file_format, partition,
* record_count, and file_size_in_bytes. Other columns will be null.
*
* @return all files added to the table in this snapshot.
*/
Iterable<DataFile> addedFiles();
/**
* Return all files deleted from the table in this snapshot.
* <p>
* The files returned include the following columns: file_path, file_format, partition,
* record_count, and file_size_in_bytes. Other columns will be null.
*
* @return all files deleted from the table in this snapshot.
*/
Iterable<DataFile> deletedFiles();
/**
* Return the location of this snapshot's manifest list, or null if it is not separate.
*
* @return the location of the manifest list for this Snapshot
*/
String manifestListLocation();
}
| 2,063 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Tables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
/**
* Generic interface for creating and loading a table implementation.
*
* The 'tableIdentifier' field should be interpreted by the underlying
* implementation (e.g. database.table_name)
*/
public interface Tables {
default Table create(Schema schema, String tableIdentifier) {
return create(schema, PartitionSpec.unpartitioned(), ImmutableMap.of(), tableIdentifier);
}
default Table create(Schema schema, PartitionSpec spec, String tableIdentifier) {
return create(schema, spec, ImmutableMap.of(), tableIdentifier);
}
Table create(Schema schema,
PartitionSpec spec,
Map<String, String> properties,
String tableIdentifier);
Table load(String tableIdentifier);
}
| 2,064 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ManifestFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.types.Types;
import java.nio.ByteBuffer;
import java.util.List;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
* Represents a manifest file that can be scanned to find data files in a table.
*/
public interface ManifestFile {
Schema SCHEMA = new Schema(
required(500, "manifest_path", Types.StringType.get()),
required(501, "manifest_length", Types.LongType.get()),
required(502, "partition_spec_id", Types.IntegerType.get()),
optional(503, "added_snapshot_id", Types.LongType.get()),
optional(504, "added_data_files_count", Types.IntegerType.get()),
optional(505, "existing_data_files_count", Types.IntegerType.get()),
optional(506, "deleted_data_files_count", Types.IntegerType.get()),
optional(507, "partitions", Types.ListType.ofRequired(508, Types.StructType.of(
required(509, "contains_null", Types.BooleanType.get()),
optional(510, "lower_bound", Types.BinaryType.get()), // null if no non-null values
optional(511, "upper_bound", Types.BinaryType.get())
))));
static Schema schema() {
return SCHEMA;
}
/**
* @return fully qualified path to the file, suitable for constructing a Hadoop Path
*/
String path();
/**
* @return length of the manifest file
*/
long length();
/**
* @return ID of the {@link PartitionSpec} used to write the manifest file
*/
int partitionSpecId();
/**
* @return ID of the snapshot that added the manifest file to table metadata
*/
Long snapshotId();
/**
* @return the number of data files with status ADDED in the manifest file
*/
Integer addedFilesCount();
/**
* @return the number of data files with status EXISTING in the manifest file
*/
Integer existingFilesCount();
/**
* @return the number of data files with status DELETED in the manifest file
*/
Integer deletedFilesCount();
/**
* Returns a list of {@link PartitionFieldSummary partition field summaries}.
* <p>
* Each summary corresponds to a field in the manifest file's partition spec, by ordinal. For
* example, the partition spec [ ts_day=date(ts), type=identity(type) ] will have 2 summaries.
* The first summary is for the ts_day partition field and the second is for the type partition
* field.
*
* @return a list of partition field summaries, one for each field in the manifest's spec
*/
List<PartitionFieldSummary> partitions();
/**
* Copies this {@link ManifestFile manifest file}. Readers can reuse manifest file instances; use
* this method to make defensive copies.
*
* @return a copy of this manifest file
*/
ManifestFile copy();
/**
* Summarizes the values of one partition field stored in a manifest file.
*/
interface PartitionFieldSummary {
Types.StructType TYPE = ManifestFile.schema()
.findType("partitions")
.asListType()
.elementType()
.asStructType();
static Types.StructType getType() {
return TYPE;
}
/**
* @return true if at least one data file in the manifest has a null value for the field
*/
boolean containsNull();
/**
* @return a ByteBuffer that contains a serialized bound lower than all values of the field
*/
ByteBuffer lowerBound();
/**
* @return a ByteBuffer that contains a serialized bound higher than all values of the field
*/
ByteBuffer upperBound();
/**
* Copies this {@link PartitionFieldSummary summary}. Readers can reuse instances; use this
* method to make defensive copies.
*
* @return a copy of this partition field summary
*/
PartitionFieldSummary copy();
}
}
| 2,065 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/OverwriteFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
/**
* API for overwriting files in a table by filter expression.
* <p>
* This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
* all the files that match the filter expression with the set of additions. This operation is used
* to implement idempotent writes that always replace a section of a table with new data.
* <p>
* Overwrites can be validated
* <p>
* When committing, these changes will be applied to the latest table snapshot. Commit conflicts
* will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
* This has no requirements for the latest snapshot and will not fail based on other snapshot
* changes.
*/
public interface OverwriteFiles extends PendingUpdate<Snapshot> {
/**
* Delete files that match an {@link Expression} on data rows from the table.
* <p>
* A file is selected to be deleted by the expression if it could contain any rows that match the
* expression (candidate files are selected using an
* {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
* deleted if all of the rows in the file must match the expression (the partition data matches
* the expression's {@link Projections#strict(PartitionSpec)} strict projection}). This guarantees
* that files are deleted if and only if all rows in the file must match the expression.
* <p>
* Files that may contain some rows that match the expression and some rows that do not will
* result in a {@link ValidationException}.
*
* @param expr an expression on rows in the table
* @return this for method chaining
* @throws ValidationException If a file can contain both rows that match and rows that do not
*/
OverwriteFiles overwriteByRowFilter(Expression expr);
/**
* Add a {@link DataFile} to the table.
*
* @param file a data file
* @return this for method chaining
*/
OverwriteFiles addFile(DataFile file);
/**
* Signal that each file added to the table must match the overwrite expression.
* <p>
* If this method is called, each added file is validated on commit to ensure that it matches the
* overwrite row filter. This is used to ensure that writes are idempotent: that files cannot
* be added during a commit that would not be removed if the operation were run a second time.
*
* @return this for method chaining
*/
OverwriteFiles validateAddedFiles();
}
| 2,066 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/FileScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.expressions.Expression;
/**
* A scan task over a range of a single file.
*/
public interface FileScanTask extends ScanTask {
/**
* The {@link DataFile file} to scan.
*
* @return the file to scan
*/
DataFile file();
/**
* The {@link PartitionSpec spec} used to store this file.
*
* @return the partition spec from this file's manifest
*/
PartitionSpec spec();
/**
* The starting position of this scan range in the file.
*
* @return the start position of this scan range
*/
long start();
/**
* The number of bytes to scan from the {@link #start()} position in the file.
*
* @return the length of this scan range in bytes
*/
long length();
/**
* Returns the residual expression that should be applied to rows in this file scan.
* <p>
* The residual expression for a file is a filter expression created from the scan's filter, inclusive
* any predicates that are true or false for the entire file removed, based on the file's
* partition data.
*
* @return a residual expression to apply to rows from this scan
*/
Expression residual();
@Override
default boolean isFileScanTask() {
return true;
}
@Override
default FileScanTask asFileScanTask() {
return this;
}
}
| 2,067 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PartitionField.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.transforms.Transform;
import java.io.Serializable;
/**
* Represents a single field in a {@link PartitionSpec}.
*/
public class PartitionField implements Serializable {
private final int sourceId;
private final String name;
private final Transform<?, ?> transform;
PartitionField(int sourceId, String name, Transform<?, ?> transform) {
this.sourceId = sourceId;
this.name = name;
this.transform = transform;
}
/**
* @return the field id of the source field in the {@link PartitionSpec spec's} table schema
*/
public int sourceId() {
return sourceId;
}
/**
* @return the name of this partition field
*/
public String name() {
return name;
}
/**
* @return the transform used to produce partition values from source values
*/
public Transform<?, ?> transform() {
return transform;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
PartitionField that = (PartitionField) other;
return (
sourceId == that.sourceId &&
name.equals(that.name) &&
transform.equals(that.transform)
);
}
@Override
public int hashCode() {
return Objects.hashCode(sourceId, name, transform);
}
}
| 2,068 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/DeleteFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
/**
* API for deleting files from a table.
* <p>
* This API accumulates file deletions, produces a new {@link Snapshot} of the table, and commits
* that snapshot as the current.
* <p>
* When committing, these changes will be applied to the latest table snapshot. Commit conflicts
* will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
*/
public interface DeleteFiles extends PendingUpdate<Snapshot> {
/**
* Delete a file path from the underlying table.
* <p>
* To remove a file from the table, this path must equal a path in the table's metadata. Paths
* that are different but equivalent will not be removed. For example, file:/path/file.avro is
* equivalent to file:///path/file.avro, but would not remove the latter path from the table.
*
* @param path a fully-qualified file path to remove from the table
* @return this for method chaining
*/
DeleteFiles deleteFile(CharSequence path);
/**
* Delete a file tracked by a {@link DataFile} from the underlying table.
*
* @param file a DataFile to remove from the table
* @return this for method chaining
*/
default DeleteFiles deleteFile(DataFile file) {
deleteFile(file.path());
return this;
}
/**
* Delete files that match an {@link Expression} on data rows from the table.
* <p>
* A file is selected to be deleted by the expression if it could contain any rows that match the
* expression (candidate files are selected using an
* {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
* deleted if all of the rows in the file must match the expression (the partition data matches
* the expression's {@link Projections#strict(PartitionSpec)} strict projection}). This guarantees
* that files are deleted if and only if all rows in the file must match the expression.
* <p>
* Files that may contain some rows that match the expression and some rows that do not will
* result in a {@link ValidationException}.
*
* @param expr an expression on rows in the table
* @return this for method chaining
* @throws ValidationException If a file can contain both rows that match and rows that do not
*/
DeleteFiles deleteFromRowFilter(Expression expr);
}
| 2,069 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PartitionSpec.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.transforms.Transforms;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
/**
 * Represents how to produce partition data for a table.
 * <p>
 * Partition data is produced by transforming columns in a table. Each column transform is
 * represented by a named {@link PartitionField}.
 */
public class PartitionSpec implements Serializable {
  // start assigning IDs for partition fields at 1000
  private static final int PARTITION_DATA_ID_START = 1000;

  private final Schema schema;

  private final int specId;

  // this is ordered so that DataFile has a consistent schema
  private final PartitionField[] fields;

  // lazily built lookup structures; transient so they are rebuilt after deserialization
  private transient Map<Integer, PartitionField> fieldsBySourceId = null;
  private transient Map<String, PartitionField> fieldsByName = null;
  private transient Class<?>[] javaClasses = null;
  private transient List<PartitionField> fieldList = null;

  private PartitionSpec(Schema schema, int specId, List<PartitionField> fields) {
    this.schema = schema;
    this.specId = specId;
    this.fields = new PartitionField[fields.size()];
    for (int i = 0; i < this.fields.length; i += 1) {
      this.fields[i] = fields.get(i);
    }
  }

  /**
   * @return the {@link Schema} for this spec.
   */
  public Schema schema() {
    return schema;
  }

  /**
   * @return the ID of this spec
   */
  public int specId() {
    return specId;
  }

  /**
   * @return the list of {@link PartitionField partition fields} for this spec.
   */
  public List<PartitionField> fields() {
    return lazyFieldList();
  }

  /**
   * @param fieldId a field id from the source schema
   * @return the {@link PartitionField field} that partitions the given source field
   */
  public PartitionField getFieldBySourceId(int fieldId) {
    return lazyFieldsBySourceId().get(fieldId);
  }

  /**
   * @return a {@link Types.StructType} for partition data defined by this spec.
   */
  public Types.StructType partitionType() {
    List<Types.NestedField> structFields = Lists.newArrayListWithExpectedSize(fields.length);
    for (int i = 0; i < fields.length; i += 1) {
      PartitionField field = fields[i];
      Type sourceType = schema.findType(field.sourceId());
      Type resultType = field.transform().getResultType(sourceType);
      // assign ids for partition fields starting at PARTITION_DATA_ID_START (1000) to leave
      // room for the data file's other fields
      structFields.add(
          Types.NestedField.optional(PARTITION_DATA_ID_START + i, field.name(), resultType));
    }
    return Types.StructType.of(structFields);
  }

  /**
   * Returns the Java classes of the partition values, one per partition field, in field order.
   * The result is cached after the first call.
   */
  public Class<?>[] javaClasses() {
    if (javaClasses == null) {
      this.javaClasses = new Class<?>[fields.length];
      for (int i = 0; i < fields.length; i += 1) {
        PartitionField field = fields[i];
        Type sourceType = schema.findType(field.sourceId());
        Type result = field.transform().getResultType(sourceType);
        javaClasses[i] = result.typeId().javaClass();
      }
    }
    return javaClasses;
  }

  // reads a positional value from partition data, cast to the field's Java class
  @SuppressWarnings("unchecked")
  private <T> T get(StructLike data, int pos, Class<?> javaClass) {
    return data.get(pos, (Class<T>) javaClass);
  }

  // URL-encodes a partition value so it is safe to embed in a path segment
  private String escape(String string) {
    try {
      return URLEncoder.encode(string, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      // UTF-8 is guaranteed by the JVM spec, so this cannot happen
      throw new RuntimeException(e);
    }
  }

  /**
   * Converts partition data to a Hive-style partition path: {@code name=value/name=value/...},
   * with each value transformed to a human-readable string and URL-encoded.
   *
   * @param data partition data matching this spec
   * @return a relative partition path for the data
   */
  public String partitionToPath(StructLike data) {
    StringBuilder sb = new StringBuilder();
    Class<?>[] outputClasses = javaClasses();  // renamed: do not shadow the javaClasses field
    for (int i = 0; i < outputClasses.length; i += 1) {
      PartitionField field = fields[i];
      String valueString = field.transform().toHumanString(get(data, i, outputClasses[i]));
      if (i > 0) {
        sb.append("/");
      }
      sb.append(field.name()).append("=").append(escape(valueString));
    }
    return sb.toString();
  }

  /**
   * Returns true if this spec is equivalent to the other, with field names ignored. That is, if
   * both specs have the same number of fields, field order, source columns, and transforms.
   *
   * @param other another PartitionSpec
   * @return true if the specs have the same fields, source columns, and transforms.
   */
  public boolean compatibleWith(PartitionSpec other) {
    if (equals(other)) {
      return true;
    }
    if (fields.length != other.fields.length) {
      return false;
    }
    for (int i = 0; i < fields.length; i += 1) {
      PartitionField thisField = fields[i];
      PartitionField thatField = other.fields[i];
      if (thisField.sourceId() != thatField.sourceId() ||
          !thisField.transform().toString().equals(thatField.transform().toString())) {
        return false;
      }
    }
    return true;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    PartitionSpec that = (PartitionSpec) other;
    if (this.specId != that.specId) {
      return false;
    }
    return Arrays.equals(fields, that.fields);
  }

  @Override
  public int hashCode() {
    // equals() also compares specId; omitting it here still satisfies the hashCode contract
    // because equal specs always have equal fields. Objects.hashCode(Integer) was a no-op wrapper.
    return Arrays.hashCode(fields);
  }

  private List<PartitionField> lazyFieldList() {
    if (fieldList == null) {
      this.fieldList = ImmutableList.copyOf(fields);
    }
    return fieldList;
  }

  // NOTE(review): not referenced within this class — confirm external/reflective use before removing
  private Map<String, PartitionField> lazyFieldsByName() {
    if (fieldsByName == null) {
      ImmutableMap.Builder<String, PartitionField> builder = ImmutableMap.builder();
      for (PartitionField field : fields) {
        builder.put(field.name(), field);
      }
      this.fieldsByName = builder.build();
    }
    return fieldsByName;
  }

  private Map<Integer, PartitionField> lazyFieldsBySourceId() {
    if (fieldsBySourceId == null) {
      ImmutableMap.Builder<Integer, PartitionField> byIdBuilder = ImmutableMap.builder();
      for (PartitionField field : fields) {
        byIdBuilder.put(field.sourceId(), field);
      }
      this.fieldsBySourceId = byIdBuilder.build();
    }
    return fieldsBySourceId;
  }

  /**
   * Returns the source field ids for identity partitions.
   *
   * @return a set of source ids for the identity partitions.
   */
  public Set<Integer> identitySourceIds() {
    Set<Integer> sourceIds = Sets.newHashSet();
    for (PartitionField field : this.fields()) {
      if ("identity".equals(field.transform().toString())) {
        sourceIds.add(field.sourceId());
      }
    }
    return sourceIds;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("[");
    for (PartitionField field : fields) {
      sb.append("\n");
      sb.append("  ").append(field.name()).append(": ").append(field.transform())
          .append("(").append(field.sourceId()).append(")");
    }
    if (fields.length > 0) {
      sb.append("\n");
    }
    sb.append("]");
    return sb.toString();
  }

  private static final PartitionSpec UNPARTITIONED_SPEC =
      new PartitionSpec(new Schema(), 0, ImmutableList.of());

  /**
   * Returns a spec for unpartitioned tables.
   *
   * @return a partition spec with no partitions
   */
  public static PartitionSpec unpartitioned() {
    return UNPARTITIONED_SPEC;
  }

  /**
   * Creates a new {@link Builder partition spec builder} for the given {@link Schema}.
   *
   * @param schema a schema
   * @return a partition spec builder for the given schema
   */
  public static Builder builderFor(Schema schema) {
    return new Builder(schema);
  }

  /**
   * Used to create valid {@link PartitionSpec partition specs}.
   * <p>
   * Call {@link #builderFor(Schema)} to create a new builder.
   */
  public static class Builder {
    private final Schema schema;
    private final List<PartitionField> fields = Lists.newArrayList();
    private final Set<String> partitionNames = Sets.newHashSet();
    private int specId = 0;

    private Builder(Schema schema) {
      this.schema = schema;
    }

    // validates that the partition name is non-empty and not already used, then reserves it
    private void checkAndAddPartitionName(String name) {
      Preconditions.checkArgument(name != null && !name.isEmpty(),
          "Cannot use empty or null partition name: %s", name);
      Preconditions.checkArgument(!partitionNames.contains(name),
          "Cannot use partition name more than once: %s", name);
      partitionNames.add(name);
    }

    public Builder withSpecId(int specId) {
      this.specId = specId;
      return this;
    }

    private Types.NestedField findSourceColumn(String sourceName) {
      Types.NestedField sourceColumn = schema.findField(sourceName);
      Preconditions.checkNotNull(sourceColumn, "Cannot find source column: %s", sourceName);
      return sourceColumn;
    }

    /** Adds an identity partition on the named source column. */
    public Builder identity(String sourceName) {
      checkAndAddPartitionName(sourceName);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), sourceName, Transforms.identity(sourceColumn.type())));
      return this;
    }

    /** Adds a year-granularity time partition named {@code <sourceName>_year}. */
    public Builder year(String sourceName) {
      String name = sourceName + "_year";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.year(sourceColumn.type())));
      return this;
    }

    /** Adds a month-granularity time partition named {@code <sourceName>_month}. */
    public Builder month(String sourceName) {
      String name = sourceName + "_month";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.month(sourceColumn.type())));
      return this;
    }

    /** Adds a day-granularity time partition named {@code <sourceName>_day}. */
    public Builder day(String sourceName) {
      String name = sourceName + "_day";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.day(sourceColumn.type())));
      return this;
    }

    /** Adds an hour-granularity time partition named {@code <sourceName>_hour}. */
    public Builder hour(String sourceName) {
      String name = sourceName + "_hour";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.hour(sourceColumn.type())));
      return this;
    }

    /** Adds a bucket partition named {@code <sourceName>_bucket} with the given bucket count. */
    public Builder bucket(String sourceName, int numBuckets) {
      String name = sourceName + "_bucket";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.bucket(sourceColumn.type(), numBuckets)));
      return this;
    }

    /** Adds a truncate partition named {@code <sourceName>_trunc} with the given width. */
    public Builder truncate(String sourceName, int width) {
      String name = sourceName + "_trunc";
      checkAndAddPartitionName(name);
      Types.NestedField sourceColumn = findSourceColumn(sourceName);
      fields.add(new PartitionField(
          sourceColumn.fieldId(), name, Transforms.truncate(sourceColumn.type(), width)));
      return this;
    }

    /**
     * Adds a partition field from a source column id, partition name, and transform string.
     * Used when reconstructing a spec from serialized metadata.
     */
    public Builder add(int sourceId, String name, String transform) {
      checkAndAddPartitionName(name);
      Types.NestedField column = schema.findField(sourceId);
      // Guava's Preconditions only substitutes %s placeholders; the original %d was never
      // interpolated, producing messages like "Cannot find source column: %d [7]"
      Preconditions.checkNotNull(column, "Cannot find source column: %s", sourceId);
      fields.add(new PartitionField(
          sourceId, name, Transforms.fromString(column.type(), transform)));
      return this;
    }

    public PartitionSpec build() {
      PartitionSpec spec = new PartitionSpec(schema, specId, fields);
      checkCompatibility(spec, schema);
      return spec;
    }
  }

  /**
   * Validates that every partition field references a primitive source column and that its
   * transform can be applied to that column's type.
   *
   * @throws ValidationException if a field references a non-primitive column or an incompatible
   *                             transform
   */
  public static void checkCompatibility(PartitionSpec spec, Schema schema) {
    for (PartitionField field : spec.fields) {
      Type sourceType = schema.findType(field.sourceId());
      ValidationException.check(sourceType.isPrimitiveType(),
          "Cannot partition by non-primitive source field: %s", sourceType);
      ValidationException.check(
          field.transform().canTransform(sourceType),
          "Invalid source type %s for transform: %s",
          sourceType, field.transform());
    }
  }
}
| 2,070 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/DataFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.types.Types.BinaryType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StringType;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Interface for files listed in a table manifest.
 */
public interface DataFile {
  /**
   * Builds the manifest-entry {@link StructType} for data files with the given partition type.
   * <p>
   * The partition struct is embedded at field id 102; all other field ids are fixed.
   *
   * @param partitionType a struct type for this table's partition data
   * @return a struct type describing a data file entry
   */
  static StructType getType(StructType partitionType) {
    // IDs start at 100 to leave room for changes to ManifestEntry
    return StructType.of(
        required(100, "file_path", StringType.get()),
        required(101, "file_format", StringType.get()),
        required(102, "partition", partitionType),
        required(103, "record_count", LongType.get()),
        required(104, "file_size_in_bytes", LongType.get()),
        required(105, "block_size_in_bytes", LongType.get()),
        optional(106, "file_ordinal", IntegerType.get()),
        optional(107, "sort_columns", ListType.ofRequired(112, IntegerType.get())),
        optional(108, "column_sizes", MapType.ofRequired(117, 118,
            IntegerType.get(), LongType.get())),
        optional(109, "value_counts", MapType.ofRequired(119, 120,
            IntegerType.get(), LongType.get())),
        optional(110, "null_value_counts", MapType.ofRequired(121, 122,
            IntegerType.get(), LongType.get())),
        optional(125, "lower_bounds", MapType.ofRequired(126, 127,
            IntegerType.get(), BinaryType.get())),
        optional(128, "upper_bounds", MapType.ofRequired(129, 130,
            IntegerType.get(), BinaryType.get()))
        // NEXT ID TO ASSIGN: 131
    );
  }

  /**
   * @return fully qualified path to the file, suitable for constructing a Hadoop Path
   */
  CharSequence path();

  /**
   * @return format of the data file
   */
  FileFormat format();

  /**
   * @return partition data for this file as a {@link StructLike}
   */
  StructLike partition();

  /**
   * @return the number of top-level records in the data file
   */
  long recordCount();

  /**
   * @return the data file size in bytes
   */
  long fileSizeInBytes();

  /**
   * @return the data file block size in bytes (for split planning)
   */
  long blockSizeInBytes();

  /**
   * @return file ordinal if written in a global ordering, or null
   */
  Integer fileOrdinal();

  /**
   * @return list of columns the file records are sorted by, or null
   */
  List<Integer> sortColumns();

  /**
   * @return if collected, map from column ID to the size of the column in bytes, null otherwise
   */
  Map<Integer, Long> columnSizes();

  /**
   * @return if collected, map from column ID to the count of its non-null values, null otherwise
   */
  Map<Integer, Long> valueCounts();

  /**
   * @return if collected, map from column ID to its null value count, null otherwise
   */
  Map<Integer, Long> nullValueCounts();

  /**
   * @return if collected, map from column ID to value lower bounds, null otherwise
   */
  Map<Integer, ByteBuffer> lowerBounds();

  /**
   * @return if collected, map from column ID to value upper bounds, null otherwise
   */
  Map<Integer, ByteBuffer> upperBounds();

  /**
   * Copies this {@link DataFile data file}. Manifest readers can reuse data file instances; use
   * this method to copy data when collecting files from tasks.
   *
   * @return a copy of this data file
   */
  DataFile copy();
}
| 2,071 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/AppendFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * API for appending new files in a table.
 * <p>
 * This API accumulates file additions, produces a new {@link Snapshot} of the table, and commits
 * that snapshot as the current.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 * <p>
 * As a {@code PendingUpdate<Snapshot>}, {@code apply()} returns the {@link Snapshot} produced by
 * the accumulated additions without committing it.
 */
public interface AppendFiles extends PendingUpdate<Snapshot> {
  /**
   * Append a {@link DataFile} to the table.
   *
   * @param file a data file
   * @return this for method chaining
   */
  AppendFiles appendFile(DataFile file);
}
| 2,072 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/TableScan.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.io.CloseableIterable;
import java.util.Collection;
/**
 * API for configuring a table scan.
 * <p>
 * TableScan objects are immutable and can be shared between threads. Refinement methods, like
 * {@link #select(Collection)} and {@link #filter(Expression)}, create new TableScan instances.
 */
public interface TableScan {
  /**
   * Returns the {@link Table} from which this scan loads data.
   *
   * @return this scan's table
   */
  Table table();

  /**
   * Create a new {@link TableScan} from this scan's configuration that will use the given snapshot
   * by ID.
   *
   * @param snapshotId a snapshot ID
   * @return a new scan based on this with the given snapshot ID
   * @throws IllegalArgumentException if the snapshot cannot be found
   */
  TableScan useSnapshot(long snapshotId);

  /**
   * Create a new {@link TableScan} from this scan's configuration that will use the most recent
   * snapshot as of the given time in milliseconds.
   *
   * @param timestampMillis a timestamp in milliseconds.
   * @return a new scan based on this with the current snapshot at the given time
   * @throws IllegalArgumentException if the snapshot cannot be found
   */
  TableScan asOfTime(long timestampMillis);

  /**
   * Create a new {@link TableScan} from this with the schema as its projection.
   *
   * @param schema a projection schema
   * @return a new scan based on this with the given projection
   */
  TableScan project(Schema schema);

  /**
   * Create a new {@link TableScan} from this that will read the given data columns. This produces
   * an expected schema that includes all fields that are either selected or used by this scan's
   * filter expression.
   * <p>
   * This is a convenience overload that delegates to {@link #select(Collection)}.
   *
   * @param columns column names from the table's schema
   * @return a new scan based on this with the given projection columns
   */
  default TableScan select(String... columns) {
    return select(Lists.newArrayList(columns));
  }

  /**
   * Create a new {@link TableScan} from this that will read the given data columns. This produces
   * an expected schema that includes all fields that are either selected or used by this scan's
   * filter expression.
   *
   * @param columns column names from the table's schema
   * @return a new scan based on this with the given projection columns
   */
  TableScan select(Collection<String> columns);

  /**
   * Create a new {@link TableScan} from the results of this filtered by the {@link Expression}.
   *
   * @param expr a filter expression
   * @return a new scan based on this with results filtered by the expression
   */
  TableScan filter(Expression expr);

  /**
   * Plan the {@link FileScanTask files} that will be read by this scan.
   * <p>
   * Each file has a residual expression that should be applied to filter the file's rows.
   * <p>
   * This simple plan returns file scans for each file from position 0 to the file's length. For
   * planning that will combine small files, split large files, and attempt to balance work, use
   * {@link #planTasks()} instead.
   *
   * @return an Iterable of file tasks that are required by this scan
   */
  CloseableIterable<FileScanTask> planFiles();

  /**
   * Plan the {@link CombinedScanTask tasks} for this scan.
   * <p>
   * Tasks created by this method may read partial input files, multiple input files, or both.
   *
   * @return an Iterable of tasks for this scan
   */
  CloseableIterable<CombinedScanTask> planTasks();

  /**
   * Returns this scan's projection {@link Schema}.
   * <p>
   * If the projection schema was set directly using {@link #project(Schema)}, returns that schema.
   * <p>
   * If the projection schema was set by calling {@link #select(Collection)}, returns a projection
   * schema that includes the selected data fields and any fields used in the filter expression.
   *
   * @return this scan's projection schema
   */
  Schema schema();

  /**
   * Returns this scan's filter {@link Expression}.
   *
   * @return this scan's filter expression
   */
  Expression filter();
}
| 2,073 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ExpireSnapshots.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.List;
import java.util.function.Consumer;
/**
 * API for removing old {@link Snapshot snapshots} from a table.
 * <p>
 * This API accumulates snapshot deletions and commits the new list to the table. This API does not
 * allow deleting the current snapshot.
 * <p>
 * When committing, these changes will be applied to the latest table metadata. Commit conflicts
 * will be resolved by applying the changes to the new latest metadata and reattempting the commit.
 * <p>
 * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
 * deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be used
 * to pass an alternative deletion method.
 *
 * {@link #apply()} returns a list of the snapshots that will be removed.
 */
public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
  /**
   * Expires a specific {@link Snapshot} identified by id.
   * <p>
   * The current table snapshot cannot be expired through this API.
   *
   * @param snapshotId long id of the snapshot to expire
   * @return this for method chaining
   */
  ExpireSnapshots expireSnapshotId(long snapshotId);

  /**
   * Expires all snapshots older than the given timestamp.
   *
   * @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
   * @return this for method chaining
   */
  ExpireSnapshots expireOlderThan(long timestampMillis);

  /**
   * Passes an alternative delete implementation that will be used for manifests and data files.
   * <p>
   * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
   * deleted by snapshots that are expired will be deleted.
   * <p>
   * If this method is not called, unnecessary manifests and data files will still be deleted.
   *
   * @param deleteFunc a function that will be called to delete manifests and data files
   * @return this for method chaining
   */
  ExpireSnapshots deleteWith(Consumer<String> deleteFunc);
}
| 2,074 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/RewriteFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import java.util.Set;
/**
 * API for replacing files in a table.
 * <p>
 * This API accumulates file additions and deletions, produces a new {@link Snapshot} of the
 * changes, and commits that snapshot as the current.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 * If any of the deleted files are no longer in the latest snapshot when reattempting, the commit
 * will throw a {@link ValidationException}.
 */
public interface RewriteFiles extends PendingUpdate<Snapshot> {
  /**
   * Add a rewrite that replaces one set of files with another set that contains the same data.
   * <p>
   * If a file in {@code filesToDelete} is missing from the latest snapshot when the commit is
   * retried, the commit throws a {@link ValidationException}.
   *
   * @param filesToDelete files that will be replaced (deleted), cannot be null or empty.
   * @param filesToAdd files that will be added, cannot be null or empty.
   * @return this for method chaining
   */
  RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd);
}
| 2,075 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/StructLike.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * Interface for accessing data by position in a schema.
 * <p>
 * This interface supports accessing data in top-level fields, not in nested fields.
 */
public interface StructLike {
  /**
   * @return the number of top-level fields in this struct
   */
  int size();

  /**
   * Returns the value of the field at the given position.
   *
   * @param pos a 0-based field position
   * @param javaClass the Java class the value is returned as
   * @param <T> the Java type of the value
   * @return the value of the field at {@code pos}
   */
  <T> T get(int pos, Class<T> javaClass);

  /**
   * Sets the value of the field at the given position.
   *
   * @param pos a 0-based field position
   * @param value the new value for the field
   * @param <T> the Java type of the value
   */
  <T> void set(int pos, T value);
}
| 2,076 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.io.Serializable;
/**
 * A scan task.
 * <p>
 * Tasks extend {@link Serializable}, presumably so they can be shipped to distributed workers —
 * confirm with the engine integrations that consume them.
 */
public interface ScanTask extends Serializable {
  /**
   * @return true if this is a {@link FileScanTask}, false otherwise.
   */
  default boolean isFileScanTask() {
    // default is false; file scan task implementations are expected to override this
    return false;
  }

  /**
   * @return this cast to {@link FileScanTask} if it is one
   * @throws IllegalStateException if this is not a {@link FileScanTask}
   */
  default FileScanTask asFileScanTask() {
    throw new IllegalStateException("Not a FileScanTask: " + this);
  }

  /**
   * @return this cast to {@link CombinedScanTask} if it is one
   * @throws IllegalStateException if this is not a {@link CombinedScanTask}
   */
  default CombinedScanTask asCombinedScanTask() {
    throw new IllegalStateException("Not a CombinedScanTask: " + this);
  }
}
| 2,077 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Metrics.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Map;
import static com.google.common.collect.ImmutableMap.copyOf;
/**
 * Serializable holder for file metrics: a row count plus per-column-id maps of sizes, value
 * counts, null-value counts, and lower/upper bounds.
 * <p>
 * Any metric may be null when it was not collected.
 */
public class Metrics implements Serializable {
  private Long rowCount = null;
  private Map<Integer, Long> columnSizes = null;
  private Map<Integer, Long> valueCounts = null;
  private Map<Integer, Long> nullValueCounts = null;
  private Map<Integer, ByteBuffer> lowerBounds = null;
  private Map<Integer, ByteBuffer> upperBounds = null;

  /** Creates an empty Metrics instance; every accessor returns null. */
  public Metrics() {
  }

  /**
   * Creates metrics without value bounds.
   *
   * @param rowCount the number of rows, or null
   * @param columnSizes map from column id to size in bytes, or null
   * @param valueCounts map from column id to non-null value count, or null
   * @param nullValueCounts map from column id to null value count, or null
   */
  public Metrics(Long rowCount,
                 Map<Integer, Long> columnSizes,
                 Map<Integer, Long> valueCounts,
                 Map<Integer, Long> nullValueCounts) {
    // delegate to the full constructor with no bounds
    this(rowCount, columnSizes, valueCounts, nullValueCounts, null, null);
  }

  /**
   * Creates metrics with value bounds.
   *
   * @param rowCount the number of rows, or null
   * @param columnSizes map from column id to size in bytes, or null
   * @param valueCounts map from column id to non-null value count, or null
   * @param nullValueCounts map from column id to null value count, or null
   * @param lowerBounds map from column id to serialized lower bound, or null
   * @param upperBounds map from column id to serialized upper bound, or null
   */
  public Metrics(Long rowCount,
                 Map<Integer, Long> columnSizes,
                 Map<Integer, Long> valueCounts,
                 Map<Integer, Long> nullValueCounts,
                 Map<Integer, ByteBuffer> lowerBounds,
                 Map<Integer, ByteBuffer> upperBounds) {
    this.rowCount = rowCount;
    this.columnSizes = columnSizes;
    this.valueCounts = valueCounts;
    this.nullValueCounts = nullValueCounts;
    this.lowerBounds = lowerBounds;
    this.upperBounds = upperBounds;
  }

  /** @return the number of rows, or null if not collected */
  public Long recordCount() {
    return rowCount;
  }

  /** @return map from column id to size in bytes, or null if not collected */
  public Map<Integer, Long> columnSizes() {
    return columnSizes;
  }

  /** @return map from column id to non-null value count, or null if not collected */
  public Map<Integer, Long> valueCounts() {
    return valueCounts;
  }

  /** @return map from column id to null value count, or null if not collected */
  public Map<Integer, Long> nullValueCounts() {
    return nullValueCounts;
  }

  /** @return map from column id to lower bound, or null if not collected */
  public Map<Integer, ByteBuffer> lowerBounds() {
    return lowerBounds;
  }

  /** @return map from column id to upper bound, or null if not collected */
  public Map<Integer, ByteBuffer> upperBounds() {
    return upperBounds;
  }
}
| 2,078 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Transaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;
/**
 * A transaction for performing multiple updates to a table.
 */
public interface Transaction {
  /**
   * Return the {@link Table} that this transaction will update.
   *
   * @return this transaction's table
   */
  Table table();

  /**
   * Create a new {@link UpdateProperties} to update table properties.
   *
   * @return a new {@link UpdateProperties}
   */
  UpdateProperties updateProperties();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table.
   *
   * @return a new {@link AppendFiles}
   */
  AppendFiles newAppend();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table.
   * <p>
   * Using this method signals to the underlying implementation that the append should not perform
   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
   * because the fast commit may cause split planning to slow down over time.
   * <p>
   * Implementations may not support fast appends, in which case this will return the same appender
   * as {@link #newAppend()}.
   *
   * @return a new {@link AppendFiles}
   */
  default AppendFiles newFastAppend() {
    return newAppend();
  }

  /**
   * Create a new {@link RewriteFiles rewrite API} to replace files in this table.
   *
   * @return a new {@link RewriteFiles}
   */
  RewriteFiles newRewrite();

  /**
   * Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
   *
   * @return a new {@link OverwriteFiles}
   */
  OverwriteFiles newOverwrite();

  /**
   * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
   * overwrite partitions in the table with new data.
   * <p>
   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
   *
   * @return a new {@link ReplacePartitions}
   */
  ReplacePartitions newReplacePartitions();

  /**
   * Create a new {@link DeleteFiles delete API} to delete files in this table.
   *
   * @return a new {@link DeleteFiles}
   */
  DeleteFiles newDelete();

  /**
   * Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table.
   *
   * @return a new {@link ExpireSnapshots}
   */
  ExpireSnapshots expireSnapshots();

  /**
   * Apply the pending changes from all actions and commit.
   *
   * @throws ValidationException If any update cannot be applied to the current table metadata.
   * @throws CommitFailedException If the updates cannot be committed due to conflicts.
   */
  void commitTransaction();
}
| 2,079 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/IndexByName.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Map;
/**
 * Schema visitor that builds a map from each field's full dotted name to its field id.
 * <p>
 * The enclosing field-name path is taken from the visitor's inherited {@code fieldNames}
 * stack, so nested fields are keyed as "outer.inner".
 */
public class IndexByName extends TypeUtil.SchemaVisitor<Map<String, Integer>> {
  private static final Joiner DOT = Joiner.on(".");

  private final Map<String, Integer> nameToId = Maps.newHashMap();

  @Override
  public Map<String, Integer> schema(Schema schema, Map<String, Integer> structResult) {
    return nameToId;
  }

  @Override
  public Map<String, Integer> struct(Types.StructType struct, List<Map<String, Integer>> fieldResults) {
    return nameToId;
  }

  @Override
  public Map<String, Integer> field(Types.NestedField field, Map<String, Integer> fieldResult) {
    addField(field.name(), field.fieldId());
    return null;
  }

  @Override
  public Map<String, Integer> list(Types.ListType list, Map<String, Integer> elementResult) {
    list.fields().forEach(f -> addField(f.name(), f.fieldId()));
    return null;
  }

  @Override
  public Map<String, Integer> map(Types.MapType map, Map<String, Integer> keyResult, Map<String, Integer> valueResult) {
    map.fields().forEach(f -> addField(f.name(), f.fieldId()));
    return null;
  }

  // records one field under its full dotted name, prefixing the current name path if any
  private void addField(String name, int fieldId) {
    String fullName;
    if (fieldNames.isEmpty()) {
      fullName = name;
    } else {
      fullName = DOT.join(DOT.join(fieldNames.descendingIterator()), name);
    }
    nameToId.put(fullName, fieldId);
  }
}
| 2,080 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Type.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.List;
/**
 * Interface for all Iceberg data types.
 * <p>
 * Types are either primitives or nested types (structs, lists, maps). The {@code is*} and
 * {@code as*} methods default to false / throwing {@link IllegalArgumentException} and are
 * overridden by the matching subtypes, so callers can safely narrow after an {@code is*} check.
 */
public interface Type extends Serializable {
  /** Type identifiers, each paired with the Java class used to represent its values. */
  enum TypeID {
    BOOLEAN(Boolean.class),
    INTEGER(Integer.class),
    LONG(Long.class),
    FLOAT(Float.class),
    DOUBLE(Double.class),
    DATE(Integer.class),
    TIME(Long.class),
    TIMESTAMP(Long.class),
    STRING(CharSequence.class),
    UUID(java.util.UUID.class),
    FIXED(ByteBuffer.class),
    BINARY(ByteBuffer.class),
    DECIMAL(BigDecimal.class),
    STRUCT(Void.class),
    LIST(Void.class),
    MAP(Void.class);

    private final Class<?> javaClass;

    TypeID(Class<?> javaClass) {
      this.javaClass = javaClass;
    }

    /** Returns the Java class used to represent values of this type. */
    public Class<?> javaClass() {
      return javaClass;
    }
  }

  /** Returns this type's {@link TypeID}. */
  TypeID typeId();

  default boolean isPrimitiveType() {
    return false;
  }

  /** Returns this as a primitive type. @throws IllegalArgumentException if not primitive */
  default PrimitiveType asPrimitiveType() {
    throw new IllegalArgumentException("Not a primitive type: " + this);
  }

  /** Returns this as a struct type. @throws IllegalArgumentException if not a struct */
  default Types.StructType asStructType() {
    throw new IllegalArgumentException("Not a struct type: " + this);
  }

  /** Returns this as a list type. @throws IllegalArgumentException if not a list */
  default Types.ListType asListType() {
    throw new IllegalArgumentException("Not a list type: " + this);
  }

  /** Returns this as a map type. @throws IllegalArgumentException if not a map */
  default Types.MapType asMapType() {
    throw new IllegalArgumentException("Not a map type: " + this);
  }

  default boolean isNestedType() {
    return false;
  }

  default boolean isStructType() {
    return false;
  }

  default boolean isListType() {
    return false;
  }

  default boolean isMapType() {
    return false;
  }

  /** Returns this as a nested type. @throws IllegalArgumentException if not nested */
  default NestedType asNestedType() {
    throw new IllegalArgumentException("Not a nested type: " + this);
  }

  /**
   * Base class for primitive types.
   * <p>
   * Serialization is routed through a {@code PrimitiveHolder} proxy of the type's string
   * representation (see {@code writeReplace}).
   */
  abstract class PrimitiveType implements Type {
    @Override
    public boolean isPrimitiveType() {
      return true;
    }

    @Override
    public PrimitiveType asPrimitiveType() {
      return this;
    }

    // serialization proxy: replace the instance with a holder of its string form
    Object writeReplace() throws ObjectStreamException {
      return new PrimitiveHolder(toString());
    }
  }

  /** Base class for nested types: structs, lists, and maps. */
  abstract class NestedType implements Type {
    @Override
    public boolean isNestedType() {
      return true;
    }

    @Override
    public NestedType asNestedType() {
      return this;
    }

    /** Returns the fields nested in this type. */
    public abstract List<Types.NestedField> fields();

    /** Returns the type of the field with the given name. */
    public abstract Type fieldType(String name);

    /** Returns the field with the given id. */
    public abstract Types.NestedField field(int id);
  }
}
| 2,081 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/AssignFreshIds.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.Iterator;
import java.util.List;
import java.util.function.Supplier;
/**
 * Schema visitor that rebuilds a schema, replacing every field, element, key, and value id
 * with a fresh id from the supplied {@link TypeUtil.NextID} generator.
 */
class AssignFreshIds extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final TypeUtil.NextID nextId;

  AssignFreshIds(TypeUtil.NextID nextId) {
    this.nextId = nextId;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> future) {
    return future.get();
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> futures) {
    List<Types.NestedField> fields = struct.fields();
    int length = fields.size();

    // reserve ids for this struct's own fields before visiting any nested types,
    // so a struct's field ids are always lower than the ids inside those fields
    List<Integer> newIds = Lists.newArrayListWithExpectedSize(length);
    for (int i = 0; i < length; i += 1) {
      newIds.add(nextId.get());
    }

    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(length);
    Iterator<Type> types = futures.iterator();
    int i = 0;
    for (Types.NestedField field : fields) {
      Type type = types.next();
      newFields.add(field.isOptional()
          ? Types.NestedField.optional(newIds.get(i), field.name(), type)
          : Types.NestedField.required(newIds.get(i), field.name(), type));
      i += 1;
    }

    return Types.StructType.of(newFields);
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> future) {
    return future.get();
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> future) {
    int newId = nextId.get();
    Type elementType = future.get();
    if (list.isElementOptional()) {
      return Types.ListType.ofOptional(newId, elementType);
    }
    return Types.ListType.ofRequired(newId, elementType);
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyFuture, Supplier<Type> valueFuture) {
    int newKeyId = nextId.get();
    int newValueId = nextId.get();
    Type keyType = keyFuture.get();
    Type valueType = valueFuture.get();
    if (map.isValueOptional()) {
      return Types.MapType.ofOptional(newKeyId, newValueId, keyType, valueType);
    }
    return Types.MapType.ofRequired(newKeyId, newValueId, keyType, valueType);
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    return primitive; // primitives carry no ids
  }
}
| 2,082 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/GetProjectedIds.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Set;
/**
 * Schema visitor that collects the set of "leaf" field ids: ids whose nested visit
 * produced no result of its own (i.e. fields that are not fully-projected structs).
 */
class GetProjectedIds extends TypeUtil.SchemaVisitor<Set<Integer>> {
  private final Set<Integer> fieldIds = Sets.newHashSet();

  @Override
  public Set<Integer> schema(Schema schema, Set<Integer> structResult) {
    return fieldIds;
  }

  @Override
  public Set<Integer> struct(Types.StructType struct, List<Set<Integer>> fieldResults) {
    return fieldIds;
  }

  @Override
  public Set<Integer> field(Types.NestedField field, Set<Integer> fieldResult) {
    if (fieldResult == null) {
      fieldIds.add(field.fieldId());
    }
    return fieldIds;
  }

  @Override
  public Set<Integer> list(Types.ListType list, Set<Integer> elementResult) {
    if (elementResult == null) {
      list.fields().forEach(f -> fieldIds.add(f.fieldId()));
    }
    return fieldIds;
  }

  @Override
  public Set<Integer> map(Types.MapType map, Set<Integer> keyResult, Set<Integer> valueResult) {
    if (valueResult == null) {
      map.fields().forEach(f -> fieldIds.add(f.fieldId()));
    }
    return fieldIds;
  }
}
| 2,083 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/ReassignIds.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.function.Supplier;
/**
 * Schema visitor that rewrites a schema's ids to match the ids assigned to the same
 * names/positions in a source schema.
 * <p>
 * Not thread-safe: {@code sourceType} tracks the node in the source schema that
 * corresponds to the node currently being visited, and is saved and restored around
 * every nested visit (try/finally). The statement order here is load-bearing; each
 * {@code future.get()} call must see the correct {@code sourceType}.
 */
class ReassignIds extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final Schema sourceSchema;
  // cursor into sourceSchema, aligned with the node being visited; null outside a visit
  private Type sourceType;

  ReassignIds(Schema sourceSchema) {
    this.sourceSchema = sourceSchema;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> future) {
    // seed the cursor at the source root, clear it when the traversal completes
    this.sourceType = sourceSchema.asStruct();
    try {
      return future.get();
    } finally {
      this.sourceType = null;
    }
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> fieldTypes) {
    Preconditions.checkNotNull(sourceType, "Evaluation must start with a schema.");
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);
    Types.StructType sourceStruct = sourceType.asStructType();
    List<Types.NestedField> fields = struct.fields();
    int length = fields.size();
    // materializing fieldTypes runs the nested visits (field() adjusts sourceType per field)
    List<Type> types = Lists.newArrayList(fieldTypes);
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(length);
    for (int i = 0; i < length; i += 1) {
      Types.NestedField field = fields.get(i);
      // take the id assigned to this field name in the source schema
      // NOTE(review): assumes every name exists in the source struct; a missing
      // name would NPE here — confirm callers guarantee name coverage
      int sourceFieldId = sourceStruct.field(field.name()).fieldId();
      if (field.isRequired()) {
        newFields.add(Types.NestedField.required(sourceFieldId, field.name(), types.get(i)));
      } else {
        newFields.add(Types.NestedField.optional(sourceFieldId, field.name(), types.get(i)));
      }
    }
    return Types.StructType.of(newFields);
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> future) {
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);
    Types.StructType sourceStruct = sourceType.asStructType();
    Types.NestedField sourceField = sourceStruct.field(field.name());
    // descend into the matching source field for the nested visit, then restore
    this.sourceType = sourceField.type();
    try {
      return future.get();
    } finally {
      sourceType = sourceStruct;
    }
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> elementTypeFuture) {
    Preconditions.checkArgument(sourceType.isListType(), "Not a list: " + sourceType);
    Types.ListType sourceList = sourceType.asListType();
    int sourceElementId = sourceList.elementId();
    // descend into the source element type for the nested visit, then restore
    this.sourceType = sourceList.elementType();
    try {
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(sourceElementId, elementTypeFuture.get());
      } else {
        return Types.ListType.ofRequired(sourceElementId, elementTypeFuture.get());
      }
    } finally {
      this.sourceType = sourceList;
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyTypeFuture, Supplier<Type> valueTypeFuture) {
    Preconditions.checkArgument(sourceType.isMapType(), "Not a map: " + sourceType);
    Types.MapType sourceMap = sourceType.asMapType();
    int sourceKeyId = sourceMap.keyId();
    int sourceValueId = sourceMap.valueId();
    try {
      // visit the key and value in order, pointing the cursor at each in turn
      this.sourceType = sourceMap.keyType();
      Type keyType = keyTypeFuture.get();
      this.sourceType = sourceMap.valueType();
      Type valueType = valueTypeFuture.get();
      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(sourceKeyId, sourceValueId, keyType, valueType);
      } else {
        return Types.MapType.ofRequired(sourceKeyId, sourceValueId, keyType, valueType);
      }
    } finally {
      this.sourceType = sourceMap;
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    return primitive; // nothing to reassign
  }
}
| 2,084 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/PruneColumns.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Set;
/**
 * Schema visitor that prunes a type down to the selected field ids, returning the
 * original type objects unchanged wherever the projection is identical (identity reuse)
 * and null wherever nothing was selected.
 */
class PruneColumns extends TypeUtil.SchemaVisitor<Type> {
  private final Set<Integer> selected;

  public PruneColumns(Set<Integer> selected) {
    this.selected = selected;
  }

  @Override
  public Type schema(Schema schema, Type structResult) {
    return structResult;
  }

  @Override
  public Type struct(Types.StructType struct, List<Type> fieldResults) {
    List<Types.NestedField> fields = struct.fields();
    List<Types.NestedField> selectedFields = Lists.newArrayListWithExpectedSize(fields.size());
    boolean sameTypes = true;

    for (int i = 0; i < fieldResults.size(); i += 1) {
      Types.NestedField field = fields.get(i);
      Type projected = fieldResults.get(i);
      // identity comparison is enough: equal identity implies equal structure
      if (field.type() == projected) {
        selectedFields.add(field);
      } else if (projected != null) {
        sameTypes = false; // at least one field was narrowed
        if (field.isOptional()) {
          selectedFields.add(
              Types.NestedField.optional(field.fieldId(), field.name(), projected));
        } else {
          selectedFields.add(
              Types.NestedField.required(field.fieldId(), field.name(), projected));
        }
      }
    }

    if (selectedFields.isEmpty()) {
      return null;
    }
    if (sameTypes && selectedFields.size() == fields.size()) {
      return struct; // full, unchanged projection: reuse the original
    }
    return Types.StructType.of(selectedFields);
  }

  @Override
  public Type field(Types.NestedField field, Type fieldResult) {
    if (selected.contains(field.fieldId())) {
      return field.type();
    }
    // may be a partial projection of a nested struct, or null if nothing was selected
    return fieldResult;
  }

  @Override
  public Type list(Types.ListType list, Type elementResult) {
    if (selected.contains(list.elementId())) {
      return list;
    }
    if (elementResult == null) {
      return null;
    }
    if (list.elementType() == elementResult) {
      return list;
    }
    return list.isElementOptional()
        ? Types.ListType.ofOptional(list.elementId(), elementResult)
        : Types.ListType.ofRequired(list.elementId(), elementResult);
  }

  @Override
  public Type map(Types.MapType map, Type ignored, Type valueResult) {
    if (selected.contains(map.valueId())) {
      return map;
    }
    if (valueResult != null) {
      if (map.valueType() == valueResult) {
        return map;
      }
      return map.isValueOptional()
          ? Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueResult)
          : Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueResult);
    }
    if (selected.contains(map.keyId())) {
      // right now, maps can't be selected without values
      return map;
    }
    return null;
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    return null;
  }
}
| 2,085 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/PrimitiveHolder.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import java.io.ObjectStreamException;
import java.io.Serializable;
/**
 * Serialization proxy that replaces primitive types in Java serialization.
 * <p>
 * {@code Type.PrimitiveType.writeReplace()} serializes a primitive type as this holder
 * of its string representation; on deserialization, {@link #readResolve()} parses the
 * string back with {@code Types.fromPrimitiveString}, so parameter-less primitives
 * resolve to their singleton instances. The member names here ({@code readResolve},
 * the no-arg constructor) are part of the Java serialization contract — do not rename.
 */
class PrimitiveHolder implements Serializable {
  // string form produced by the primitive type's toString(), e.g. "int", "decimal(9, 2)"
  private String typeAsString = null;

  /**
   * Constructor for Java serialization.
   */
  PrimitiveHolder() {
  }

  PrimitiveHolder(String typeAsString) {
    this.typeAsString = typeAsString;
  }

  // invoked by serialization machinery; substitutes the parsed type for this holder
  Object readResolve() throws ObjectStreamException {
    return Types.fromPrimitiveString(typeAsString);
  }
}
| 2,086 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/CheckCompatibility.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Supplier;
import static com.netflix.iceberg.types.TypeUtil.isPromotionAllowed;
/**
 * Schema visitor that produces a list of human-readable compatibility errors between a
 * read schema and a write schema.
 * <p>
 * The visitor traverses the read schema while {@code currentType} tracks the matching
 * node in the write schema; every nested visit saves and restores {@code currentType}
 * (try/finally), so statement order is load-bearing. Error strings that start with ":"
 * carry no field-name prefix yet; {@code field(...)} prepends names as the traversal
 * unwinds, producing dotted paths for nested fields.
 */
public class CheckCompatibility extends TypeUtil.CustomOrderSchemaVisitor<List<String>> {
  /**
   * Returns a list of compatibility errors for writing with the given write schema.
   *
   * @param readSchema a read schema
   * @param writeSchema a write schema
   * @return a list of error details, or an empty list if there are no compatibility problems
   */
  public static List<String> writeCompatibilityErrors(Schema readSchema, Schema writeSchema) {
    return TypeUtil.visit(readSchema, new CheckCompatibility(writeSchema, true));
  }

  /**
   * Returns a list of compatibility errors for reading with the given read schema.
   *
   * @param readSchema a read schema
   * @param writeSchema a write schema
   * @return a list of error details, or an empty list if there are no compatibility problems
   */
  public static List<String> readCompatibilityErrors(Schema readSchema, Schema writeSchema) {
    return TypeUtil.visit(readSchema, new CheckCompatibility(writeSchema, false));
  }

  private static final List<String> NO_ERRORS = ImmutableList.of();

  private final Schema schema;
  // when true (write checks), reordered fields are also reported as errors
  private final boolean checkOrdering;

  // the current file schema, maintained while traversing a write schema
  private Type currentType;

  private CheckCompatibility(Schema schema, boolean checkOrdering) {
    this.schema = schema;
    this.checkOrdering = checkOrdering;
  }

  @Override
  public List<String> schema(Schema readSchema, Supplier<List<String>> structErrors) {
    // seed the cursor at the write-schema root, clear it when the traversal completes
    this.currentType = this.schema.asStruct();
    try {
      return structErrors.get();
    } finally {
      this.currentType = null;
    }
  }

  @Override
  public List<String> struct(Types.StructType readStruct, Iterable<List<String>> fieldErrorLists) {
    // NOTE(review): this checks readStruct for null, but the message suggests
    // currentType was the intended target — confirm against TypeUtil.visit's contract
    Preconditions.checkNotNull(readStruct, "Evaluation must start with a schema.");
    if (!currentType.isStructType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a struct", currentType));
    }

    // collect errors reported by the per-field visits
    List<String> errors = Lists.newArrayList();
    for (List<String> fieldErrors : fieldErrorLists) {
      errors.addAll(fieldErrors);
    }

    // detect reordered fields
    if (checkOrdering) {
      Types.StructType struct = currentType.asStructType();
      List<Types.NestedField> fields = struct.fields();
      // map each write-schema field id to its position, then require that read-schema
      // fields appear in strictly increasing write-schema positions
      Map<Integer, Integer> idToOrdinal = Maps.newHashMap();
      for (int i = 0; i < fields.size(); i += 1) {
        idToOrdinal.put(fields.get(i).fieldId(), i);
      }
      int lastOrdinal = -1;
      for (Types.NestedField readField : readStruct.fields()) {
        int id = readField.fieldId();
        Types.NestedField field = struct.field(id);
        if (field != null) {
          int ordinal = idToOrdinal.get(id);
          if (lastOrdinal >= ordinal) {
            errors.add(
                readField.name() + " is out of order, before " + fields.get(lastOrdinal).name());
          }
          lastOrdinal = ordinal;
        }
      }
    }

    return errors;
  }

  @Override
  public List<String> field(Types.NestedField readField, Supplier<List<String>> fieldErrors) {
    Types.StructType struct = currentType.asStructType();
    Types.NestedField field = struct.field(readField.fieldId());
    List<String> errors = Lists.newArrayList();
    if (field == null) {
      if (readField.isRequired()) {
        return ImmutableList.of(readField.name() + " is required, but is missing");
      }
      // if the field is optional, it will be read as nulls
      return NO_ERRORS;
    }

    // descend into the matching write-schema field type, then restore
    this.currentType = field.type();
    try {
      if (readField.isRequired() && field.isOptional()) {
        errors.add(readField.name() + " should be required, but is optional");
      }

      // prefix this field's name onto the errors from the nested visit
      for (String error : fieldErrors.get()) {
        if (error.startsWith(":")) {
          // this is the last field name before the error message
          errors.add(readField.name() + error);
        } else {
          // this has a nested field, add '.' for nesting
          errors.add(readField.name() + "." + error);
        }
      }
      return errors;
    } finally {
      this.currentType = struct;
    }
  }

  @Override
  public List<String> list(Types.ListType readList, Supplier<List<String>> elementErrors) {
    if (!currentType.isListType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a list", currentType));
    }

    Types.ListType list = currentType.asNestedType().asListType();
    List<String> errors = Lists.newArrayList();
    // descend into the write-schema element type, then restore
    this.currentType = list.elementType();
    try {
      if (readList.isElementRequired() && list.isElementOptional()) {
        errors.add(": elements should be required, but are optional");
      }

      errors.addAll(elementErrors.get());
      return errors;
    } finally {
      this.currentType = list;
    }
  }

  @Override
  public List<String> map(Types.MapType readMap, Supplier<List<String>> keyErrors, Supplier<List<String>> valueErrors) {
    if (!currentType.isMapType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a map", currentType));
    }

    Types.MapType map = currentType.asNestedType().asMapType();
    List<String> errors = Lists.newArrayList();
    try {
      if (readMap.isValueRequired() && map.isValueOptional()) {
        errors.add(": values should be required, but are optional");
      }

      // visit key then value, pointing the cursor at each in turn
      this.currentType = map.keyType();
      errors.addAll(keyErrors.get());

      this.currentType = map.valueType();
      errors.addAll(valueErrors.get());

      return errors;
    } finally {
      this.currentType = map;
    }
  }

  @Override
  public List<String> primitive(Type.PrimitiveType readPrimitive) {
    if (currentType.equals(readPrimitive)) {
      return NO_ERRORS;
    }

    if (!currentType.isPrimitiveType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a %s",
          currentType.typeId().toString().toLowerCase(Locale.ENGLISH), readPrimitive));
    }

    if (!isPromotionAllowed(currentType.asPrimitiveType(), readPrimitive)) {
      return ImmutableList.of(String.format(": %s cannot be promoted to %s",
          currentType, readPrimitive));
    }

    // both are primitives and promotion is allowed to the read type
    return NO_ERRORS;
  }
}
| 2,087 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Types.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.types.Type.NestedType;
import com.netflix.iceberg.types.Type.PrimitiveType;
import java.io.Serializable;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Types {
// Lookup table from the lower-case string form of each parameter-less primitive type
// to its singleton instance; used by fromPrimitiveString. Parameterized types
// (fixed, decimal) are parsed by regex instead.
private static final ImmutableMap<String, PrimitiveType> TYPES = ImmutableMap
    .<String, PrimitiveType>builder()
    .put(BooleanType.get().toString(), BooleanType.get())
    .put(IntegerType.get().toString(), IntegerType.get())
    .put(LongType.get().toString(), LongType.get())
    .put(FloatType.get().toString(), FloatType.get())
    .put(DoubleType.get().toString(), DoubleType.get())
    .put(DateType.get().toString(), DateType.get())
    .put(TimeType.get().toString(), TimeType.get())
    .put(TimestampType.withZone().toString(), TimestampType.withZone())
    .put(TimestampType.withoutZone().toString(), TimestampType.withoutZone())
    .put(StringType.get().toString(), StringType.get())
    .put(UUIDType.get().toString(), UUIDType.get())
    .put(BinaryType.get().toString(), BinaryType.get())
    .build();
// matches strings like "fixed[16]"
private static final Pattern FIXED = Pattern.compile("fixed\\[(\\d+)\\]");
// matches strings like "decimal(9, 2)". Whitespace after the comma is optional (\s*) so
// that both the canonical DecimalType#toString form "decimal(9, 2)" and the compact form
// "decimal(9,2)" parse; previously \s+ rejected the compact form.
private static final Pattern DECIMAL = Pattern.compile("decimal\\((\\d+),\\s*(\\d+)\\)");

/**
 * Parses a primitive type from its string representation (case-insensitive).
 *
 * @param typeString a string such as "int", "timestamptz", "fixed[16]", or "decimal(9, 2)"
 * @return the matching {@link PrimitiveType}
 * @throws IllegalArgumentException if the string does not name a primitive type
 */
public static PrimitiveType fromPrimitiveString(String typeString) {
  String lowerTypeString = typeString.toLowerCase(Locale.ENGLISH);
  if (TYPES.containsKey(lowerTypeString)) {
    return TYPES.get(lowerTypeString);
  }

  Matcher fixed = FIXED.matcher(lowerTypeString);
  if (fixed.matches()) {
    return FixedType.ofLength(Integer.parseInt(fixed.group(1)));
  }

  Matcher decimal = DECIMAL.matcher(lowerTypeString);
  if (decimal.matches()) {
    return DecimalType.of(
        Integer.parseInt(decimal.group(1)),
        Integer.parseInt(decimal.group(2)));
  }

  throw new IllegalArgumentException("Cannot parse type string to primitive: " + typeString);
}
// NOTE(review): unlike TimeType below, these singleton classes do not declare a private
// constructor, so extra instances can be created with `new` — confirm whether intended.

/** A boolean type. Singleton; obtain via {@link #get()}. */
public static class BooleanType extends PrimitiveType {
  private static final BooleanType INSTANCE = new BooleanType();

  public static BooleanType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.BOOLEAN;
  }

  @Override
  public String toString() {
    return "boolean";
  }
}

/** A 32-bit integer type. Singleton; obtain via {@link #get()}. */
public static class IntegerType extends PrimitiveType {
  private static final IntegerType INSTANCE = new IntegerType();

  public static IntegerType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.INTEGER;
  }

  @Override
  public String toString() {
    return "int";
  }
}

/** A 64-bit long type. Singleton; obtain via {@link #get()}. */
public static class LongType extends PrimitiveType {
  private static final LongType INSTANCE = new LongType();

  public static LongType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.LONG;
  }

  @Override
  public String toString() {
    return "long";
  }
}

/** A single-precision float type. Singleton; obtain via {@link #get()}. */
public static class FloatType extends PrimitiveType {
  private static final FloatType INSTANCE = new FloatType();

  public static FloatType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.FLOAT;
  }

  @Override
  public String toString() {
    return "float";
  }
}

/** A double-precision float type. Singleton; obtain via {@link #get()}. */
public static class DoubleType extends PrimitiveType {
  private static final DoubleType INSTANCE = new DoubleType();

  public static DoubleType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.DOUBLE;
  }

  @Override
  public String toString() {
    return "double";
  }
}

/** A date type, represented as an Integer (see TypeID.DATE). Singleton; obtain via {@link #get()}. */
public static class DateType extends PrimitiveType {
  private static final DateType INSTANCE = new DateType();

  public static DateType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.DATE;
  }

  @Override
  public String toString() {
    return "date";
  }
}

/** A time-of-day type, represented as a Long (see TypeID.TIME). Singleton; obtain via {@link #get()}. */
public static class TimeType extends PrimitiveType {
  private static final TimeType INSTANCE = new TimeType();

  public static TimeType get() {
    return INSTANCE;
  }

  private TimeType() {
  }

  @Override
  public TypeID typeId() {
    return TypeID.TIME;
  }

  @Override
  public String toString() {
    return "time";
  }
}
/**
 * A timestamp type, represented as a Long (see TypeID.TIMESTAMP).
 * <p>
 * Two singleton variants exist: {@link #withZone()} ("timestamptz"), whose values are
 * adjusted to UTC, and {@link #withoutZone()} ("timestamp"), whose values are not.
 */
public static class TimestampType extends PrimitiveType {
  private static final TimestampType INSTANCE_WITH_ZONE = new TimestampType(true);
  private static final TimestampType INSTANCE_WITHOUT_ZONE = new TimestampType(false);

  public static TimestampType withZone() {
    return INSTANCE_WITH_ZONE;
  }

  public static TimestampType withoutZone() {
    return INSTANCE_WITHOUT_ZONE;
  }

  // whether values of this type are adjusted to UTC
  private final boolean adjustToUTC;

  private TimestampType(boolean adjustToUTC) {
    this.adjustToUTC = adjustToUTC;
  }

  public boolean shouldAdjustToUTC() {
    return adjustToUTC;
  }

  @Override
  public TypeID typeId() {
    return TypeID.TIMESTAMP;
  }

  @Override
  public String toString() {
    if (shouldAdjustToUTC()) {
      return "timestamptz";
    } else {
      return "timestamp";
    }
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    } else if (o == null || getClass() != o.getClass()) {
      return false;
    }

    TimestampType timestampType = (TimestampType) o;
    return adjustToUTC == timestampType.adjustToUTC;
  }

  @Override
  public int hashCode() {
    return Objects.hash(TimestampType.class, adjustToUTC);
  }
}
/** A string type, represented as a CharSequence (see TypeID.STRING). Singleton; obtain via {@link #get()}. */
public static class StringType extends PrimitiveType {
  private static final StringType INSTANCE = new StringType();

  public static StringType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.STRING;
  }

  @Override
  public String toString() {
    return "string";
  }
}

/** A UUID type. Singleton; obtain via {@link #get()}. */
public static class UUIDType extends PrimitiveType {
  private static final UUIDType INSTANCE = new UUIDType();

  public static UUIDType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.UUID;
  }

  @Override
  public String toString() {
    return "uuid";
  }
}

/** A fixed-length binary type. Instances are parameterized by length, so this is not a singleton. */
public static class FixedType extends PrimitiveType {
  public static FixedType ofLength(int length) {
    return new FixedType(length);
  }

  // fixed length in bytes
  private final int length;

  private FixedType(int length) {
    this.length = length;
  }

  public int length() {
    return length;
  }

  @Override
  public TypeID typeId() {
    return TypeID.FIXED;
  }

  @Override
  public String toString() {
    return String.format("fixed[%d]", length);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    } else if (o == null || getClass() != o.getClass()) {
      return false;
    }

    FixedType fixedType = (FixedType) o;
    return length == fixedType.length;
  }

  @Override
  public int hashCode() {
    return Objects.hash(FixedType.class, length);
  }
}

/** A variable-length binary type. Singleton; obtain via {@link #get()}. */
public static class BinaryType extends PrimitiveType {
  private static final BinaryType INSTANCE = new BinaryType();

  public static BinaryType get() {
    return INSTANCE;
  }

  @Override
  public TypeID typeId() {
    return TypeID.BINARY;
  }

  @Override
  public String toString() {
    return "binary";
  }
}
/**
 * A decimal type with a fixed precision and scale. Precision is limited to 38 digits.
 * Instances are parameterized, so this is not a singleton; obtain via {@link #of(int, int)}.
 */
public static class DecimalType extends PrimitiveType {
  public static DecimalType of(int precision, int scale) {
    return new DecimalType(precision, scale);
  }

  private final int scale;
  private final int precision;

  private DecimalType(int precision, int scale) {
    Preconditions.checkArgument(precision <= 38,
        "Decimals with precision larger than 38 are not supported: %s", precision);
    this.scale = scale;
    this.precision = precision;
  }

  public int scale() {
    return scale;
  }

  public int precision() {
    return precision;
  }

  @Override
  public TypeID typeId() {
    return TypeID.DECIMAL;
  }

  @Override
  public String toString() {
    return String.format("decimal(%d, %d)", precision, scale);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    DecimalType that = (DecimalType) o;
    return scale == that.scale && precision == that.precision;
  }

  @Override
  public int hashCode() {
    return Objects.hash(DecimalType.class, scale, precision);
  }
}
/**
 * An immutable named, typed field with a unique id and an optional/required flag.
 */
public static class NestedField implements Serializable {
  /** Creates a field whose value may be null. */
  public static NestedField optional(int id, String name, Type type) {
    return new NestedField(true, id, name, type);
  }

  /** Creates a field whose value must not be null. */
  public static NestedField required(int id, String name, Type type) {
    return new NestedField(false, id, name, type);
  }

  private final boolean isOptional;
  private final int id;
  private final String name;
  private final Type type;

  private NestedField(boolean isOptional, int id, String name, Type type) {
    Preconditions.checkNotNull(name, "Name cannot be null");
    Preconditions.checkNotNull(type, "Type cannot be null");
    this.isOptional = isOptional;
    this.id = id;
    this.name = name;
    this.type = type;
  }

  public boolean isOptional() {
    return isOptional;
  }

  public boolean isRequired() {
    return !isOptional;
  }

  public int fieldId() {
    return id;
  }

  public String name() {
    return name;
  }

  public Type type() {
    return type;
  }

  @Override
  public String toString() {
    return String.format("%d: %s: %s %s",
        id, name, isOptional ? "optional" : "required", type);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    NestedField that = (NestedField) o;
    return isOptional == that.isOptional
        && id == that.id
        && name.equals(that.name)
        && type.equals(that.type);
  }

  @Override
  public int hashCode() {
    return Objects.hash(NestedField.class, id, isOptional, name, type);
  }
}
/**
 * A struct type: an ordered collection of {@link NestedField fields}.
 * <p>
 * Lookups by field name and by field id are indexed lazily and cached; the
 * indexes are transient and rebuilt on demand after deserialization.
 */
public static class StructType extends NestedType {
  private static final Joiner FIELD_SEP = Joiner.on(", ");

  public static StructType of(NestedField... fields) {
    return of(Arrays.asList(fields));
  }

  public static StructType of(List<NestedField> fields) {
    return new StructType(fields);
  }

  private final NestedField[] fields;

  // lazy values
  private transient List<NestedField> fieldList = null;
  private transient Map<String, NestedField> fieldsByName = null;
  private transient Map<Integer, NestedField> fieldsById = null;

  private StructType(List<NestedField> fields) {
    Preconditions.checkNotNull(fields, "Field list cannot be null");
    // defensive copy so later mutation of the caller's list cannot affect this struct
    this.fields = fields.toArray(new NestedField[0]);
  }

  @Override
  public List<NestedField> fields() {
    return lazyFieldList();
  }

  /** Returns the field with the given name, or null if there is no such field. */
  public NestedField field(String name) {
    return lazyFieldsByName().get(name);
  }

  @Override
  public Type fieldType(String name) {
    NestedField field = field(name);
    if (field != null) {
      return field.type();
    }
    return null;
  }

  @Override
  public NestedField field(int id) {
    return lazyFieldsById().get(id);
  }

  @Override
  public TypeID typeId() {
    return TypeID.STRUCT;
  }

  @Override
  public boolean isStructType() {
    return true;
  }

  @Override
  public Types.StructType asStructType() {
    return this;
  }

  @Override
  public String toString() {
    return String.format("struct<%s>", FIELD_SEP.join(fields));
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    } else if (o == null || getClass() != o.getClass()) {
      return false;
    }

    StructType that = (StructType) o;
    return Arrays.equals(fields, that.fields);
  }

  @Override
  public int hashCode() {
    // salt with StructType.class for consistency with the other types' hashCode
    // implementations (previously used NestedField.class, a copy-paste slip)
    return Objects.hash(StructType.class, Arrays.hashCode(fields));
  }

  private List<NestedField> lazyFieldList() {
    if (fieldList == null) {
      this.fieldList = ImmutableList.copyOf(fields);
    }
    return fieldList;
  }

  private Map<String, NestedField> lazyFieldsByName() {
    if (fieldsByName == null) {
      indexFields();
    }
    return fieldsByName;
  }

  private Map<Integer, NestedField> lazyFieldsById() {
    if (fieldsById == null) {
      indexFields();
    }
    return fieldsById;
  }

  // builds both indexes in one pass; ImmutableMap.Builder rejects duplicate keys
  private void indexFields() {
    ImmutableMap.Builder<String, NestedField> byNameBuilder = ImmutableMap.builder();
    ImmutableMap.Builder<Integer, NestedField> byIdBuilder = ImmutableMap.builder();
    for (NestedField field : fields) {
      byNameBuilder.put(field.name(), field);
      byIdBuilder.put(field.fieldId(), field);
    }
    this.fieldsByName = byNameBuilder.build();
    this.fieldsById = byIdBuilder.build();
  }
}
/**
 * A list type wrapping a single element {@link NestedField field}.
 */
public static class ListType extends NestedType {
  /** Creates a list whose elements may be null. */
  public static ListType ofOptional(int elementId, Type elementType) {
    Preconditions.checkNotNull(elementType, "Element type cannot be null");
    return new ListType(NestedField.optional(elementId, "element", elementType));
  }

  /** Creates a list whose elements must not be null. */
  public static ListType ofRequired(int elementId, Type elementType) {
    Preconditions.checkNotNull(elementType, "Element type cannot be null");
    return new ListType(NestedField.required(elementId, "element", elementType));
  }

  private final NestedField elementField;

  // lazily-built single-element field list
  private transient List<NestedField> fields = null;

  private ListType(NestedField elementField) {
    this.elementField = elementField;
  }

  public Type elementType() {
    return elementField.type();
  }

  @Override
  public Type fieldType(String name) {
    // the only addressable field of a list is named "element"
    return "element".equals(name) ? elementType() : null;
  }

  @Override
  public NestedField field(int id) {
    return id == elementField.fieldId() ? elementField : null;
  }

  @Override
  public List<NestedField> fields() {
    return lazyFieldList();
  }

  public int elementId() {
    return elementField.fieldId();
  }

  public boolean isElementRequired() {
    return elementField.isRequired();
  }

  public boolean isElementOptional() {
    return elementField.isOptional();
  }

  @Override
  public TypeID typeId() {
    return TypeID.LIST;
  }

  @Override
  public boolean isListType() {
    return true;
  }

  @Override
  public Types.ListType asListType() {
    return this;
  }

  @Override
  public String toString() {
    return String.format("list<%s>", elementField.type());
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    ListType that = (ListType) o;
    return elementField.equals(that.elementField);
  }

  @Override
  public int hashCode() {
    return Objects.hash(ListType.class, elementField);
  }

  private List<NestedField> lazyFieldList() {
    if (fields == null) {
      this.fields = ImmutableList.of(elementField);
    }
    return fields;
  }
}
/**
 * A map type wrapping key and value {@link NestedField fields}.
 * <p>
 * Keys are always required; values may be optional or required.
 */
public static class MapType extends NestedType {
  /**
   * Creates a map with required keys and optional (nullable) values.
   *
   * @throws NullPointerException if keyType or valueType is null
   */
  public static MapType ofOptional(int keyId, int valueId, Type keyType, Type valueType) {
    // validate both types here for a clear message; previously only valueType was
    // checked and a null key type failed later with a generic error
    Preconditions.checkNotNull(keyType, "Key type cannot be null");
    Preconditions.checkNotNull(valueType, "Value type cannot be null");
    return new MapType(
        NestedField.required(keyId, "key", keyType),
        NestedField.optional(valueId, "value", valueType));
  }

  /**
   * Creates a map with required keys and required (non-null) values.
   *
   * @throws NullPointerException if keyType or valueType is null
   */
  public static MapType ofRequired(int keyId, int valueId, Type keyType, Type valueType) {
    Preconditions.checkNotNull(keyType, "Key type cannot be null");
    Preconditions.checkNotNull(valueType, "Value type cannot be null");
    return new MapType(
        NestedField.required(keyId, "key", keyType),
        NestedField.required(valueId, "value", valueType));
  }

  private final NestedField keyField;
  private final NestedField valueField;

  // lazily-built two-element field list
  private transient List<NestedField> fields = null;

  private MapType(NestedField keyField, NestedField valueField) {
    this.keyField = keyField;
    this.valueField = valueField;
  }

  public Type keyType() {
    return keyField.type();
  }

  public Type valueType() {
    return valueField.type();
  }

  @Override
  public Type fieldType(String name) {
    // the addressable fields of a map are named "key" and "value"
    if ("key".equals(name)) {
      return keyField.type();
    } else if ("value".equals(name)) {
      return valueField.type();
    }
    return null;
  }

  @Override
  public NestedField field(int id) {
    if (keyField.fieldId() == id) {
      return keyField;
    } else if (valueField.fieldId() == id) {
      return valueField;
    }
    return null;
  }

  @Override
  public List<NestedField> fields() {
    return lazyFieldList();
  }

  public int keyId() {
    return keyField.fieldId();
  }

  public int valueId() {
    return valueField.fieldId();
  }

  public boolean isValueRequired() {
    return !valueField.isOptional;
  }

  public boolean isValueOptional() {
    return valueField.isOptional;
  }

  @Override
  public TypeID typeId() {
    return TypeID.MAP;
  }

  @Override
  public boolean isMapType() {
    return true;
  }

  @Override
  public Types.MapType asMapType() {
    return this;
  }

  @Override
  public String toString() {
    return String.format("map<%s, %s>", keyField.type(), valueField.type());
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    } else if (o == null || getClass() != o.getClass()) {
      return false;
    }

    MapType mapType = (MapType) o;
    if (!keyField.equals(mapType.keyField)) {
      return false;
    }
    return valueField.equals(mapType.valueField);
  }

  @Override
  public int hashCode() {
    return Objects.hash(MapType.class, keyField, valueField);
  }

  private List<NestedField> lazyFieldList() {
    if (fields == null) {
      this.fields = ImmutableList.of(keyField, valueField);
    }
    return fields;
  }
}
}
| 2,088 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/TypeUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;
import java.util.function.Supplier;
/**
 * Static utilities for working with {@link Type types} and {@link Schema schemas}:
 * projection, id indexing/assignment, promotion rules, visitors, and decimal sizing.
 */
public class TypeUtil {

  /**
   * Returns a schema containing only the fields selected by the given ids.
   *
   * @param schema a schema
   * @param fieldIds the ids of fields to keep
   * @return the pruned schema, or the original schema if nothing was removed
   */
  public static Schema select(Schema schema, Set<Integer> fieldIds) {
    Preconditions.checkNotNull(schema, "Schema cannot be null");
    Preconditions.checkNotNull(fieldIds, "Field ids cannot be null");

    Type result = visit(schema, new PruneColumns(fieldIds));
    if (schema.asStruct() == result) {
      // identity result means nothing was pruned; reuse the original
      return schema;
    } else if (result != null) {
      if (schema.getAliases() != null) {
        return new Schema(result.asNestedType().fields(), schema.getAliases());
      } else {
        return new Schema(result.asNestedType().fields());
      }
    }

    // no fields matched the selection
    return new Schema(ImmutableList.of(), schema.getAliases());
  }

  /** Returns the set of all field ids in the schema. */
  public static Set<Integer> getProjectedIds(Schema schema) {
    return visit(schema, new GetProjectedIds());
  }

  /** Returns the set of all field ids in the type; empty for primitives. */
  public static Set<Integer> getProjectedIds(Type schema) {
    if (schema.isPrimitiveType()) {
      return ImmutableSet.of();
    }
    return ImmutableSet.copyOf(visit(schema, new GetProjectedIds()));
  }

  /** Returns a schema containing all fields except those with the given ids. */
  public static Schema selectNot(Schema schema, Set<Integer> fieldIds) {
    // NOTE(review): relies on getProjectedIds(Schema) returning a mutable set
    // (unlike the Type overload, which returns an ImmutableSet) — confirm in
    // GetProjectedIds before changing either overload
    Set<Integer> projectedIds = getProjectedIds(schema);
    projectedIds.removeAll(fieldIds);
    return select(schema, projectedIds);
  }

  /** Returns a schema with the columns of {@code left} followed by those of {@code right}. */
  public static Schema join(Schema left, Schema right) {
    List<Types.NestedField> joinedColumns = Lists.newArrayList();
    joinedColumns.addAll(left.columns());
    joinedColumns.addAll(right.columns());
    return new Schema(joinedColumns);
  }

  /** Returns a map from full field name to field id for all fields in the struct. */
  public static Map<String, Integer> indexByName(Types.StructType struct) {
    return visit(struct, new IndexByName());
  }

  /** Returns a map from field id to field for all fields in the struct. */
  public static Map<Integer, Types.NestedField> indexById(Types.StructType struct) {
    return visit(struct, new IndexById());
  }

  /**
   * Assigns fresh ids from the {@link NextID nextId function} for all fields in a type.
   *
   * @param type a type
   * @param nextId an id assignment function
   * @return a structurally identical type with new ids assigned by the nextId function
   */
  public static Type assignFreshIds(Type type, NextID nextId) {
    return TypeUtil.visit(type, new AssignFreshIds(nextId));
  }

  /**
   * Assigns fresh ids from the {@link NextID nextId function} for all fields in a schema.
   *
   * @param schema a schema
   * @param nextId an id assignment function
   * @return a structurally identical schema with new ids assigned by the nextId function
   */
  public static Schema assignFreshIds(Schema schema, NextID nextId) {
    return new Schema(TypeUtil
        .visit(schema.asStruct(), new AssignFreshIds(nextId))
        .asNestedType()
        .fields());
  }

  /**
   * Reassigns ids in a schema from another schema.
   * <p>
   * Ids are determined by field names. If a field in the schema cannot be found in the source
   * schema, this will throw IllegalArgumentException.
   * <p>
   * This will not alter a schema's structure, nullability, or types.
   *
   * @param schema the schema to have ids reassigned
   * @param idSourceSchema the schema from which field ids will be used
   * @return a structurally identical schema with field ids matching the source schema
   * @throws IllegalArgumentException if a field cannot be found (by name) in the source schema
   */
  public static Schema reassignIds(Schema schema, Schema idSourceSchema) {
    Types.StructType struct = visit(schema, new ReassignIds(idSourceSchema)).asStructType();
    return new Schema(struct.fields());
  }

  /** Returns the first type in the schema matching the predicate, or null if none matches. */
  public static Type find(Schema schema, Predicate<Type> predicate) {
    return visit(schema, new FindTypeVisitor(predicate));
  }

  /**
   * Returns whether promoting a value of type {@code from} to {@code to} is allowed.
   * <p>
   * Allowed promotions: int → long, float → double, and decimal to a decimal with the
   * same scale and greater-or-equal precision.
   */
  public static boolean isPromotionAllowed(Type from, Type.PrimitiveType to) {
    // Warning! Before changing this function, make sure that the type change doesn't introduce
    // compatibility problems in partitioning.
    if (from.equals(to)) {
      return true;
    }

    switch (from.typeId()) {
      case INTEGER:
        // identity comparison is safe: primitive types are singletons
        return to == Types.LongType.get();

      case FLOAT:
        return to == Types.DoubleType.get();

      case DECIMAL:
        Types.DecimalType fromDecimal = (Types.DecimalType) from;
        if (to.typeId() != Type.TypeID.DECIMAL) {
          return false;
        }

        Types.DecimalType toDecimal = (Types.DecimalType) to;
        return (fromDecimal.scale() == toDecimal.scale() &&
            fromDecimal.precision() <= toDecimal.precision());
    }

    return false;
  }

  /**
   * Interface for passing a function that assigns column IDs.
   */
  public interface NextID {
    int get();
  }

  /**
   * Pre-order visitor over schemas/types. Subclasses override the callbacks they need;
   * the default for every callback is null. While visiting, {@code fieldNames} and
   * {@code fieldIds} hold the path from the root to the current position (used as stacks).
   */
  public static class SchemaVisitor<T> {
    protected LinkedList<String> fieldNames = Lists.newLinkedList();
    protected LinkedList<Integer> fieldIds = Lists.newLinkedList();

    public T schema(Schema schema, T structResult) {
      return null;
    }

    public T struct(Types.StructType struct, List<T> fieldResults) {
      return null;
    }

    public T field(Types.NestedField field, T fieldResult) {
      return null;
    }

    public T list(Types.ListType list, T elementResult) {
      return null;
    }

    public T map(Types.MapType map, T keyResult, T valueResult) {
      return null;
    }

    public T primitive(Type.PrimitiveType primitive) {
      return null;
    }
  }

  /** Visits a schema with a pre-order visitor and returns the visitor's schema result. */
  public static <T> T visit(Schema schema, SchemaVisitor<T> visitor) {
    return visitor.schema(schema, visit(schema.asStruct(), visitor));
  }

  /** Visits a type with a pre-order visitor, maintaining the visitor's name/id stacks. */
  public static <T> T visit(Type type, SchemaVisitor<T> visitor) {
    switch (type.typeId()) {
      case STRUCT:
        Types.StructType struct = type.asNestedType().asStructType();
        List<T> results = Lists.newArrayListWithExpectedSize(struct.fields().size());
        for (Types.NestedField field : struct.fields()) {
          visitor.fieldIds.push(field.fieldId());
          visitor.fieldNames.push(field.name());
          T result;
          try {
            result = visit(field.type(), visitor);
          } finally {
            // always unwind the path stacks, even if the visitor throws
            visitor.fieldIds.pop();
            visitor.fieldNames.pop();
          }
          results.add(visitor.field(field, result));
        }
        return visitor.struct(struct, results);

      case LIST:
        Types.ListType list = type.asNestedType().asListType();
        T elementResult;

        visitor.fieldIds.push(list.elementId());
        try {
          elementResult = visit(list.elementType(), visitor);
        } finally {
          visitor.fieldIds.pop();
        }

        return visitor.list(list, elementResult);

      case MAP:
        Types.MapType map = type.asNestedType().asMapType();
        T keyResult;
        T valueResult;

        visitor.fieldIds.push(map.keyId());
        try {
          keyResult = visit(map.keyType(), visitor);
        } finally {
          visitor.fieldIds.pop();
        }

        visitor.fieldIds.push(map.valueId());
        try {
          valueResult = visit(map.valueType(), visitor);
        } finally {
          visitor.fieldIds.pop();
        }

        return visitor.map(map, keyResult, valueResult);

      default:
        return visitor.primitive(type.asPrimitiveType());
    }
  }

  /**
   * Visitor whose callbacks receive lazy {@link Supplier results}, allowing traversals
   * other than pre-order (children are visited only when the supplier is invoked).
   */
  public static class CustomOrderSchemaVisitor<T> {
    public T schema(Schema schema, Supplier<T> structResult) {
      return null;
    }

    public T struct(Types.StructType struct, Iterable<T> fieldResults) {
      return null;
    }

    public T field(Types.NestedField field, Supplier<T> fieldResult) {
      return null;
    }

    public T list(Types.ListType list, Supplier<T> elementResult) {
      return null;
    }

    public T map(Types.MapType map, Supplier<T> keyResult, Supplier<T> valueResult) {
      return null;
    }

    public T primitive(Type.PrimitiveType primitive) {
      return null;
    }
  }

  // lazily visits a type when the supplier is invoked
  private static class VisitFuture<T> implements Supplier<T> {
    private final Type type;
    private final CustomOrderSchemaVisitor<T> visitor;

    private VisitFuture(Type type, CustomOrderSchemaVisitor<T> visitor) {
      this.type = type;
      this.visitor = visitor;
    }

    @Override
    public T get() {
      return visit(type, visitor);
    }
  }

  // lazily visits a struct field when the supplier is invoked
  private static class VisitFieldFuture<T> implements Supplier<T> {
    private final Types.NestedField field;
    private final CustomOrderSchemaVisitor<T> visitor;

    private VisitFieldFuture(Types.NestedField field, CustomOrderSchemaVisitor<T> visitor) {
      this.field = field;
      this.visitor = visitor;
    }

    @Override
    public T get() {
      return visitor.field(field, new VisitFuture<>(field.type(), visitor));
    }
  }

  /** Visits a schema with a custom-order visitor. */
  public static <T> T visit(Schema schema, CustomOrderSchemaVisitor<T> visitor) {
    return visitor.schema(schema, new VisitFuture<>(schema.asStruct(), visitor));
  }

  /**
   * Used to traverse types with traversals other than pre-order.
   * <p>
   * This passes a {@link Supplier} to each {@link CustomOrderSchemaVisitor visitor} method that
   * returns the result of traversing child types. Structs are passed an {@link Iterable} that
   * traverses child fields during iteration.
   * <p>
   * An example use is assigning column IDs, which should be done with a post-order traversal.
   *
   * @param type a type to traverse with a visitor
   * @param visitor a custom order visitor
   * @param <T> the type returned by the visitor
   * @return the result of traversing the given type with the visitor
   */
  public static <T> T visit(Type type, CustomOrderSchemaVisitor<T> visitor) {
    switch (type.typeId()) {
      case STRUCT:
        Types.StructType struct = type.asNestedType().asStructType();
        List<VisitFieldFuture<T>> results = Lists
            .newArrayListWithExpectedSize(struct.fields().size());
        for (Types.NestedField field : struct.fields()) {
          results.add(
              new VisitFieldFuture<>(field, visitor));
        }

        return visitor.struct(struct, Iterables.transform(results, VisitFieldFuture::get));

      case LIST:
        Types.ListType list = type.asNestedType().asListType();
        return visitor.list(list, new VisitFuture<>(list.elementType(), visitor));

      case MAP:
        Types.MapType map = type.asNestedType().asMapType();
        return visitor.map(map,
            new VisitFuture<>(map.keyType(), visitor),
            new VisitFuture<>(map.valueType(), visitor));

      default:
        return visitor.primitive(type.asPrimitiveType());
    }
  }

  /** Returns the maximum decimal precision that fits in the given number of bytes. */
  static int decimalMaxPrecision(int numBytes) {
    Preconditions.checkArgument(numBytes >= 0 && numBytes < 24,
        "Unsupported decimal length: %s", numBytes);
    return MAX_PRECISION[numBytes];
  }

  /**
   * Returns the minimum number of bytes needed to store a decimal of the given precision.
   * <p>
   * (sic) The typo in this method name is preserved for API compatibility.
   */
  public static int decimalRequriedBytes(int precision) {
    Preconditions.checkArgument(precision >= 0 && precision < 40,
        "Unsupported decimal precision: %s", precision);
    return REQUIRED_LENGTH[precision];
  }

  // lookup tables filled in by the static initializer below; final so the
  // references cannot be reassigned after class initialization
  private static final int[] MAX_PRECISION = new int[24];
  private static final int[] REQUIRED_LENGTH = new int[40];

  static {
    // for each length, calculate the max precision
    for (int len = 0; len < MAX_PRECISION.length; len += 1) {
      MAX_PRECISION[len] = (int) Math.floor(Math.log10(Math.pow(2, 8 * len - 1) - 1));
    }

    // for each precision, find the first length that can hold it
    for (int precision = 0; precision < REQUIRED_LENGTH.length; precision += 1) {
      REQUIRED_LENGTH[precision] = -1;
      for (int len = 0; len < MAX_PRECISION.length; len += 1) {
        // find the first length that can hold the precision
        if (precision <= MAX_PRECISION[len]) {
          REQUIRED_LENGTH[precision] = len;
          break;
        }
      }
      if (REQUIRED_LENGTH[precision] < 0) {
        throw new IllegalStateException(
            "Could not find required length for precision " + precision);
      }
    }
  }
}
| 2,089 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Conversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.base.Charsets;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CharsetEncoder;
import java.util.Arrays;
import java.util.UUID;
/**
 * Conversions between Iceberg values and their string / binary representations.
 * <p>
 * Single-value binary serialization uses little-endian byte order; see
 * {@link #toByteBuffer(Type, Object)} and {@link #fromByteBuffer(Type, ByteBuffer)}.
 */
public class Conversions {
  private static final String HIVE_NULL = "__HIVE_DEFAULT_PARTITION__";

  /**
   * Parses a Hive-style partition string into a value of the given type.
   *
   * @param type the value's type
   * @param asString the partition string; null or the Hive null marker produce null
   * @return the parsed value, or null
   * @throws UnsupportedOperationException for types without a partition-string form
   */
  public static Object fromPartitionString(Type type, String asString) {
    if (asString == null || HIVE_NULL.equals(asString)) {
      return null;
    }

    switch (type.typeId()) {
      case BOOLEAN:
        return Boolean.valueOf(asString);
      case INTEGER:
        return Integer.valueOf(asString);
      case LONG:
        return Long.valueOf(asString);
      case FLOAT:
        // fixed: previously returned Long.valueOf, producing a Long for float columns
        return Float.valueOf(asString);
      case DOUBLE:
        return Double.valueOf(asString);
      case STRING:
        return asString;
      case UUID:
        return UUID.fromString(asString);
      case FIXED:
        Types.FixedType fixed = (Types.FixedType) type;
        // pad or truncate the UTF-8 bytes to the declared fixed length
        return Arrays.copyOf(
            asString.getBytes(Charsets.UTF_8), fixed.length());
      case BINARY:
        return asString.getBytes(Charsets.UTF_8);
      case DECIMAL:
        return new BigDecimal(asString);
      default:
        throw new UnsupportedOperationException(
            "Unsupported type for fromPartitionString: " + type);
    }
  }

  // charset coders are not thread-safe; keep one per thread
  private static final ThreadLocal<CharsetEncoder> ENCODER =
      ThreadLocal.withInitial(Charsets.UTF_8::newEncoder);
  private static final ThreadLocal<CharsetDecoder> DECODER =
      ThreadLocal.withInitial(Charsets.UTF_8::newDecoder);

  /**
   * Serializes a single value to a ByteBuffer using little-endian byte order.
   *
   * @throws UnsupportedOperationException for types without a binary form
   */
  public static ByteBuffer toByteBuffer(Type type, Object value) {
    switch (type.typeId()) {
      case BOOLEAN:
        return ByteBuffer.allocate(1).put(0, (Boolean) value ? (byte) 0x01 : (byte) 0x00);
      case INTEGER:
      case DATE:
        return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(0, (int) value);
      case LONG:
      case TIME:
      case TIMESTAMP:
        return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, (long) value);
      case FLOAT:
        return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putFloat(0, (float) value);
      case DOUBLE:
        return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putDouble(0, (double) value);
      case STRING:
        CharBuffer buffer = CharBuffer.wrap((CharSequence) value);
        try {
          return ENCODER.get().encode(buffer);
        } catch (CharacterCodingException e) {
          throw new RuntimeIOException(e, "Failed to encode value as UTF-8: " + value);
        }
      case UUID:
        UUID uuid = (UUID) value;
        // fixed: the two longs go at byte offsets 0 and 8; the previous offset of 1
        // overlapped the first long and broke the fromByteBuffer round-trip
        return ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN)
            .putLong(0, uuid.getMostSignificantBits())
            .putLong(8, uuid.getLeastSignificantBits());
      case FIXED:
      case BINARY:
        return (ByteBuffer) value;
      case DECIMAL:
        // big-endian two's-complement unscaled value; scale comes from the type
        return ByteBuffer.wrap(((BigDecimal) value).unscaledValue().toByteArray());
      default:
        throw new UnsupportedOperationException("Cannot serialize type: " + type);
    }
  }

  /**
   * Deserializes a single value written by {@link #toByteBuffer(Type, Object)}.
   * The buffer's position and byte order are not modified.
   */
  @SuppressWarnings("unchecked")
  public static <T> T fromByteBuffer(Type type, ByteBuffer buffer) {
    return (T) internalFromByteBuffer(type, buffer);
  }

  private static Object internalFromByteBuffer(Type type, ByteBuffer buffer) {
    // duplicate so the caller's position/order are untouched
    ByteBuffer tmp = buffer.duplicate().order(ByteOrder.LITTLE_ENDIAN);
    switch (type.typeId()) {
      case BOOLEAN:
        return (tmp.get() != 0x00);
      case INTEGER:
      case DATE:
        return tmp.getInt();
      case LONG:
      case TIME:
      case TIMESTAMP:
        if (tmp.remaining() < 8) {
          // type was later promoted to long; the stored value is a 4-byte int
          return (long) tmp.getInt();
        }
        return tmp.getLong();
      case FLOAT:
        return tmp.getFloat();
      case DOUBLE:
        if (tmp.remaining() < 8) {
          // type was later promoted to double; the stored value is a 4-byte float
          return (double) tmp.getFloat();
        }
        return tmp.getDouble();
      case STRING:
        try {
          return DECODER.get().decode(tmp);
        } catch (CharacterCodingException e) {
          throw new RuntimeIOException(e, "Failed to decode value as UTF-8: " + buffer);
        }
      case UUID:
        long mostSigBits = tmp.getLong();
        long leastSigBits = tmp.getLong();
        return new UUID(mostSigBits, leastSigBits);
      case FIXED:
      case BINARY:
        return tmp;
      case DECIMAL:
        Types.DecimalType decimal = (Types.DecimalType) type;
        byte[] unscaledBytes = new byte[tmp.remaining()];
        tmp.get(unscaledBytes);
        return new BigDecimal(new BigInteger(unscaledBytes), decimal.scale());
      default:
        throw new UnsupportedOperationException("Cannot deserialize type: " + type);
    }
  }
}
| 2,090 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Comparators.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.collect.ImmutableMap;
import java.nio.ByteBuffer;
import java.util.Comparator;
/**
 * Factory methods for comparators over Iceberg value representations.
 */
public class Comparators {
  // comparators for primitive types whose Java representations have a usable
  // natural order; strings and binary need the custom comparators below
  private static final ImmutableMap<Type.PrimitiveType, Comparator<?>> COMPARATORS = ImmutableMap
      .<Type.PrimitiveType, Comparator<?>>builder()
      .put(Types.BooleanType.get(), Comparator.naturalOrder())
      .put(Types.IntegerType.get(), Comparator.naturalOrder())
      .put(Types.LongType.get(), Comparator.naturalOrder())
      .put(Types.FloatType.get(), Comparator.naturalOrder())
      .put(Types.DoubleType.get(), Comparator.naturalOrder())
      .put(Types.DateType.get(), Comparator.naturalOrder())
      .put(Types.TimeType.get(), Comparator.naturalOrder())
      .put(Types.TimestampType.withZone(), Comparator.naturalOrder())
      .put(Types.TimestampType.withoutZone(), Comparator.naturalOrder())
      .put(Types.StringType.get(), Comparators.charSequences())
      .put(Types.UUIDType.get(), Comparator.naturalOrder())
      .put(Types.BinaryType.get(), Comparators.unsignedBytes())
      .build();

  /**
   * Returns a comparator for values of the given primitive type.
   *
   * @throws UnsupportedOperationException if no comparator is known for the type
   */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> forType(Type.PrimitiveType type) {
    Comparator<?> cmp = COMPARATORS.get(type);
    if (cmp != null) {
      return (Comparator<T>) cmp;
    } else if (type instanceof Types.FixedType) {
      // fixed and decimal are parameterized, so they cannot live in the static map
      return (Comparator<T>) Comparators.unsignedBytes();
    } else if (type instanceof Types.DecimalType) {
      return (Comparator<T>) Comparator.naturalOrder();
    }
    throw new UnsupportedOperationException("Cannot determine comparator for type: " + type);
  }

  /** Returns a lexicographic, unsigned byte-wise comparator for ByteBuffers. */
  public static Comparator<ByteBuffer> unsignedBytes() {
    return UnsignedByteBufComparator.INSTANCE;
  }

  /** Returns ByteBuffer's natural (signed byte-wise) comparator. */
  public static Comparator<ByteBuffer> signedBytes() {
    return Comparator.naturalOrder();
  }

  /** Returns a comparator that orders null before non-null; chain with thenComparing. */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> nullsFirst() {
    return (Comparator<T>) NullsFirst.INSTANCE;
  }

  /** Returns a comparator that orders null after non-null; chain with thenComparing. */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> nullsLast() {
    return (Comparator<T>) NullsLast.INSTANCE;
  }

  /** Returns a char-by-char (UTF-16 code unit) comparator for CharSequences. */
  public static Comparator<CharSequence> charSequences() {
    return CharSeqComparator.INSTANCE;
  }

  // Orders null before any non-null value and reports two non-null values as
  // equal (0), so that the real comparison is delegated to a chained comparator.
  private static class NullsFirst<T> implements Comparator<T> {
    private static final NullsFirst<?> INSTANCE = new NullsFirst<>();

    private NullsFirst() {
    }

    @Override
    public int compare(T o1, T o2) {
      if (o1 != null) {
        if (o2 != null) {
          // both non-null: defer to the next comparator in the chain
          return 0;
        }
        return 1;
      } else if (o2 != null) {
        return -1;
      }
      return 0;
    }

    @Override
    public Comparator<T> thenComparing(Comparator<? super T> other) {
      // the chained comparator must only see non-null pairs
      return new NullSafeChainedComparator<>(this, other);
    }
  }

  // Mirror of NullsFirst: orders null after any non-null value.
  private static class NullsLast<T> implements Comparator<T> {
    private static final NullsLast<?> INSTANCE = new NullsLast<>();

    private NullsLast() {
    }

    @Override
    public int compare(T o1, T o2) {
      if (o1 != null) {
        if (o2 != null) {
          // both non-null: defer to the next comparator in the chain
          return 0;
        }
        return -1;
      } else if (o2 != null) {
        return 1;
      }
      return 0;
    }

    @Override
    public Comparator<T> thenComparing(Comparator<? super T> other) {
      return new NullSafeChainedComparator<>(this, other);
    }
  }

  // Applies `second` only when `first` reports equality AND the values are
  // non-null, so null-unaware comparators can safely follow nullsFirst/nullsLast.
  private static class NullSafeChainedComparator<T> implements Comparator<T> {
    private final Comparator<T> first;
    private final Comparator<? super T> second;

    public NullSafeChainedComparator(Comparator<T> first, Comparator<? super T> second) {
      this.first = first;
      this.second = second;
    }

    @Override
    public int compare(T o1, T o2) {
      int cmp = first.compare(o1, o2);
      // o1 != null implies o2 != null here: first returns 0 for (null, null)
      // and non-zero for mixed pairs
      if (cmp == 0 && o1 != null) {
        return second.compare(o1, o2);
      }
      return cmp;
    }
  }

  // Lexicographic comparison treating each byte as unsigned (0..255).
  // Does not modify the buffers' positions; shorter prefix orders first.
  private static class UnsignedByteBufComparator implements Comparator<ByteBuffer> {
    private static final UnsignedByteBufComparator INSTANCE = new UnsignedByteBufComparator();

    private UnsignedByteBufComparator() {
    }

    @Override
    public int compare(ByteBuffer buf1, ByteBuffer buf2) {
      int len = Math.min(buf1.remaining(), buf2.remaining());

      // find the first difference and return
      int b1pos = buf1.position();
      int b2pos = buf2.position();
      for (int i = 0; i < len; i += 1) {
        // Conversion to int is what Byte.toUnsignedInt would do
        int cmp = Integer.compare(
            ((int) buf1.get(b1pos + i)) & 0xff,
            ((int) buf2.get(b2pos + i)) & 0xff);
        if (cmp != 0) {
          return cmp;
        }
      }

      // if there are no differences, then the shorter seq is first
      return Integer.compare(buf1.remaining(), buf2.remaining());
    }
  }

  // Compares by UTF-16 code unit, like String.compareTo, but works for any
  // CharSequence implementation. Shorter prefix orders first.
  private static class CharSeqComparator implements Comparator<CharSequence> {
    private static final CharSeqComparator INSTANCE = new CharSeqComparator();

    private CharSeqComparator() {
    }

    @Override
    public int compare(CharSequence s1, CharSequence s2) {
      int len = Math.min(s1.length(), s2.length());

      // find the first difference and return
      for (int i = 0; i < len; i += 1) {
        int cmp = Character.compare(s1.charAt(i), s2.charAt(i));
        if (cmp != 0) {
          return cmp;
        }
      }

      // if there are no differences, then the shorter seq is first
      return Integer.compare(s1.length(), s2.length());
    }
  }
}
| 2,091 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/IndexById.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Map;
/**
 * Schema visitor that accumulates a map from field id to {@link Types.NestedField}
 * for every field in a schema, including list element and map key/value fields.
 */
class IndexById extends TypeUtil.SchemaVisitor<Map<Integer, Types.NestedField>> {
  private final Map<Integer, Types.NestedField> index = Maps.newHashMap();

  @Override
  public Map<Integer, Types.NestedField> schema(Schema schema, Map<Integer, Types.NestedField> structResult) {
    // the accumulated index is the traversal result
    return index;
  }

  @Override
  public Map<Integer, Types.NestedField> struct(Types.StructType struct, List<Map<Integer, Types.NestedField>> fieldResults) {
    return index;
  }

  @Override
  public Map<Integer, Types.NestedField> field(Types.NestedField field, Map<Integer, Types.NestedField> fieldResult) {
    index.put(field.fieldId(), field);
    return null;
  }

  @Override
  public Map<Integer, Types.NestedField> list(Types.ListType list, Map<Integer, Types.NestedField> elementResult) {
    // element fields are not routed through field(), so index them here
    indexAll(list.fields());
    return null;
  }

  @Override
  public Map<Integer, Types.NestedField> map(Types.MapType map, Map<Integer, Types.NestedField> keyResult, Map<Integer, Types.NestedField> valueResult) {
    // key and value fields are not routed through field(), so index them here
    indexAll(map.fields());
    return null;
  }

  // adds every field in the list to the id index
  private void indexAll(List<Types.NestedField> fields) {
    for (Types.NestedField field : fields) {
      index.put(field.fieldId(), field);
    }
  }
}
| 2,092 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/FindTypeVisitor.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.function.Predicate;
/**
 * A schema visitor that returns the first type matching a predicate, or null if none match.
 */
class FindTypeVisitor extends TypeUtil.SchemaVisitor<Type> {
  // matcher applied to every type encountered during traversal
  private final Predicate<Type> predicate;

  FindTypeVisitor(Predicate<Type> predicate) {
    this.predicate = predicate;
  }

  @Override
  public Type schema(Schema schema, Type structResult) {
    return structResult;
  }

  @Override
  public Type struct(Types.StructType struct, List<Type> fieldResults) {
    if (predicate.test(struct)) {
      return struct;
    }

    // otherwise, return the first match found within any field, if one exists
    for (Type match : fieldResults) {
      if (match != null) {
        return match;
      }
    }

    return null;
  }

  @Override
  public Type field(Types.NestedField field, Type fieldResult) {
    return fieldResult;
  }

  @Override
  public Type list(Types.ListType list, Type elementResult) {
    // prefer the list itself over a match inside its element type
    return predicate.test(list) ? list : elementResult;
  }

  @Override
  public Type map(Types.MapType map, Type keyResult, Type valueResult) {
    if (predicate.test(map)) {
      return map;
    }

    // a match in the key type wins over a match in the value type
    return keyResult != null ? keyResult : valueResult;
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    return predicate.test(primitive) ? primitive : null;
  }
}
| 2,093 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/OutputFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import java.io.IOException;
/**
 * An interface used to create output files using {@link PositionOutputStream} instances.
 * <p>
 * This class is based on Parquet's InputFile.
 */
public interface OutputFile {
  /**
   * Create a new file and return a {@link PositionOutputStream} to it.
   * <p>
   * If the file already exists, this will throw an exception.
   *
   * @return an output stream that can report its position
   * @throws AlreadyExistsException If the path already exists
   * @throws RuntimeIOException If the implementation throws an {@link IOException}
   */
  PositionOutputStream create();

  /**
   * Create a new file and return a {@link PositionOutputStream} to it.
   * <p>
   * If the file already exists, this will not throw an exception and will replace the file.
   *
   * @return an output stream that can report its position
   * @throws RuntimeIOException If the implementation throws an {@link IOException}
   */
  PositionOutputStream createOrOverwrite();

  /**
   * Return the location this output file will create.
   *
   * @return the location of this output file
   */
  String location();

  /**
   * Return an {@link InputFile} for the location of this output file.
   * <p>
   * NOTE(review): implementations appear free to return this before the file is
   * written; whether reads are valid before {@link #create()} completes is not
   * specified here — confirm against implementations.
   *
   * @return an input file for the location of this output file
   */
  InputFile toInputFile();
}
| 2,094 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/PositionOutputStream.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import java.io.IOException;
import java.io.OutputStream;
/**
 * An {@link OutputStream} that can report its current position, in bytes, from the
 * start of the stream.
 */
public abstract class PositionOutputStream extends OutputStream {
  /**
   * Return the current position in the OutputStream.
   *
   * @return current position in bytes from the start of the stream
   * @throws IOException If the underlying stream throws IOException
   */
  public abstract long getPos() throws IOException;
}
| 2,095 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/SeekableInputStream.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import java.io.IOException;
import java.io.InputStream;
/**
 * {@code SeekableInputStream} is an interface with the methods needed to read data from a file or
 * Hadoop data stream.
 * <p>
 * This class is based on Parquet's SeekableInputStream.
 */
public abstract class SeekableInputStream extends InputStream {
  /**
   * Return the current position in the InputStream.
   *
   * @return current position in bytes from the start of the stream
   * @throws IOException If the underlying stream throws IOException
   */
  public abstract long getPos() throws IOException;

  /**
   * Seek to a new position in the InputStream.
   *
   * @param newPos the new position to seek to, in bytes from the start of the stream
   * @throws IOException If the underlying stream throws IOException
   */
  public abstract void seek(long newPos) throws IOException;
}
| 2,096 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/FileAppender.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import com.netflix.iceberg.Metrics;
import java.io.Closeable;
import java.util.Iterator;
/**
 * An appender that writes data values of type {@code D} to a file.
 *
 * @param <D> the type of datum written by this appender
 */
public interface FileAppender<D> extends Closeable {
  /**
   * Append a single datum to the file.
   */
  void add(D datum);

  /**
   * Append every value produced by the iterator, in order.
   */
  default void addAll(Iterator<D> values) {
    values.forEachRemaining(this::add);
  }

  /**
   * Append every value in the iterable, in order.
   */
  default void addAll(Iterable<D> values) {
    addAll(values.iterator());
  }

  /**
   * @return {@link Metrics} for this file. Only valid after the file is closed.
   */
  Metrics metrics();
}
| 2,097 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/CloseableIterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import com.google.common.base.Preconditions;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.function.Function;
/**
 * An {@link Iterable} that holds closeable resources and must be closed when iteration is done.
 *
 * @param <T> the type of elements produced by this iterable
 */
public interface CloseableIterable<T> extends Iterable<T>, Closeable {
  /**
   * Wrap a plain iterable as a {@link CloseableIterable} whose close is a no-op.
   */
  static <E> CloseableIterable<E> withNoopClose(Iterable<E> iterable) {
    return new CloseableIterable<E>() {
      @Override
      public Iterator<E> iterator() {
        return iterable.iterator();
      }

      @Override
      public void close() {
        // nothing to release
      }
    };
  }

  /**
   * Return an empty {@link CloseableIterable} whose close is a no-op.
   */
  static <E> CloseableIterable<E> empty() {
    return new CloseableIterable<E>() {
      @Override
      public Iterator<E> iterator() {
        return Collections.emptyIterator();
      }

      @Override
      public void close() {
        // nothing to release
      }
    };
  }

  /**
   * Combine an iterable with a group of closeables that are closed when the result is closed.
   */
  static <E> CloseableIterable<E> combine(Iterable<E> iterable, Iterable<Closeable> closeables) {
    return new CloseableGroup.ClosingIterable<>(iterable, closeables);
  }

  /**
   * Apply an Iterable-to-Iterable wrapper eagerly; closing the result closes the original.
   */
  static <I, O> CloseableIterable<O> wrap(CloseableIterable<I> iterable, Function<Iterable<I>, Iterable<O>> wrap) {
    Iterable<O> wrapped = wrap.apply(iterable);
    return new CloseableIterable<O>() {
      @Override
      public Iterator<O> iterator() {
        return wrapped.iterator();
      }

      @Override
      public void close() throws IOException {
        iterable.close();
      }
    };
  }

  /**
   * Lazily apply a per-element transform; closing the result closes the original.
   */
  static <I, O> CloseableIterable<O> transform(CloseableIterable<I> iterable, Function<I, O> transform) {
    Preconditions.checkNotNull(transform, "Cannot apply a null transform");

    return new CloseableIterable<O>() {
      @Override
      public void close() throws IOException {
        iterable.close();
      }

      @Override
      public Iterator<O> iterator() {
        Iterator<I> source = iterable.iterator();
        return new Iterator<O>() {
          @Override
          public boolean hasNext() {
            return source.hasNext();
          }

          @Override
          public O next() {
            return transform.apply(source.next());
          }
        };
      }
    };
  }
}
| 2,098 |
0 | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg | Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/CloseableGroup.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.io;
import com.google.common.collect.Lists;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.LinkedList;
/**
 * A base class that tracks a group of {@link Closeable} resources and closes them together.
 */
public abstract class CloseableGroup implements Closeable {
  private final LinkedList<Closeable> closeables = Lists.newLinkedList();

  /**
   * Register a closeable to be closed, in registration order, when this group is closed.
   */
  protected void addCloseable(Closeable closeable) {
    closeables.add(closeable);
  }

  /**
   * Close all registered closeables in registration order.
   * <p>
   * Every resource is closed even if an earlier one fails: the first {@link IOException}
   * is rethrown after the loop, with any later failures attached as suppressed exceptions.
   * (The previous behavior stopped at the first failure, leaking the remaining resources.)
   *
   * @throws IOException if any registered closeable throws while closing
   */
  @Override
  public void close() throws IOException {
    IOException firstFailure = null;
    while (!closeables.isEmpty()) {
      Closeable toClose = closeables.removeFirst();
      if (toClose == null) {
        continue; // tolerate null registrations
      }
      try {
        toClose.close();
      } catch (IOException e) {
        if (firstFailure == null) {
          firstFailure = e;
        } else {
          firstFailure.addSuppressed(e);
        }
      }
    }

    if (firstFailure != null) {
      throw firstFailure;
    }
  }

  /**
   * A {@link CloseableIterable} that closes a group of closeables when it is closed.
   */
  static class ClosingIterable<T> extends CloseableGroup implements CloseableIterable<T> {
    private final Iterable<T> iterable;

    public ClosingIterable(Iterable<T> iterable, Iterable<Closeable> closeables) {
      this.iterable = iterable;
      for (Closeable closeable : closeables) {
        addCloseable(closeable);
      }
    }

    @Override
    public Iterator<T> iterator() {
      return iterable.iterator();
    }
  }
}
| 2,099 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.