index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/SerializationProxies.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import java.io.ObjectStreamException;
import java.io.Serializable;
import java.nio.ByteBuffer;

/**
 * Stand-in classes for expression classes in Java Serialization.
 * <p>
 * These are used so that expression classes are immutable and can use final fields.
 */
class SerializationProxies {
  /**
   * Serialization stand-in for the constant expressions {@code True} and {@code False}.
   * <p>
   * {@link #readResolve()} swaps this proxy for the shared singleton so constants stay
   * canonical instances after deserialization.
   */
  static class ConstantExpressionProxy implements Serializable {
    // Boxed Boolean so the mandatory no-arg serialization constructor can leave it
    // unset until the serialized field is read back.
    private Boolean trueOrFalse = null;

    /**
     * Constructor for Java serialization.
     */
    public ConstantExpressionProxy() {
    }

    public ConstantExpressionProxy(boolean trueOrFalse) {
      this.trueOrFalse = trueOrFalse;
    }

    // NOTE(review): unboxing trueOrFalse would NPE on a stream that never set the
    // field — presumably impossible when written by the matching writeReplace;
    // confirm against the writing side.
    Object readResolve() throws ObjectStreamException {
      if (trueOrFalse) {
        return True.INSTANCE;
      } else {
        return False.INSTANCE;
      }
    }
  }

  /**
   * Replacement for BinaryLiteral in Java Serialization.
   * <p>
   * Reuses the byte-array storage of {@link FixedLiteralProxy} and only overrides the
   * literal type produced on deserialization.
   */
  static class BinaryLiteralProxy extends FixedLiteralProxy {
    /**
     * Constructor for Java serialization.
     */
    BinaryLiteralProxy() {
    }

    BinaryLiteralProxy(ByteBuffer buffer) {
      super(buffer);
    }

    Object readResolve() throws ObjectStreamException {
      return new Literals.BinaryLiteral(ByteBuffer.wrap(bytes));
    }
  }

  /**
   * Replacement for FixedLiteral in Java Serialization.
   */
  static class FixedLiteralProxy implements Serializable {
    // protected so BinaryLiteralProxy's readResolve can wrap the same storage
    protected byte[] bytes;

    /**
     * Constructor for Java serialization.
     */
    FixedLiteralProxy() {
    }

    FixedLiteralProxy(ByteBuffer buffer) {
      this.bytes = new byte[buffer.remaining()];
      // read through a duplicate so the source buffer's position is left untouched
      buffer.duplicate().get(bytes);
    }

    Object readResolve() throws ObjectStreamException {
      return new Literals.FixedLiteral(ByteBuffer.wrap(bytes));
    }
  }
}
6,500
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Evaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Types;
import java.io.Serializable;
import java.util.Comparator;

/**
 * Evaluates an {@link Expression} for data described by a {@link Types.StructType}.
 * <p>
 * Data rows must implement {@link StructLike} and are passed to {@link #eval(StructLike)}.
 * <p>
 * This class is thread-safe.
 */
public class Evaluator implements Serializable {
  // expression bound to the struct type at construction; immutable afterwards
  private final Expression expr;
  // transient so a deserialized Evaluator lazily rebuilds its per-thread visitors
  private transient ThreadLocal<EvalVisitor> visitors = null;

  private EvalVisitor visitor() {
    // NOTE(review): unsynchronized lazy init — concurrent first calls may each
    // install a fresh ThreadLocal; presumably benign because visitors carry no
    // state between eval() calls, but confirm if strict publication is required.
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(EvalVisitor::new);
    }
    return visitors.get();
  }

  /**
   * Creates an evaluator by binding {@code unbound} to the given struct type.
   *
   * @param struct the row schema references are resolved against
   * @param unbound the expression to evaluate; binding may throw if it does not match the struct
   */
  public Evaluator(Types.StructType struct, Expression unbound) {
    this.expr = Binder.bind(struct, unbound);
  }

  /**
   * Returns whether the bound expression matches the given row.
   */
  public boolean eval(StructLike data) {
    return visitor().eval(data);
  }

  /**
   * Visitor that evaluates the bound expression against one row at a time.
   * One instance per thread; {@code struct} holds the row for the current eval call.
   */
  private class EvalVisitor extends BoundExpressionVisitor<Boolean> {
    private StructLike struct;

    private boolean eval(StructLike row) {
      this.struct = row;
      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return true;
    }

    @Override
    public Boolean alwaysFalse() {
      return false;
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      return ref.get(struct) == null;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      return ref.get(struct) != null;
    }

    // comparison methods all use the literal's comparator so type-specific
    // ordering (e.g. unsigned binary) is respected

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) < 0;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) <= 0;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) > 0;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) >= 0;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) == 0;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      return !eq(ref, lit);
    }

    // IN / NOT IN evaluation is not implemented yet; notIn delegates to in so
    // both fail consistently until support is added
    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      throw new UnsupportedOperationException("In is not supported yet");
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      return !in(ref, lit);
    }
  }
}
6,501
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/NamedReference.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;

/**
 * A reference to a column by name, created before binding to a schema.
 */
public class NamedReference implements Reference {
  public final String name;

  NamedReference(String name) {
    // references must always carry a column name
    Preconditions.checkNotNull(name, "Name cannot be null");
    this.name = name;
  }

  /**
   * Returns the referenced column name.
   */
  public String name() {
    return name;
  }

  @Override
  public String toString() {
    return "ref(name=\"" + name + "\")";
  }
}
6,502
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Not.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class Not implements Expression { private final Expression child; Not(Expression child) { this.child = child; } public Expression child() { return child; } @Override public Operation op() { return Expression.Operation.NOT; } @Override public Expression negate() { return child; } @Override public String toString() { return String.format("not(%s)", child); } }
6,503
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/InclusiveMetricsEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.Map;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link DataFile} to test whether rows in the file may match.
 * <p>
 * This evaluation is inclusive: it returns true if a file may match and false if it cannot match.
 * <p>
 * Files are passed to {@link #eval(DataFile)}, which returns true if the file may contain matching
 * rows and false if the file cannot contain matching rows. Files may be skipped if and only if the
 * return value of {@code eval} is false.
 */
public class InclusiveMetricsEvaluator {
  private final Schema schema;
  private final StructType struct;
  // row filter bound to the schema, with not() pushed down so the visitor only
  // sees positive predicate forms
  private final Expression expr;
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  private MetricsEvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  /**
   * Creates an evaluator for the given schema and unbound row filter.
   *
   * @param schema table schema used to bind the expression
   * @param unbound the row filter to evaluate against file metrics
   */
  public InclusiveMetricsEvaluator(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param file a data file
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean eval(DataFile file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return visitor().eval(file);
  }

  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Visitor that answers "can rows in this file match?" from per-column metrics.
   * Any missing metric (null map or absent key) conservatively answers ROWS_MIGHT_MATCH.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Long> nullCounts = null;
    private Map<Integer, ByteBuffer> lowerBounds = null;
    private Map<Integer, ByteBuffer> upperBounds = null;

    private boolean eval(DataFile file) {
      if (file.recordCount() <= 0) {
        // an empty file cannot match anything
        return ROWS_CANNOT_MATCH;
      }

      this.valueCounts = file.valueCounts();
      this.nullCounts = file.nullValueCounts();
      this.lowerBounds = file.lowerBounds();
      this.upperBounds = file.upperBounds();

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no non-null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      if (valueCounts != null && valueCounts.containsKey(id)
          && nullCounts != null && nullCounts.containsKey(id)
          && valueCounts.get(id) - nullCounts.get(id) == 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        // if every value is >= lit, no row can be < lit
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        // if every value is <= lit, no row can be > lit
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      // the literal must fall inside [lower, upper] for any row to be equal to it
      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        // fix: reuse the already-resolved field instead of a second struct.field(id)
        // lookup, consistent with the upper-bound branch and the other methods
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }
  }
}
6,504
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Expressions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.expressions.Expression.Operation;

/**
 * Factory methods for creating {@link Expression expressions}.
 */
public class Expressions {
  private Expressions() {
  }

  /**
   * Returns the conjunction of two expressions, simplifying against the
   * alwaysTrue/alwaysFalse singletons where possible.
   */
  public static Expression and(Expression left, Expression right) {
    Preconditions.checkNotNull(left, "Left expression cannot be null.");
    Preconditions.checkNotNull(right, "Right expression cannot be null.");
    if (left == alwaysFalse() || right == alwaysFalse()) {
      return alwaysFalse();
    } else if (left == alwaysTrue()) {
      return right;
    } else if (right == alwaysTrue()) {
      return left;
    }
    return new And(left, right);
  }

  /**
   * Returns the disjunction of two expressions, simplifying against the
   * alwaysTrue/alwaysFalse singletons where possible.
   */
  public static Expression or(Expression left, Expression right) {
    Preconditions.checkNotNull(left, "Left expression cannot be null.");
    Preconditions.checkNotNull(right, "Right expression cannot be null.");
    if (left == alwaysTrue() || right == alwaysTrue()) {
      return alwaysTrue();
    } else if (left == alwaysFalse()) {
      return right;
    } else if (right == alwaysFalse()) {
      return left;
    }
    return new Or(left, right);
  }

  /**
   * Returns the negation of an expression, folding constants and collapsing
   * double negation instead of always wrapping in {@link Not}.
   */
  public static Expression not(Expression child) {
    Preconditions.checkNotNull(child, "Child expression cannot be null.");
    if (child == alwaysTrue()) {
      return alwaysFalse();
    } else if (child == alwaysFalse()) {
      return alwaysTrue();
    } else if (child instanceof Not) {
      return ((Not) child).child();
    }
    return new Not(child);
  }

  public static <T> UnboundPredicate<T> isNull(String name) {
    return new UnboundPredicate<>(Expression.Operation.IS_NULL, ref(name));
  }

  public static <T> UnboundPredicate<T> notNull(String name) {
    return new UnboundPredicate<>(Expression.Operation.NOT_NULL, ref(name));
  }

  public static <T> UnboundPredicate<T> lessThan(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.LT, ref(name), value);
  }

  public static <T> UnboundPredicate<T> lessThanOrEqual(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.LT_EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> greaterThan(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.GT, ref(name), value);
  }

  public static <T> UnboundPredicate<T> greaterThanOrEqual(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.GT_EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> equal(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> notEqual(String name, T value) {
    return new UnboundPredicate<>(Expression.Operation.NOT_EQ, ref(name), value);
  }

  /**
   * Creates a predicate for a value-carrying operation.
   *
   * @throws IllegalArgumentException if op is IS_NULL or NOT_NULL, which take no value
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name, T value) {
    // fix: message previously read "inclusive a value"
    Preconditions.checkArgument(op != Operation.IS_NULL && op != Operation.NOT_NULL,
        "Cannot create %s predicate with a value", op);
    return new UnboundPredicate<>(op, ref(name), value);
  }

  /**
   * Creates a predicate for a value-carrying operation from a literal.
   *
   * @throws IllegalArgumentException if op is IS_NULL or NOT_NULL, which take no value
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name, Literal<T> lit) {
    // fix: message previously read "inclusive a value"
    Preconditions.checkArgument(op != Operation.IS_NULL && op != Operation.NOT_NULL,
        "Cannot create %s predicate with a value", op);
    return new UnboundPredicate<>(op, ref(name), lit);
  }

  /**
   * Creates a predicate for a value-less operation (IS_NULL or NOT_NULL only).
   *
   * @throws IllegalArgumentException for any operation that requires a value
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name) {
    Preconditions.checkArgument(op == Operation.IS_NULL || op == Operation.NOT_NULL,
        "Cannot create %s predicate without a value", op);
    return new UnboundPredicate<>(op, ref(name));
  }

  public static True alwaysTrue() {
    return True.INSTANCE;
  }

  public static False alwaysFalse() {
    return False.INSTANCE;
  }

  /**
   * Rewrites an expression so that all not() nodes are pushed into predicates.
   */
  public static Expression rewriteNot(Expression expr) {
    return ExpressionVisitors.visit(expr, RewriteNot.get());
  }

  static NamedReference ref(String name) {
    return new NamedReference(name);
  }
}
6,505
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/InclusiveManifestEvaluator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.ManifestFile;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.List;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link ManifestFile} to test whether the file contains
 * matching partitions.
 * <p>
 * This evaluation is inclusive: it returns true if a file may match and false if it cannot match.
 * <p>
 * Files are passed to {@link #eval(ManifestFile)}, which returns true if the manifest may contain
 * data files that match the partition expression. Manifest files may be skipped if and only if the
 * return value of {@code eval} is false.
 */
public class InclusiveManifestEvaluator {
  private final StructType struct;
  // the row filter projected onto the partition type, bound, and with not() rewritten away
  private final Expression expr;
  private transient ThreadLocal<ManifestEvalVisitor> visitors = null;

  private ManifestEvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(ManifestEvalVisitor::new);
    }
    return visitors.get();
  }

  /**
   * Creates an evaluator for a partition spec and a row filter.
   * The row filter is converted to a partition filter via an inclusive projection.
   */
  public InclusiveManifestEvaluator(PartitionSpec spec, Expression rowFilter) {
    this.struct = spec.partitionType();
    this.expr = Binder.bind(struct, rewriteNot(Projections.inclusive(spec).project(rowFilter)));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param manifest a manifest file
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean eval(ManifestFile manifest) {
    return visitor().eval(manifest);
  }

  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Visitor answering "may this manifest contain matching data files?" from the
   * per-partition-field summaries (containsNull, lowerBound, upperBound).
   * References use positional lookup into the summary list via {@code ref.pos()}.
   */
  private class ManifestEvalVisitor extends BoundExpressionVisitor<Boolean> {
    private List<PartitionFieldSummary> stats = null;

    private boolean eval(ManifestFile manifest) {
      this.stats = manifest.partitions();
      if (stats == null) {
        // no partition summaries: cannot prove anything, so the manifest may match
        return ROWS_MIGHT_MATCH;
      }
      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      if (!stats.get(ref.pos()).containsNull()) {
        return ROWS_CANNOT_MATCH;
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // containsNull encodes whether at least one partition value is null, lowerBound is null if
      // all partition values are null.
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // all values are null
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }
      // if the smallest value is >= lit, no partition value can be < lit
      T lower = Conversions.fromByteBuffer(ref.type(), lowerBound);
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp >= 0) {
        return ROWS_CANNOT_MATCH;
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }
      T lower = Conversions.fromByteBuffer(ref.type(), lowerBound);
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp > 0) {
        return ROWS_CANNOT_MATCH;
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer upperBound = stats.get(ref.pos()).upperBound();
      if (upperBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }
      // if the largest value is <= lit, no partition value can be > lit
      T upper = Conversions.fromByteBuffer(ref.type(), upperBound);
      int cmp = lit.comparator().compare(upper, lit.value());
      if (cmp <= 0) {
        return ROWS_CANNOT_MATCH;
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer upperBound = stats.get(ref.pos()).upperBound();
      if (upperBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }
      T upper = Conversions.fromByteBuffer(ref.type(), upperBound);
      int cmp = lit.comparator().compare(upper, lit.value());
      if (cmp < 0) {
        return ROWS_CANNOT_MATCH;
      }
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      // the literal must fall inside [lowerBound, upperBound] for a match to be possible
      PartitionFieldSummary fieldStats = stats.get(ref.pos());
      if (fieldStats.lowerBound() == null) {
        return ROWS_CANNOT_MATCH; // values are all null and literal cannot contain null
      }

      T lower = Conversions.fromByteBuffer(ref.type(), fieldStats.lowerBound());
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp > 0) {
        return ROWS_CANNOT_MATCH;
      }

      T upper = Conversions.fromByteBuffer(ref.type(), fieldStats.upperBound());
      cmp = lit.comparator().compare(upper, lit.value());
      if (cmp < 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }
  }
}
6,506
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Or.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class Or implements Expression { private final Expression left; private final Expression right; Or(Expression left, Expression right) { this.left = left; this.right = right; } public Expression left() { return left; } public Expression right() { return right; } @Override public Operation op() { return Expression.Operation.OR; } @Override public Expression negate() { // not(or(a, b)) => and(not(a), not(b)) return Expressions.and(left.negate(), right.negate()); } @Override public String toString() { return String.format("(%s or %s)", left, right); } }
6,507
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/RewriteNot.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; class RewriteNot extends ExpressionVisitors.ExpressionVisitor<Expression> { private static final RewriteNot INSTANCE = new RewriteNot(); static RewriteNot get() { return INSTANCE; } private RewriteNot() { } @Override public Expression alwaysTrue() { return Expressions.alwaysTrue(); } @Override public Expression alwaysFalse() { return Expressions.alwaysFalse(); } @Override public Expression not(Expression result) { return result.negate(); } @Override public Expression and(Expression leftResult, Expression rightResult) { return Expressions.and(leftResult, rightResult); } @Override public Expression or(Expression leftResult, Expression rightResult) { return Expressions.or(leftResult, rightResult); } @Override public <T> Expression predicate(BoundPredicate<T> pred) { return pred; } @Override public <T> Expression predicate(UnboundPredicate<T> pred) { return pred; } }
6,508
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/Listeners.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.events;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Static registration and notification for listeners.
 * <p>
 * Registration and notification may happen concurrently from multiple threads.
 */
public class Listeners {
  private Listeners() {
  }

  // Maps.newConcurrentMap() returns a ConcurrentHashMap, so computeIfAbsent below is atomic
  private static final Map<Class<?>, List<Listener<?>>> listeners = Maps.newConcurrentMap();

  /**
   * Registers a listener for all events of the given type.
   *
   * @param listener the listener to call for each event
   * @param eventType the event class the listener accepts
   */
  public static <E> void register(Listener<E> listener, Class<E> eventType) {
    // Fix: the previous double-checked get/put plus unsynchronized ArrayList.add was
    // racy and could throw ConcurrentModificationException during notifyAll.
    // computeIfAbsent is atomic here, and CopyOnWriteArrayList makes concurrent
    // add/iterate safe.
    List<Listener<?>> list =
        listeners.computeIfAbsent(eventType, ignored -> new CopyOnWriteArrayList<>());
    list.add(listener);
  }

  /**
   * Notifies every listener registered for the event's exact runtime class.
   * Events with no registered listeners are silently dropped.
   *
   * @param event the event to deliver; must not be null
   */
  @SuppressWarnings("unchecked")
  public static <E> void notifyAll(E event) {
    Preconditions.checkNotNull(event, "Cannot notify listeners for a null event.");

    List<Listener<?>> list = listeners.get(event.getClass());
    if (list != null) {
      // iterating a CopyOnWriteArrayList is safe against concurrent register calls
      for (Listener<?> registered : list) {
        Listener<E> listener = (Listener<E>) registered;
        listener.notify(event);
      }
    }
  }
}
6,509
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/ScanEvent.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.events;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Expression;

/**
 * Event sent to listeners when a table scan is planned.
 * <p>
 * Immutable value carrier: captures the table, the snapshot being scanned, the row filter,
 * and the projected schema.
 */
public final class ScanEvent {
  private final String tableName;
  private final long snapshotId;
  private final Expression filter;
  private final Schema projection;

  public ScanEvent(String tableName, long snapshotId, Expression filter, Schema projection) {
    this.tableName = tableName;
    this.snapshotId = snapshotId;
    this.filter = filter;
    this.projection = projection;
  }

  /** Returns the name of the table being scanned. */
  public String tableName() {
    return tableName;
  }

  /** Returns the id of the snapshot being scanned. */
  public long snapshotId() {
    return snapshotId;
  }

  /** Returns the row filter applied to the scan. */
  public Expression filter() {
    return filter;
  }

  /** Returns the schema projected by the scan. */
  public Schema projection() {
    return projection;
  }
}
6,510
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/Listener.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.events;

/**
 * A listener interface that can receive notifications.
 * <p>
 * Marked {@link FunctionalInterface} so it can be implemented with a lambda and so the
 * compiler enforces the single-abstract-method contract.
 *
 * @param <E> the event type this listener consumes
 */
@FunctionalInterface
public interface Listener<E> {
  /**
   * Called when an event of type {@code E} is published.
   *
   * @param event the event instance; see {@code Listeners.notifyAll}
   */
  void notify(E event);
}
6,511
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/OrcFileAppender.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.orc; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.Schema; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.io.OutputFile; import org.apache.hadoop.fs.Path; import org.apache.orc.ColumnStatistics; import org.apache.orc.TypeDescription; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import org.apache.orc.OrcFile; import org.apache.orc.Writer; import java.io.IOException; import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; /** * Create a file appender for ORC. 
*/ public class OrcFileAppender implements FileAppender<VectorizedRowBatch> { private final Writer writer; private final TypeDescription orcSchema; private final ColumnIdMap columnIds = new ColumnIdMap(); private final Path path; public static final String COLUMN_NUMBERS_ATTRIBUTE = "iceberg.column.ids"; OrcFileAppender(Schema schema, OutputFile file, OrcFile.WriterOptions options, Map<String,byte[]> metadata) { orcSchema = TypeConversion.toOrc(schema, columnIds); options.setSchema(orcSchema); path = new Path(file.location()); try { writer = OrcFile.createWriter(path, options); } catch (IOException e) { throw new RuntimeException("Can't create file " + path, e); } writer.addUserMetadata(COLUMN_NUMBERS_ATTRIBUTE, columnIds.serialize()); metadata.forEach( (key,value) -> writer.addUserMetadata(key, ByteBuffer.wrap(value))); } @Override public void add(VectorizedRowBatch datum) { try { writer.addRowBatch(datum); } catch (IOException e) { throw new RuntimeException("Problem writing to ORC file " + path, e); } } @Override public Metrics metrics() { try { long rows = writer.getNumberOfRows(); ColumnStatistics[] stats = writer.getStatistics(); // we don't currently have columnSizes or distinct counts. 
Map<Integer, Long> valueCounts = new HashMap<>(); Map<Integer, Long> nullCounts = new HashMap<>(); Integer[] icebergIds = new Integer[orcSchema.getMaximumId() + 1]; for(TypeDescription type: columnIds.keySet()) { icebergIds[type.getId()] = columnIds.get(type); } for(int c=1; c < stats.length; ++c) { if (icebergIds[c] != null) { valueCounts.put(icebergIds[c], stats[c].getNumberOfValues()); } } for(TypeDescription child: orcSchema.getChildren()) { int c = child.getId(); if (icebergIds[c] != null) { nullCounts.put(icebergIds[c], rows - stats[c].getNumberOfValues()); } } return new Metrics(rows, null, valueCounts, nullCounts); } catch (IOException e) { throw new RuntimeException("Can't get statistics " + path, e); } } @Override public void close() throws IOException { writer.close(); } public TypeDescription getSchema() { return orcSchema; } }
6,512
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/OrcIterator.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.orc; import org.apache.hadoop.fs.Path; import org.apache.orc.RecordReader; import org.apache.orc.TypeDescription; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import java.io.Closeable; import java.io.IOException; import java.util.Iterator; /** * An adaptor so that the ORC RecordReader can be used as an Iterator. * Because the same VectorizedRowBatch is reused on each call to next, * it gets changed when hasNext or next is called. */ public class OrcIterator implements Iterator<VectorizedRowBatch>, Closeable { private final Path filename; private final RecordReader rows; private final VectorizedRowBatch batch; private boolean advanced = false; OrcIterator(Path filename, TypeDescription schema, RecordReader rows) { this.filename = filename; this.rows = rows; this.batch = schema.createRowBatch(); } @Override public void close() throws IOException { rows.close(); } private void advance() { if (!advanced) { try { rows.nextBatch(batch); } catch (IOException e) { throw new RuntimeException("Problem reading ORC file " + filename, e); } advanced = true; } } @Override public boolean hasNext() { advance(); return batch.size > 0; } @Override public VectorizedRowBatch next() { // make sure we have the next batch advance(); // mark it as used advanced = false; return batch; } }
6,513
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/ColumnIdMap.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

/**
 * The mapping from ORC's TypeDescription to the Iceberg column ids.
 *
 * Keep the API limited to Map rather than a concrete type so that we can
 * change it later.
 * <p>
 * Backed by an IdentityHashMap because TypeDescription equality is structural, but each
 * schema node must map to its own id.
 */
public class ColumnIdMap implements Map<TypeDescription, Integer> {

  private final IdentityHashMap<TypeDescription, Integer> idMap = new IdentityHashMap<>();

  @Override
  public int size() {
    return idMap.size();
  }

  @Override
  public boolean isEmpty() {
    return idMap.isEmpty();
  }

  @Override
  public boolean containsKey(Object key) {
    return idMap.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return idMap.containsValue(value);
  }

  @Override
  public Integer get(Object key) {
    return idMap.get(key);
  }

  @Override
  public Integer put(TypeDescription key, Integer value) {
    return idMap.put(key, value);
  }

  @Override
  public Integer remove(Object key) {
    return idMap.remove(key);
  }

  @Override
  public void putAll(Map<? extends TypeDescription, ? extends Integer> map) {
    idMap.putAll(map);
  }

  @Override
  public void clear() {
    idMap.clear();
  }

  @Override
  public Set<TypeDescription> keySet() {
    return idMap.keySet();
  }

  @Override
  public Collection<Integer> values() {
    return idMap.values();
  }

  @Override
  public Set<Entry<TypeDescription, Integer>> entrySet() {
    return idMap.entrySet();
  }

  /**
   * Serializes the mapping as UTF-8 text of the form
   * {@code orcId:icebergId,orcId:icebergId,...}.
   *
   * @return a buffer holding the encoded mapping
   */
  public ByteBuffer serialize() {
    StringBuilder buffer = new StringBuilder();
    boolean needComma = false;
    // iterate entries directly instead of keySet() + get() lookups
    for (Entry<TypeDescription, Integer> entry : idMap.entrySet()) {
      if (needComma) {
        buffer.append(',');
      } else {
        needComma = true;
      }
      buffer.append(entry.getKey().getId())
          .append(':')
          .append(entry.getValue().intValue());
    }
    return ByteBuffer.wrap(buffer.toString().getBytes(StandardCharsets.UTF_8));
  }

  /**
   * Rebuilds a ColumnIdMap from the output of {@link #serialize()}.
   *
   * @param schema the ORC schema whose subtypes the ids refer to
   * @param serial the serialized mapping
   * @return the reconstructed mapping; empty if the buffer holds no text
   */
  public static ColumnIdMap deserialize(TypeDescription schema, ByteBuffer serial) {
    ColumnIdMap result = new ColumnIdMap();
    String serialized = StandardCharsets.UTF_8.decode(serial).toString();
    // an empty buffer encodes an empty mapping; previously this threw NumberFormatException
    if (serialized.isEmpty()) {
      return result;
    }
    for (String part : serialized.split(",")) {
      String[] subparts = part.split(":");
      result.put(schema.findSubtype(Integer.parseInt(subparts[0])),
          Integer.parseInt(subparts[1]));
    }
    return result;
  }
}
6,514
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/TypeConversion.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.List;

/**
 * Converts between Iceberg and ORC type systems, tracking the Iceberg field id for each
 * ORC schema node in a {@link ColumnIdMap}.
 */
public class TypeConversion {

  /**
   * Convert a given Iceberg schema to ORC.
   * @param schema the Iceberg schema to convert
   * @param columnIds an output with the column ids
   * @return the ORC schema
   */
  public static TypeDescription toOrc(Schema schema, ColumnIdMap columnIds) {
    // the root struct has no field id of its own, so pass null
    return toOrc(null, schema.asStruct(), columnIds);
  }

  /**
   * Convert one Iceberg type (recursively, including children) to ORC.
   *
   * @param fieldId the Iceberg field id for this node, or null for the root
   * @param type the Iceberg type to convert
   * @param columnIds output map populated with ORC node -> Iceberg field id
   * @return the ORC type
   */
  static TypeDescription toOrc(Integer fieldId, Type type, ColumnIdMap columnIds) {
    TypeDescription result;
    switch (type.typeId()) {
      case BOOLEAN:
        result = TypeDescription.createBoolean();
        break;
      case INTEGER:
        result = TypeDescription.createInt();
        break;
      case LONG:
        result = TypeDescription.createLong();
        break;
      case FLOAT:
        result = TypeDescription.createFloat();
        break;
      case DOUBLE:
        result = TypeDescription.createDouble();
        break;
      case DATE:
        result = TypeDescription.createDate();
        break;
      case TIME:
        // NOTE(review): TIME is stored as an ORC int — confirm the intended unit/precision
        result = TypeDescription.createInt();
        break;
      case TIMESTAMP:
        result = TypeDescription.createTimestamp();
        break;
      case STRING:
        result = TypeDescription.createString();
        break;
      case UUID:
        // UUID, FIXED, and BINARY all map to ORC binary
        result = TypeDescription.createBinary();
        break;
      case FIXED:
        result = TypeDescription.createBinary();
        break;
      case BINARY:
        result = TypeDescription.createBinary();
        break;
      case DECIMAL: {
        Types.DecimalType decimal = (Types.DecimalType) type;
        result = TypeDescription.createDecimal()
            .withScale(decimal.scale())
            .withPrecision(decimal.precision());
        break;
      }
      case STRUCT: {
        // convert each field recursively, preserving field order
        result = TypeDescription.createStruct();
        for (Types.NestedField field : type.asStructType().fields()) {
          result.addField(field.name(), toOrc(field.fieldId(), field.type(), columnIds));
        }
        break;
      }
      case LIST: {
        Types.ListType list = (Types.ListType) type;
        result = TypeDescription.createList(toOrc(list.elementId(), list.elementType(), columnIds));
        break;
      }
      case MAP: {
        Types.MapType map = (Types.MapType) type;
        TypeDescription key = toOrc(map.keyId(), map.keyType(), columnIds);
        result = TypeDescription.createMap(key, toOrc(map.valueId(), map.valueType(), columnIds));
        break;
      }
      default:
        throw new IllegalArgumentException("Unhandled type " + type.typeId());
    }
    // record this node's Iceberg id; children were recorded by the recursive calls above
    if (fieldId != null) {
      columnIds.put(result, fieldId);
    }
    return result;
  }

  /**
   * Convert an ORC schema to an Iceberg schema.
   * @param schema the ORC schema
   * @param columnIds the column ids
   * @return the Iceberg schema
   */
  public Schema fromOrc(TypeDescription schema, ColumnIdMap columnIds) {
    return new Schema(convertOrcToType(schema, columnIds).asStructType().fields());
  }

  /**
   * Convert one ORC type (recursively) back to Iceberg, looking up field ids in columnIds.
   * All nested fields are produced as optional.
   */
  Type convertOrcToType(TypeDescription schema, ColumnIdMap columnIds) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case BYTE:
      case SHORT:
      case INT:
        // narrower ORC integer types all widen to Iceberg integer
        return Types.IntegerType.get();
      case LONG:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case STRING:
      case CHAR:
      case VARCHAR:
        // char/varchar length limits are not preserved
        return Types.StringType.get();
      case BINARY:
        return Types.BinaryType.get();
      case DATE:
        return Types.DateType.get();
      case TIMESTAMP:
        return Types.TimestampType.withoutZone();
      case DECIMAL:
        return Types.DecimalType.of(schema.getPrecision(), schema.getScale());
      case STRUCT: {
        // fieldNames and fieldTypes are parallel lists
        List<String> fieldNames = schema.getFieldNames();
        List<TypeDescription> fieldTypes = schema.getChildren();
        List<Types.NestedField> fields = new ArrayList<>(fieldNames.size());
        for (int c = 0; c < fieldNames.size(); ++c) {
          String name = fieldNames.get(c);
          TypeDescription type = fieldTypes.get(c);
          fields.add(Types.NestedField.optional(columnIds.get(type), name,
              convertOrcToType(type, columnIds)));
        }
        return Types.StructType.of(fields);
      }
      case LIST: {
        TypeDescription child = schema.getChildren().get(0);
        return Types.ListType.ofOptional(columnIds.get(child),
            convertOrcToType(child, columnIds));
      }
      case MAP: {
        // ORC maps have exactly two children: key then value
        TypeDescription key = schema.getChildren().get(0);
        TypeDescription value = schema.getChildren().get(1);
        return Types.MapType.ofOptional(columnIds.get(key), columnIds.get(value),
            convertOrcToType(key, columnIds), convertOrcToType(value, columnIds));
      }
      default:
        // We don't have an answer for union types.
        throw new IllegalArgumentException("Can't handle " + schema);
    }
  }
}
6,515
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/ORC.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.orc; import com.google.common.base.Preconditions; import com.netflix.iceberg.Schema; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.hadoop.HadoopOutputFile; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.io.OutputFile; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.orc.OrcFile; import org.apache.orc.Reader; import org.apache.orc.TypeDescription; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; public class ORC { private ORC() { } public static WriteBuilder write(OutputFile file) { return new WriteBuilder(file); } public static class WriteBuilder { private final OutputFile file; private final Configuration conf; private Schema schema = null; private Map<String, byte[]> metadata = new HashMap<>(); private WriteBuilder(OutputFile file) { this.file = file; if (file instanceof HadoopOutputFile) { conf = new Configuration(((HadoopOutputFile) file).getConf()); } else { conf = new Configuration(); } } public WriteBuilder metadata(String property, String value) { metadata.put(property, value.getBytes(StandardCharsets.UTF_8)); return this; } public WriteBuilder config(String property, String value) { conf.set(property, value); return 
this; } public WriteBuilder schema(Schema schema) { this.schema = schema; return this; } public OrcFileAppender build() { OrcFile.WriterOptions options = OrcFile.writerOptions(conf); return new OrcFileAppender(schema, file, options, metadata); } } public static ReadBuilder read(InputFile file) { return new ReadBuilder(file); } public static class ReadBuilder { private final InputFile file; private final Configuration conf; private com.netflix.iceberg.Schema schema = null; private Long start = null; private Long length = null; private ReadBuilder(InputFile file) { Preconditions.checkNotNull(file, "Input file cannot be null"); this.file = file; if (file instanceof HadoopInputFile) { conf = new Configuration(((HadoopInputFile) file).getConf()); } else { conf = new Configuration(); } } /** * Restricts the read to the given range: [start, start + length). * * @param start the start position for this read * @param length the length of the range this read should scan * @return this builder for method chaining */ public ReadBuilder split(long start, long length) { this.start = start; this.length = length; return this; } public ReadBuilder schema(com.netflix.iceberg.Schema schema) { this.schema = schema; return this; } public ReadBuilder config(String property, String value) { conf.set(property, value); return this; } public OrcIterator build() { Preconditions.checkNotNull(schema, "Schema is required"); try { Path path = new Path(file.location()); Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf)); ColumnIdMap columnIds = new ColumnIdMap(); TypeDescription orcSchema = TypeConversion.toOrc(schema, columnIds); Reader.Options options = reader.options(); if (start != null) { options.range(start, length); } options.schema(orcSchema); return new OrcIterator(path, orcSchema, reader.rows(options)); } catch (IOException e) { throw new RuntimeException("Can't open " + file.location(), e); } } } }
6,516
0
Create_ds/iceberg/parquet/src/test/java/com/netflix
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/TestHelpers.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import org.junit.Assert;
import java.util.concurrent.Callable;

public class TestHelpers {

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   *                           exception's message
   * @param callable A Callable that is expected to throw the exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Callable callable) {
    try {
      callable.call();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception thrown) {
      handleException(message, expected, containedInMessage, thrown);
    }
  }

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   *                           exception's message
   * @param runnable A Runnable that is expected to throw the runtime exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Runnable runnable) {
    try {
      runnable.run();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception thrown) {
      handleException(message, expected, containedInMessage, thrown);
    }
  }

  // Verifies the thrown exception's class and message; attaches the original
  // exception to any assertion failure so the root cause is visible.
  private static void handleException(String message,
                                      Class<? extends Exception> expected,
                                      String containedInMessage,
                                      Exception thrown) {
    try {
      Assert.assertEquals(message, expected, thrown.getClass());
      Assert.assertTrue(
          "Expected exception message (" + containedInMessage + ") missing: " +
              thrown.getMessage(),
          thrown.getMessage().contains(containedInMessage)
      );
    } catch (AssertionError failure) {
      failure.addSuppressed(thrown);
      throw failure;
    }
  }
}
6,517
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/avro/TestParquetReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.avro; import com.google.common.collect.Iterables; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import org.apache.avro.generic.GenericData; import java.io.File; import java.io.IOException; public class TestParquetReadProjection extends TestReadProjection { protected GenericData.Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, GenericData.Record record) throws IOException { File file = temp.newFile(desc + ".parquet"); file.delete(); try (FileAppender<GenericData.Record> appender = Parquet.write(Files.localOutput(file)) .schema(writeSchema) .build()) { appender.add(record); } Iterable<GenericData.Record> records = Parquet.read(Files.localInput(file)) .project(readSchema) .callInit() .build(); return Iterables.getOnlyElement(records); } }
6,518
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/avro/TestReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.avro; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Schema; import com.netflix.iceberg.types.Comparators; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.IOException; import java.util.List; import java.util.Map; public abstract class TestReadProjection { protected abstract Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException; @Rule public TemporaryFolder temp = new TemporaryFolder(); @Test public void testFullProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Record projected = writeAndRead("full_projection", schema, schema, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("data")); Assert.assertTrue("Should contain 
the correct data value", cmp == 0); } @Test public void testReorderedFullProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("full_projection", schema, reordered, record); Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString()); Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1)); } @Test public void testReorderedProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(2, "missing_1", Types.StringType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.optional(3, "missing_2", Types.LongType.get()) ); Record projected = writeAndRead("full_projection", schema, reordered, record); Assert.assertNull("Should contain the correct 0 value", projected.get(0)); Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString()); Assert.assertNull("Should contain the correct 2 value", projected.get(2)); } @Test public void testEmptyProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); 
record.put("data", "test"); Record projected = writeAndRead("empty_projection", schema, schema.select(), record); Assert.assertNotNull("Should read a non-null record", projected); try { projected.get(0); Assert.fail("Should not retrieve value with ordinal 0"); } catch (ArrayIndexOutOfBoundsException e) { // this is expected because there are no values } } @Test public void testBasicProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("data", "test"); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record); Assert.assertNull("Should not project data", projected.get("data")); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Schema dataOnly = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()) ); projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record); Assert.assertNull("Should not project id", projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("data")); Assert.assertTrue("Should contain the correct data value", cmp == 0); } @Test public void testRename() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("data", "test"); Schema readSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "renamed", Types.StringType.get()) ); Record projected = writeAndRead("project_and_rename", 
writeSchema, readSchema, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("renamed")); Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0); } @Test public void testNestedStructProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record location = new Record( AvroSchemaUtil.fromOption(record.getSchema().getField("location").schema())); location.put("lat", 52.995143f); location.put("long", -1.539054f); record.put("location", location); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Record projectedLocation = (Record) projected.get("location"); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project location", projectedLocation); Schema latOnly = new Schema( Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()) )) ); projected = writeAndRead("latitude_only", writeSchema, latOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertNull("Should not project longitude", projectedLocation.get("long")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f); Schema longOnly = new Schema( 
Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); projected = writeAndRead("longitude_only", writeSchema, longOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertNull("Should not project latitutde", projectedLocation.get("lat")); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f); Schema locationOnly = writeSchema.select("location"); projected = writeAndRead("location_only", writeSchema, locationOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f); } @Test public void testMapProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "properties", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get())) ); Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B"); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("properties", properties); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project properties map", projected.get("properties")); Schema keyOnly = 
writeSchema.select("properties.key"); projected = writeAndRead("key_only", writeSchema, keyOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); Schema valueOnly = writeSchema.select("properties.value"); projected = writeAndRead("value_only", writeSchema, valueOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); Schema mapOnly = writeSchema.select("properties"); projected = writeAndRead("map_only", writeSchema, mapOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); } private Map<String, ?> toStringMap(Map<?, ?> map) { Map<String, Object> stringMap = Maps.newHashMap(); for (Map.Entry<?, ?> entry : map.entrySet()) { if (entry.getValue() instanceof CharSequence) { stringMap.put(entry.getKey().toString(), entry.getValue().toString()); } else { stringMap.put(entry.getKey().toString(), entry.getValue()); } } return stringMap; } @Test public void testMapOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) ) )) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record l1 = new Record(AvroSchemaUtil.fromOption( AvroSchemaUtil.fromOption(record.getSchema().getField("locations").schema()) .getValueType())); l1.put("lat", 53.992811f); l1.put("long", -1.542616f); Record l2 = new Record(l1.getSchema()); 
l2.put("lat", 52.995143f); l2.put("long", -1.539054f); record.put("locations", ImmutableMap.of("L1", l1, "L2", l2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project locations map", projected.get("locations")); projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project locations map", record.get("locations"), toStringMap((Map) projected.get("locations"))); projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record); Assert.assertNull("Should not project id", projected.get("id")); Map<String, ?> locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); Record projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain lat", 53.992811f, (float) projectedL1.get("lat"), 0.000001); Assert.assertNull("L1 should not contain long", projectedL1.get("long")); Record projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 should contain lat", 52.995143f, (float) projectedL2.get("lat"), 0.000001); Assert.assertNull("L2 should not contain long", projectedL2.get("long")); projected = writeAndRead("long_only", writeSchema, writeSchema.select("locations.long"), record); Assert.assertNull("Should not project id", projected.get("id")); locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); 
Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertNull("L1 should not contain lat", projectedL1.get("lat")); Assert.assertEquals("L1 should contain long", -1.542616f, (float) projectedL1.get("long"), 0.000001); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertNull("L2 should not contain lat", projectedL2.get("lat")); Assert.assertEquals("L2 should contain long", -1.539054f, (float) projectedL2.get("long"), 0.000001); Schema latitiudeRenamed = new Schema( Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( Types.NestedField.required(1, "latitude", Types.FloatType.get()) ) )) ); projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record); Assert.assertNull("Should not project id", projected.get("id")); locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain latitude", 53.992811f, (float) projectedL1.get("latitude"), 0.000001); Assert.assertNull("L1 should not contain lat", projectedL1.get("lat")); Assert.assertNull("L1 should not contain long", projectedL1.get("long")); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 should contain latitude", 52.995143f, (float) projectedL2.get("latitude"), 0.000001); Assert.assertNull("L2 should not contain lat", projectedL2.get("lat")); Assert.assertNull("L2 should not contain long", projectedL2.get("long")); } @Test public void 
testListProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(10, "values", Types.ListType.ofOptional(11, Types.LongType.get())) ); List<Long> values = ImmutableList.of(56L, 57L, 58L); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("values", values); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project values list", projected.get("values")); Schema elementOnly = writeSchema.select("values.element"); projected = writeAndRead("element_only", writeSchema, elementOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire list", values, projected.get("values")); Schema listOnly = writeSchema.select("values"); projected = writeAndRead("list_only", writeSchema, listOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire list", values, projected.get("values")); } @Test @SuppressWarnings("unchecked") public void testListOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()) )) ) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record p1 = new Record(AvroSchemaUtil.fromOption( AvroSchemaUtil.fromOption(record.getSchema().getField("points").schema()) .getElementType())); p1.put("x", 1); p1.put("y", 2); Record p2 = 
new Record(p1.getSchema()); p2.put("x", 3); p2.put("y", null); record.put("points", ImmutableList.of(p1, p2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project points list", projected.get("points")); projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project points list", record.get("points"), projected.get("points")); projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); List<Record> points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); Record projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Record projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", 
projectedP2.get("x")); Assert.assertEquals("Should project null y", null, projectedP2.get("y")); Schema yRenamed = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.optional(18, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("y_renamed", writeSchema, yRenamed, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); Assert.assertEquals("Should project null z", null, projectedP2.get("z")); } }
6,519
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestMetricsRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StringType;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Tests {@link ParquetMetricsRowGroupFilter}: decides from Parquet row-group
 * statistics (min/max, null counts, row counts) whether a row group can be
 * skipped for a given Iceberg filter expression.
 * <p>
 * A single Parquet file with one row group is written once in
 * {@link #createInputFile()}; each test evaluates an expression against that
 * row group's metadata. The file's column names are prefixed with {@code _}
 * while the read schema uses unprefixed names; columns are matched by field id,
 * not by name.
 */
public class TestMetricsRowGroupFilter {

  // Read schema. Field id 7 ("not_in_file") intentionally has no column in the
  // data file, to exercise filtering on columns missing from the file.
  private static final Schema SCHEMA = new Schema(
      required(1, "id", IntegerType.get()),
      optional(2, "no_stats", StringType.get()),
      required(3, "required", StringType.get()),
      optional(4, "all_nulls", LongType.get()),
      optional(5, "some_nulls", StringType.get()),
      optional(6, "no_nulls", StringType.get()),
      optional(7, "not_in_file", FloatType.get())
  );

  // Write schema: same field ids as SCHEMA but renamed columns, minus field 7.
  private static final Schema FILE_SCHEMA = new Schema(
      required(1, "_id", IntegerType.get()),
      optional(2, "_no_stats", StringType.get()),
      required(3, "_required", StringType.get()),
      optional(4, "_all_nulls", LongType.get()),
      optional(5, "_some_nulls", StringType.get()),
      optional(6, "_no_nulls", StringType.get())
  );

  // A string long enough (200 UUIDs) that Parquet truncates/drops its column
  // statistics, so the "no_stats" column has no usable min/max.
  private static final String TOO_LONG_FOR_STATS;
  static {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 200; i += 1) {
      sb.append(UUID.randomUUID().toString());
    }
    TOO_LONG_FOR_STATS = sb.toString();
  }

  private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-test.parquet");

  // Populated once by createInputFile(); read-only afterwards.
  private static MessageType PARQUET_SCHEMA = null;
  private static BlockMetaData ROW_GROUP_METADATA = null;

  /**
   * Writes the shared test file (50 records, one row group) and captures its
   * Parquet schema and row-group metadata for the filter tests.
   *
   * @throws IOException if writing or re-reading the Parquet file fails
   */
  @BeforeClass
  public static void createInputFile() throws IOException {
    if (PARQUET_FILE.exists()) {
      Assert.assertTrue(PARQUET_FILE.delete());
    }

    OutputFile outFile = Files.localOutput(PARQUET_FILE);
    try (FileAppender<Record> appender = Parquet.write(outFile)
        .schema(FILE_SCHEMA)
        .build()) {
      GenericRecordBuilder builder = new GenericRecordBuilder(convert(FILE_SCHEMA, "table"));
      // create 50 records
      for (int i = 0; i < 50; i += 1) {
        builder.set("_id", 30 + i); // min=30, max=79, num-nulls=0
        builder.set("_no_stats", TOO_LONG_FOR_STATS); // value longer than 4k will produce no stats
        builder.set("_required", "req"); // required, always non-null
        builder.set("_all_nulls", null); // never non-null
        builder.set("_some_nulls", (i % 10 == 0) ? null : "some"); // includes some null values
        builder.set("_no_nulls", ""); // optional, but always non-null
        appender.add(builder.build());
      }
    }

    InputFile inFile = Files.localInput(PARQUET_FILE);
    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile))) {
      Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
      ROW_GROUP_METADATA = reader.getRowGroups().get(0);
      PARQUET_SCHEMA = reader.getFileMetaData().getSchema();
    }

    PARQUET_FILE.deleteOnExit();
  }

  @Test
  public void testAllNulls() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead);
  }

  @Test
  public void testNoNulls() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: at least one null value in all null column", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead);
  }

  @Test
  public void testRequiredColumn() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
  }

  @Test
  public void testMissingColumn() {
    // referencing a column that is not in the read schema must fail binding
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("missing", 5))
            .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA));
  }

  @Test
  public void testColumnNotInFile() {
    // a column missing from the file reads as all nulls: only null-tolerant
    // predicates can match
    Expression[] cannotMatch = new Expression[] {
        lessThan("not_in_file", 1.0f), lessThanOrEqual("not_in_file", 1.0f),
        equal("not_in_file", 1.0f), greaterThan("not_in_file", 1.0f),
        greaterThanOrEqual("not_in_file", 1.0f), notNull("not_in_file")
    };

    for (Expression expr : cannotMatch) {
      boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
      Assert.assertFalse("Should skip when column is not in file (all nulls): " + expr, shouldRead);
    }

    Expression[] canMatch = new Expression[] {
        isNull("not_in_file"), notEqual("not_in_file", 1.0f)
    };

    for (Expression expr : canMatch) {
      boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
      Assert.assertTrue("Should read when column is not in file (all nulls): " + expr, shouldRead);
    }
  }

  @Test
  public void testMissingStats() {
    // without stats the filter cannot prove anything, so it must read
    Expression[] exprs = new Expression[] {
        lessThan("no_stats", "a"), lessThanOrEqual("no_stats", "b"), equal("no_stats", "c"),
        greaterThan("no_stats", "d"), greaterThanOrEqual("no_stats", "e"),
        notEqual("no_stats", "f"), isNull("no_stats"), notNull("no_stats")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
      Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead);
    }
  }

  @Test
  public void testZeroRecordFile() {
    // an empty row group can never match, regardless of the predicate
    BlockMetaData emptyBlock = new BlockMetaData();
    emptyBlock.setRowCount(0);

    Expression[] exprs = new Expression[] {
        lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78),
        greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"),
        notNull("some_nulls")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, emptyBlock);
      Assert.assertFalse("Should never read 0-record file: " + expr, shouldRead);
    }
  }

  @Test
  public void testNot() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(lessThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: not(false)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(greaterThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: not(true)", shouldRead);
  }

  @Test
  public void testAnd() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA,
        and(lessThan("id", 5), greaterThanOrEqual("id", 0)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: and(false, false)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA,
        and(greaterThan("id", 5), lessThanOrEqual("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: and(true, true)", shouldRead);
  }

  @Test
  public void testOr() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: or(false, false)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 60)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: or(false, true)", shouldRead);
  }

  @Test
  public void testIntegerLt() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 31))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerLtEq() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGt() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 78))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGtEq() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerEq() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEq() {
    // notEqual can never be disproven by min/max stats alone, so always read
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEqRewritten() {
    // not(equal(...)) is rewritten to notEqual during binding; same expectations
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 29)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 75)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 79)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 85)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }
}
6,520
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestDictionaryRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StringType;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.parquet.column.page.DictionaryPageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.util.UUID;

import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Tests {@link ParquetDictionaryRowGroupFilter}: row groups should be skipped when the
 * dictionary pages for a column prove that no value can match the filter expression.
 * <p>
 * A single Parquet file with one row group is written once for the whole class; ids span
 * [30, 79] so the integer tests probe both bounds.
 */
public class TestDictionaryRowGroupFilter {

  private static final Schema SCHEMA = new Schema(
      required(1, "id", IntegerType.get()),
      optional(2, "no_stats", StringType.get()),
      required(3, "required", StringType.get()),
      optional(4, "all_nulls", LongType.get()),
      optional(5, "some_nulls", StringType.get()),
      optional(6, "no_nulls", StringType.get()),
      optional(7, "non_dict", StringType.get()),
      optional(8, "not_in_file", FloatType.get())
  );

  // file schema uses different (underscored) names; ids are what bind table columns to
  // file columns, so the filter must resolve by id, not by name
  private static final Schema FILE_SCHEMA = new Schema(
      required(1, "_id", IntegerType.get()),
      optional(2, "_no_stats", StringType.get()),
      required(3, "_required", StringType.get()),
      optional(4, "_all_nulls", LongType.get()),
      optional(5, "_some_nulls", StringType.get()),
      optional(6, "_no_nulls", StringType.get()),
      optional(7, "_non_dict", StringType.get())
  );

  private static final String TOO_LONG_FOR_STATS;
  static {
    // build a ~7KB string; Parquet truncates/drops min/max stats for long binary values
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 200; i += 1) {
      sb.append(UUID.randomUUID().toString());
    }
    TOO_LONG_FOR_STATS = sb.toString();
  }

  private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-test.parquet");

  private static MessageType PARQUET_SCHEMA = null;
  private static BlockMetaData ROW_GROUP_METADATA = null;
  private static DictionaryPageReadStore DICTIONARY_STORE = null;

  /**
   * Writes the shared test file and captures the row group metadata, file schema, and
   * dictionary store used by every test.
   */
  @BeforeClass
  public static void createInputFile() throws IOException {
    if (PARQUET_FILE.exists()) {
      Assert.assertTrue(PARQUET_FILE.delete());
    }

    OutputFile outFile = Files.localOutput(PARQUET_FILE);
    try (FileAppender<Record> appender = Parquet.write(outFile)
        .schema(FILE_SCHEMA)
        .build()) {
      GenericRecordBuilder builder = new GenericRecordBuilder(convert(FILE_SCHEMA, "table"));
      // create 20 copies of each record to ensure dictionary-encoding
      for (int copy = 0; copy < 20; copy += 1) {
        // create 50 records
        for (int i = 0; i < 50; i += 1) {
          builder.set("_id", 30 + i); // min=30, max=79, num-nulls=0
          builder.set("_no_stats", TOO_LONG_FOR_STATS); // value longer than 4k will produce no stats
          builder.set("_required", "req"); // required, always non-null
          builder.set("_all_nulls", null); // never non-null
          builder.set("_some_nulls", (i % 10 == 0) ? null : "some"); // includes some null values
          builder.set("_no_nulls", ""); // optional, but always non-null
          builder.set("_non_dict", UUID.randomUUID().toString()); // not dictionary-encoded
          appender.add(builder.build());
        }
      }
    }

    InputFile inFile = Files.localInput(PARQUET_FILE);
    // NOTE(review): the reader is intentionally left open — DICTIONARY_STORE reads
    // dictionary pages lazily from it during the tests; closing here would break them.
    ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile));
    Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
    ROW_GROUP_METADATA = reader.getRowGroups().get(0);
    PARQUET_SCHEMA = reader.getFileMetaData().getSchema();
    DICTIONARY_STORE = reader.getNextDictionaryReader();

    PARQUET_FILE.deleteOnExit();
  }

  @Test
  public void testAllNulls() {
    // notNull cannot be answered from a dictionary (nulls are not dictionary entries)
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
  }

  @Test
  public void testNoNulls() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
  }

  @Test
  public void testRequiredColumn() {
    // required columns can be answered from the schema alone
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
  }

  @Test
  public void testMissingColumn() {
    // binding against the table schema should fail for unknown column names
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("missing", 5))
            .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE));
  }

  @Test
  public void testColumnNotInFile() {
    // "not_in_file" exists in the table schema but not in the file: no dictionary to use
    Expression[] exprs = new Expression[] {
        lessThan("not_in_file", 1.0f), lessThanOrEqual("not_in_file", 1.0f),
        equal("not_in_file", 1.0f), greaterThan("not_in_file", 1.0f),
        greaterThanOrEqual("not_in_file", 1.0f), notNull("not_in_file"),
        isNull("not_in_file"), notEqual("not_in_file", 1.0f)
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
    }
  }

  @Test
  public void testColumnFallbackOrNotDictionaryEncoded() {
    // "non_dict" has too many distinct values; the writer fell back to plain encoding
    Expression[] exprs = new Expression[] {
        lessThan("non_dict", "a"), lessThanOrEqual("non_dict", "a"), equal("non_dict", "a"),
        greaterThan("non_dict", "a"), greaterThanOrEqual("non_dict", "a"), notNull("non_dict"),
        isNull("non_dict"), notEqual("non_dict", "a")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
    }
  }

  @Test
  public void testMissingStats() {
    // stats are suppressed for long values, but the dictionary still answers equality
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("no_stats", "a"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: stats are missing but dictionary is present", shouldRead);
  }

  @Test
  public void testNot() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(lessThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: not(false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(greaterThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: not(true)", shouldRead);
  }

  @Test
  public void testAnd() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        and(lessThan("id", 5), greaterThanOrEqual("id", 0)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: and(false, false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        and(greaterThan("id", 5), lessThanOrEqual("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: and(true, true)", shouldRead);
  }

  @Test
  public void testOr() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: or(false, false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 60)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: or(false, true)", shouldRead);
  }

  @Test
  public void testIntegerLt() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 31))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    // message typo fixed: "may possible ids" -> "many possible ids" (matches testIntegerLtEq)
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerLtEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGt() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    // message fixed: the relation is 85 > 79, not 85 < 79
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 78))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGtEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    // message fixed: the relation is 85 > 79, not 85 < 79
    Assert.assertFalse("Should not read: id range above upper bound (85 > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEq() {
    // notEqual can never eliminate a row group with 50 distinct ids
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  @Test
  public void testIntegerNotEqRewritten() {
    // not(equal(...)) should be rewritten to notEqual(...) and behave identically
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 29)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 75)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 79)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 85)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  @Test
  public void testStringNotEq() {
    // the dictionary for some_nulls contains only "some" (nulls have no dictionary entry),
    // so notEqual("some_nulls", "some") can eliminate the row group
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("some_nulls", "some"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: all values are 'some'", shouldRead);
  }
}
6,521
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestMetricsRowGroupFilterTypes.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.BinaryType;
import com.netflix.iceberg.types.Types.BooleanType;
import com.netflix.iceberg.types.Types.DateType;
import com.netflix.iceberg.types.Types.DoubleType;
import com.netflix.iceberg.types.Types.FixedType;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StringType;
import com.netflix.iceberg.types.Types.TimeType;
import com.netflix.iceberg.types.Types.TimestampType;
import com.netflix.iceberg.types.Types.UUIDType;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.commons.io.Charsets;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Parameterized tests for {@link ParquetMetricsRowGroupFilter} across all supported column
 * types: for each type, one value that is present in the row group (must be read) and one
 * that is not (must be skipped based on column min/max stats).
 */
@RunWith(Parameterized.class)
public class TestMetricsRowGroupFilterTypes {
  private static final Schema SCHEMA = new Schema(
      optional(1, "boolean", BooleanType.get()),
      optional(2, "int", IntegerType.get()),
      optional(3, "long", LongType.get()),
      optional(4, "float", FloatType.get()),
      optional(5, "double", DoubleType.get()),
      optional(6, "date", DateType.get()),
      optional(7, "time", TimeType.get()),
      optional(8, "timestamp", TimestampType.withoutZone()),
      optional(9, "timestamptz", TimestampType.withZone()),
      optional(10, "string", StringType.get()),
      optional(11, "uuid", UUIDType.get()),
      optional(12, "fixed", FixedType.ofLength(4)),
      optional(13, "binary", BinaryType.get()),
      optional(14, "int_decimal", Types.DecimalType.of(8, 2)),
      optional(15, "long_decimal", Types.DecimalType.of(14, 2)),
      optional(16, "fixed_decimal", Types.DecimalType.of(31, 2))
  );

  // file schema uses underscored names; columns are matched by field id, not name
  private static final Schema FILE_SCHEMA = new Schema(
      optional(1, "_boolean", BooleanType.get()),
      optional(2, "_int", IntegerType.get()),
      optional(3, "_long", LongType.get()),
      optional(4, "_float", FloatType.get()),
      optional(5, "_double", DoubleType.get()),
      optional(6, "_date", DateType.get()),
      optional(7, "_time", TimeType.get()),
      optional(8, "_timestamp", TimestampType.withoutZone()),
      optional(9, "_timestamptz", TimestampType.withZone()),
      optional(10, "_string", StringType.get()),
      optional(11, "_uuid", UUIDType.get()),
      optional(12, "_fixed", FixedType.ofLength(4)),
      optional(13, "_binary", BinaryType.get()),
      optional(14, "_int_decimal", Types.DecimalType.of(8, 2)),
      optional(15, "_long_decimal", Types.DecimalType.of(14, 2)),
      optional(16, "_fixed_decimal", Types.DecimalType.of(31, 2))
  );

  private static final File PARQUET_FILE = new File(
      "/tmp/stats-row-group-filter-types-test.parquet");
  private static MessageType PARQUET_SCHEMA = null;
  private static BlockMetaData ROW_GROUP_METADATA = null;

  // fixed literal values written into every record; string forms of the same values are
  // used in parameters() and converted by the expression binding
  private static final UUID uuid = UUID.randomUUID();
  private static final Integer date =
      (Integer) Literal.of("2018-06-29").to(DateType.get()).value();
  private static final Long time =
      (Long) Literal.of("10:02:34.000000").to(TimeType.get()).value();
  private static final Long timestamp = (Long) Literal.of("2018-06-29T10:02:34.000000")
      .to(TimestampType.withoutZone()).value();
  private static final GenericFixed fixed = new GenericData.Fixed(
      org.apache.avro.Schema.createFixed("_fixed", null, null, 4),
      "abcd".getBytes(StandardCharsets.UTF_8)); // StandardCharsets replaces deprecated Charsets

  /**
   * Writes one row group of 50 records and captures its metadata and file schema.
   */
  @BeforeClass
  public static void createInputFile() throws IOException {
    if (PARQUET_FILE.exists()) {
      Assert.assertTrue(PARQUET_FILE.delete());
    }

    OutputFile outFile = Files.localOutput(PARQUET_FILE);
    try (FileAppender<GenericData.Record> appender = Parquet.write(outFile)
        .schema(FILE_SCHEMA)
        .build()) {
      GenericRecordBuilder builder = new GenericRecordBuilder(convert(FILE_SCHEMA, "table"));
      // create 50 records
      for (int i = 0; i < 50; i += 1) {
        builder.set("_boolean", false);
        builder.set("_int", i);
        builder.set("_long", 5_000_000_000L + i);
        builder.set("_float", ((float) (100 - i)) / 100F + 1.0F); // 2.0f, 1.99f, 1.98f, ...
        builder.set("_double", ((double) i) / 100.0D + 2.0D); // 2.0d, 2.01d, 2.02d, ...
        builder.set("_date", date);
        builder.set("_time", time);
        builder.set("_timestamp", timestamp);
        builder.set("_timestamptz", timestamp);
        builder.set("_string", "tapir");
        builder.set("_uuid", uuid);
        builder.set("_fixed", fixed);
        builder.set("_binary", ByteBuffer.wrap("xyz".getBytes(StandardCharsets.UTF_8)));
        builder.set("_int_decimal", new BigDecimal("77.77"));
        builder.set("_long_decimal", new BigDecimal("88.88"));
        builder.set("_fixed_decimal", new BigDecimal("99.99"));
        appender.add(builder.build());
      }
    }

    InputFile inFile = Files.localInput(PARQUET_FILE);
    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile))) {
      Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
      ROW_GROUP_METADATA = reader.getRowGroups().get(0);
      PARQUET_SCHEMA = reader.getFileMetaData().getSchema();
    }

    PARQUET_FILE.deleteOnExit();
  }

  private final String column;     // table-schema column under test
  private final Object readValue;  // value present in the row group
  private final Object skipValue;  // value outside the row group's min/max

  @Parameterized.Parameters
  public static Object[][] parameters() {
    return new Object[][] {
        new Object[] { "boolean", false, true },
        new Object[] { "int", 5, 55 },
        new Object[] { "long", 5_000_000_049L, 5_000L },
        new Object[] { "float", 1.97f, 2.11f },
        new Object[] { "double", 2.11d, 1.97d },
        new Object[] { "date", "2018-06-29", "2018-05-03" },
        new Object[] { "time", "10:02:34.000000", "10:02:34.000001" },
        new Object[] { "timestamp",
            "2018-06-29T10:02:34.000000",
            "2018-06-29T15:02:34.000000" },
        new Object[] { "timestamptz",
            "2018-06-29T10:02:34.000000+00:00",
            "2018-06-29T10:02:34.000000-07:00" },
        new Object[] { "string", "tapir", "monthly" },
        // new Object[] { "uuid", uuid, UUID.randomUUID() }, // not supported yet
        new Object[] { "fixed",
            "abcd".getBytes(StandardCharsets.UTF_8),
            new byte[] { 0, 1, 2, 3 } },
        new Object[] { "binary",
            "xyz".getBytes(StandardCharsets.UTF_8),
            new byte[] { 0, 1, 2, 3, 4, 5 } },
        new Object[] { "int_decimal", "77.77", "12.34" },
        new Object[] { "long_decimal", "88.88", "12.34" },
        new Object[] { "fixed_decimal", "99.99", "12.34" },
    };
  }

  public TestMetricsRowGroupFilterTypes(String column, Object readValue, Object skipValue) {
    this.column = column;
    this.readValue = readValue;
    this.skipValue = skipValue;
  }

  @Test
  public void testEq() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal(column, readValue))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: value is in the row group: " + readValue, shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal(column, skipValue))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: value is not in the row group: " + skipValue, shouldRead);
  }
}
6,522
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ColumnWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.ColumnWriteStore; import org.apache.parquet.io.api.Binary; public abstract class ColumnWriter<T> implements TripleWriter<T> { @SuppressWarnings("unchecked") static <T> ColumnWriter<T> newWriter(ColumnDescriptor desc) { switch (desc.getType()) { case BOOLEAN: return (ColumnWriter<T>) new ColumnWriter<Boolean>(desc) { @Override public void write(int rl, Boolean value) { writeBoolean(rl, value); } }; case INT32: return (ColumnWriter<T>) new ColumnWriter<Integer>(desc) { @Override public void write(int rl, Integer value) { writeInteger(rl, value); } }; case INT64: return (ColumnWriter<T>) new ColumnWriter<Long>(desc) { @Override public void write(int rl, Long value) { writeLong(rl, value); } }; case FLOAT: return (ColumnWriter<T>) new ColumnWriter<Float>(desc) { @Override public void write(int rl, Float value) { writeFloat(rl, value); } }; case DOUBLE: return (ColumnWriter<T>) new ColumnWriter<Double>(desc) { @Override public void write(int rl, Double value) { writeDouble(rl, value); } }; case FIXED_LEN_BYTE_ARRAY: case BINARY: return (ColumnWriter<T>) new ColumnWriter<Binary>(desc) { @Override public void write(int rl, Binary value) { writeBinary(rl, value); } }; default: throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType()); } } private final 
ColumnDescriptor desc; private final int maxDefinitionLevel; private long triplesCount = 0L; private org.apache.parquet.column.ColumnWriter columnWriter = null; private ColumnWriter(ColumnDescriptor desc) { this.desc = desc; this.maxDefinitionLevel = desc.getMaxDefinitionLevel(); } public void setColumnStore(ColumnWriteStore columnStore) { this.columnWriter = columnStore.getColumnWriter(desc); } @Override public void writeBoolean(int rl, boolean value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeInteger(int rl, int value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeLong(int rl, long value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeFloat(int rl, float value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeDouble(int rl, double value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeBinary(int rl, Binary value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeNull(int rl, int dl) { this.triplesCount += 1; columnWriter.writeNull(rl, dl); } }
6,523
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetSchemaUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types.MessageTypeBuilder;
import java.util.Set;

/**
 * Static conversion and projection helpers between Iceberg {@link Schema} and Parquet
 * {@link MessageType}, including fallback handling for files written without field ids.
 */
public class ParquetSchemaUtil {

  // utility class: not meant to be instantiated
  private ParquetSchemaUtil() {
  }

  /** Converts an Iceberg schema to an equivalent Parquet message type with the given name. */
  public static MessageType convert(Schema schema, String name) {
    return new TypeToMessageType().convert(schema, name);
  }

  /** Converts a Parquet message type to an Iceberg schema, preserving name aliases. */
  public static Schema convert(MessageType parquetSchema) {
    MessageTypeToType converter = new MessageTypeToType(parquetSchema);
    return new Schema(
        ParquetTypeVisitor.visit(parquetSchema, converter).asNestedType().fields(),
        converter.getAliases());
  }

  /**
   * Prunes a file schema (with field ids) down to the columns in the expected schema.
   */
  public static MessageType pruneColumns(MessageType fileSchema, Schema expectedSchema) {
    // column order must match the incoming type, so it doesn't matter that the ids are unordered
    Set<Integer> selectedIds = TypeUtil.getProjectedIds(expectedSchema);
    return (MessageType) ParquetTypeVisitor.visit(fileSchema, new PruneColumns(selectedIds));
  }

  /**
   * Prunes columns from a Parquet file schema that was written without field ids.
   * <p>
   * Files that were written without field ids are read assuming that schema evolution preserved
   * column order. Deleting columns was not allowed.
   * <p>
   * The order of columns in the resulting Parquet schema matches the Parquet file.
   *
   * @param fileSchema schema from a Parquet file that does not have field ids.
   * @param expectedSchema expected schema
   * @return a parquet schema pruned using the expected schema
   */
  public static MessageType pruneColumnsFallback(MessageType fileSchema, Schema expectedSchema) {
    Set<Integer> selectedIds = Sets.newHashSet();

    for (Types.NestedField field : expectedSchema.columns()) {
      selectedIds.add(field.fieldId());
    }

    MessageTypeBuilder builder = org.apache.parquet.schema.Types.buildMessage();

    // positions are used as ids: field N in the file gets id N (1-based), matching addFallbackIds
    int ordinal = 1;
    for (Type type : fileSchema.getFields()) {
      if (selectedIds.contains(ordinal)) {
        builder.addField(type.withId(ordinal));
      }
      ordinal += 1;
    }

    return builder.named(fileSchema.getName());
  }

  /**
   * Returns whether every field in the file schema carries a field id.
   */
  public static boolean hasIds(MessageType fileSchema) {
    try {
      // Try to convert the type to Iceberg. If an ID assignment is needed, return false.
      // (uses exception control flow because the visitor has no "missing id" callback)
      ParquetTypeVisitor.visit(fileSchema, new MessageTypeToType(fileSchema) {
        @Override
        protected int nextId() {
          throw new IllegalStateException("Needed to assign ID");
        }
      });
      // no assignment was needed
      return true;
    } catch (IllegalStateException e) {
      // at least one field was missing an id.
      return false;
    }
  }

  /**
   * Assigns positional fallback ids (1-based) to every top-level field of a schema
   * written without field ids.
   */
  public static MessageType addFallbackIds(MessageType fileSchema) {
    MessageTypeBuilder builder = org.apache.parquet.schema.Types.buildMessage();

    int ordinal = 1; // ids are assigned starting at 1
    for (Type type : fileSchema.getFields()) {
      builder.addField(type.withId(ordinal));
      ordinal += 1;
    }

    return builder.named(fileSchema.getName());
  }
}
6,524
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueWriters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.util.Utf8;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.ColumnWriteStore;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.Type;
import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * Factory methods and base implementations for {@link ParquetValueWriter} instances that
 * write primitives, options, collections, maps, and structs using Parquet
 * repetition/definition levels.
 */
public class ParquetValueWriters {
  private ParquetValueWriters() {
  }

  /** Wraps a writer in an {@link OptionWriter} when the Parquet type is OPTIONAL. */
  public static <T> ParquetValueWriter<T> option(Type type, int definitionLevel,
                                                 ParquetValueWriter<T> writer) {
    if (type.isRepetition(Type.Repetition.OPTIONAL)) {
      return new OptionWriter<>(definitionLevel, writer);
    }
    return writer;
  }

  /** Returns a writer for unboxed primitive values of the column's physical type. */
  public static <T> UnboxedWriter<T> unboxed(ColumnDescriptor desc) {
    return new UnboxedWriter<>(desc);
  }

  /** Returns a writer that encodes {@link CharSequence} values as UTF-8 binary. */
  public static PrimitiveWriter<CharSequence> strings(ColumnDescriptor desc) {
    return new StringWriter(desc);
  }

  /** Returns a writer that stores a decimal's unscaled value in an int32 column. */
  public static PrimitiveWriter<BigDecimal> decimalAsInteger(ColumnDescriptor desc,
                                                             int precision, int scale) {
    return new IntegerDecimalWriter(desc, precision, scale);
  }

  /** Returns a writer that stores a decimal's unscaled value in an int64 column. */
  public static PrimitiveWriter<BigDecimal> decimalAsLong(ColumnDescriptor desc,
                                                          int precision, int scale) {
    return new LongDecimalWriter(desc, precision, scale);
  }

  /** Returns a writer that stores a decimal's unscaled value in a fixed-length binary column. */
  public static PrimitiveWriter<BigDecimal> decimalAsFixed(ColumnDescriptor desc,
                                                           int precision, int scale) {
    return new FixedDecimalWriter(desc, precision, scale);
  }

  /** Returns a writer for raw {@link ByteBuffer} values. */
  public static PrimitiveWriter<ByteBuffer> byteBuffers(ColumnDescriptor desc) {
    return new BytesWriter(desc);
  }

  /** Returns a repeated-element writer for {@link Collection} values. */
  public static <E> CollectionWriter<E> collections(int dl, int rl,
                                                    ParquetValueWriter<E> writer) {
    return new CollectionWriter<>(dl, rl, writer);
  }

  /** Returns a repeated key/value writer for {@link Map} values. */
  public static <K, V> MapWriter<K, V> maps(int dl, int rl,
                                            ParquetValueWriter<K> keyWriter,
                                            ParquetValueWriter<V> valueWriter) {
    return new MapWriter<>(dl, rl, keyWriter, valueWriter);
  }

  /** Base class for writers that produce a single Parquet column. */
  public abstract static class PrimitiveWriter<T> implements ParquetValueWriter<T> {
    private final ColumnDescriptor desc;
    protected final ColumnWriter<T> column;
    private final List<TripleWriter<?>> children;

    protected PrimitiveWriter(ColumnDescriptor desc) {
      this.desc = desc;
      this.column = ColumnWriter.newWriter(desc);
      this.children = ImmutableList.of(column);
    }

    @Override
    public void write(int repetitionLevel, T value) {
      column.write(repetitionLevel, value);
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      this.column.setColumnStore(columnStore);
    }
  }

  /** Writer exposing primitive-typed write methods that avoid boxing. */
  private static class UnboxedWriter<T> extends PrimitiveWriter<T> {
    private UnboxedWriter(ColumnDescriptor desc) {
      super(desc);
    }

    public void writeBoolean(int repetitionLevel, boolean value) {
      column.writeBoolean(repetitionLevel, value);
    }

    public void writeInteger(int repetitionLevel, int value) {
      column.writeInteger(repetitionLevel, value);
    }

    public void writeLong(int repetitionLevel, long value) {
      column.writeLong(repetitionLevel, value);
    }

    public void writeFloat(int repetitionLevel, float value) {
      column.writeFloat(repetitionLevel, value);
    }

    public void writeDouble(int repetitionLevel, double value) {
      column.writeDouble(repetitionLevel, value);
    }
  }

  /** Writes decimals whose unscaled value fits in an int (precision &lt;= 9). */
  private static class IntegerDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;

    private IntegerDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      // the decimal must match the declared scale exactly and fit in the declared precision
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      column.writeInteger(repetitionLevel, decimal.unscaledValue().intValue());
    }
  }

  /** Writes decimals whose unscaled value fits in a long (precision &lt;= 18). */
  private static class LongDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;

    private LongDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      column.writeLong(repetitionLevel, decimal.unscaledValue().longValue());
    }
  }

  /** Writes decimals as sign-extended fixed-length big-endian binary. */
  private static class FixedDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;
    private final int length;
    // reuse one buffer per thread to avoid allocating on every write
    private final ThreadLocal<byte[]> bytes;

    private FixedDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
      this.length = TypeUtil.decimalRequriedBytes(precision);
      this.bytes = ThreadLocal.withInitial(() -> new byte[length]);
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      // sign-extend the two's-complement unscaled value to the full fixed length
      byte fillByte = (byte) (decimal.signum() < 0 ? 0xFF : 0x00);
      byte[] unscaled = decimal.unscaledValue().toByteArray();
      byte[] buf = bytes.get();
      int offset = length - unscaled.length;

      for (int i = 0; i < length; i += 1) {
        if (i < offset) {
          buf[i] = fillByte;
        } else {
          buf[i] = unscaled[i - offset];
        }
      }

      column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(buf));
    }
  }

  /** Writes {@link ByteBuffer} values as Parquet binary. */
  private static class BytesWriter extends PrimitiveWriter<ByteBuffer> {
    private BytesWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, ByteBuffer buffer) {
      column.writeBinary(repetitionLevel, Binary.fromReusedByteBuffer(buffer));
    }
  }

  /** Writes character sequences as UTF-8, reusing Avro Utf8 backing bytes when possible. */
  private static class StringWriter extends PrimitiveWriter<CharSequence> {
    private StringWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, CharSequence value) {
      if (value instanceof Utf8) {
        // avoid a copy: Utf8 already holds UTF-8 encoded bytes
        Utf8 utf8 = (Utf8) value;
        column.writeBinary(repetitionLevel,
            Binary.fromReusedByteArray(utf8.getBytes(), 0, utf8.getByteLength()));
      } else {
        column.writeBinary(repetitionLevel, Binary.fromString(value.toString()));
      }
    }
  }

  /** Wraps another writer, emitting nulls at definitionLevel - 1 when the value is absent. */
  static class OptionWriter<T> implements ParquetValueWriter<T> {
    private final int definitionLevel;
    private final ParquetValueWriter<T> writer;
    private final List<TripleWriter<?>> children;

    OptionWriter(int definitionLevel, ParquetValueWriter<T> writer) {
      this.definitionLevel = definitionLevel;
      this.writer = writer;
      this.children = writer.columns();
    }

    @Override
    public void write(int repetitionLevel, T value) {
      if (value != null) {
        writer.write(repetitionLevel, value);
      } else {
        for (TripleWriter<?> column : children) {
          column.writeNull(repetitionLevel, definitionLevel - 1);
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      writer.setColumnStore(columnStore);
    }
  }

  /** Base for writers of repeated values; subclasses provide an element iterator. */
  public abstract static class RepeatedWriter<L, E> implements ParquetValueWriter<L> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueWriter<E> writer;
    private final List<TripleWriter<?>> children;

    protected RepeatedWriter(int definitionLevel, int repetitionLevel,
                             ParquetValueWriter<E> writer) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.writer = writer;
      this.children = writer.columns();
    }

    @Override
    public void write(int parentRepetition, L value) {
      Iterator<E> elements = elements(value);

      if (!elements.hasNext()) {
        // write the empty list to each column
        // TODO: make sure this definition level is correct
        for (TripleWriter<?> column : children) {
          column.writeNull(parentRepetition, definitionLevel - 1);
        }
      } else {
        // the first element uses the parent's repetition level; the rest use this list's
        boolean first = true;
        while (elements.hasNext()) {
          E element = elements.next();

          int rl = repetitionLevel;
          if (first) {
            rl = parentRepetition;
            first = false;
          }

          writer.write(rl, element);
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      writer.setColumnStore(columnStore);
    }

    protected abstract Iterator<E> elements(L value);
  }

  /** Repeated writer for {@link Collection} values. */
  private static class CollectionWriter<E> extends RepeatedWriter<Collection<E>, E> {
    private CollectionWriter(int definitionLevel, int repetitionLevel,
                             ParquetValueWriter<E> writer) {
      super(definitionLevel, repetitionLevel, writer);
    }

    @Override
    protected Iterator<E> elements(Collection<E> list) {
      return list.iterator();
    }
  }

  /** Base for writers of repeated key/value pairs; subclasses provide a pair iterator. */
  public abstract static class RepeatedKeyValueWriter<M, K, V> implements ParquetValueWriter<M> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueWriter<K> keyWriter;
    private final ParquetValueWriter<V> valueWriter;
    private final List<TripleWriter<?>> children;

    protected RepeatedKeyValueWriter(int definitionLevel, int repetitionLevel,
                                     ParquetValueWriter<K> keyWriter,
                                     ParquetValueWriter<V> valueWriter) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
      this.children = ImmutableList.<TripleWriter<?>>builder()
          .addAll(keyWriter.columns())
          .addAll(valueWriter.columns())
          .build();
    }

    @Override
    public void write(int parentRepetition, M value) {
      Iterator<Map.Entry<K, V>> pairs = pairs(value);

      if (!pairs.hasNext()) {
        // write the empty map to each column
        for (TripleWriter<?> column : children) {
          column.writeNull(parentRepetition, definitionLevel - 1);
        }
      } else {
        // the first pair uses the parent's repetition level; the rest use this map's
        boolean first = true;
        while (pairs.hasNext()) {
          Map.Entry<K, V> pair = pairs.next();

          int rl = repetitionLevel;
          if (first) {
            rl = parentRepetition;
            first = false;
          }

          keyWriter.write(rl, pair.getKey());
          valueWriter.write(rl, pair.getValue());
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      keyWriter.setColumnStore(columnStore);
      valueWriter.setColumnStore(columnStore);
    }

    protected abstract Iterator<Map.Entry<K, V>> pairs(M value);
  }

  /** Repeated key/value writer for {@link Map} values. */
  private static class MapWriter<K, V> extends RepeatedKeyValueWriter<Map<K, V>, K, V> {
    private MapWriter(int definitionLevel, int repetitionLevel,
                      ParquetValueWriter<K> keyWriter,
                      ParquetValueWriter<V> valueWriter) {
      super(definitionLevel, repetitionLevel, keyWriter, valueWriter);
    }

    @Override
    protected Iterator<Map.Entry<K, V>> pairs(Map<K, V> map) {
      return map.entrySet().iterator();
    }
  }

  /** Base for struct writers; subclasses supply per-field value access by position. */
  public abstract static class StructWriter<S> implements ParquetValueWriter<S> {
    private final ParquetValueWriter<Object>[] writers;
    private final List<TripleWriter<?>> children;

    @SuppressWarnings("unchecked")
    protected StructWriter(List<ParquetValueWriter<?>> writers) {
      // generic arrays cannot be created directly; build one reflectively
      this.writers = (ParquetValueWriter<Object>[]) Array.newInstance(
          ParquetValueWriter.class, writers.size());

      ImmutableList.Builder<TripleWriter<?>> columnsBuilder = ImmutableList.builder();
      for (int i = 0; i < writers.size(); i += 1) {
        ParquetValueWriter<?> writer = writers.get(i);
        this.writers[i] = (ParquetValueWriter<Object>) writer;
        columnsBuilder.addAll(writer.columns());
      }

      this.children = columnsBuilder.build();
    }

    @Override
    public void write(int repetitionLevel, S value) {
      for (int i = 0; i < writers.length; i += 1) {
        Object fieldValue = get(value, i);
        writers[i].write(repetitionLevel, fieldValue);
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      for (ParquetValueWriter<?> writer : writers) {
        writer.setColumnStore(columnStore);
      }
    }

    protected abstract Object get(S struct, int index);
  }
}
6,525
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetIterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import org.apache.parquet.hadoop.ParquetReader;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * A {@link CloseableIterable} over records produced by a Parquet reader.
 * <p>
 * Each call to {@link #iterator()} builds a fresh reader that is tracked for cleanup
 * by the enclosing {@link CloseableGroup}.
 */
public class ParquetIterable<T> extends CloseableGroup implements CloseableIterable<T> {
  private final ParquetReader.Builder<T> builder;

  ParquetIterable(ParquetReader.Builder<T> builder) {
    this.builder = builder;
  }

  @Override
  public Iterator<T> iterator() {
    try {
      ParquetReader<T> reader = builder.build();
      addCloseable(reader);
      return new ParquetIterator<>(reader);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create Parquet reader");
    }
  }

  /**
   * Iterator that lazily re-reads on hasNext() because the reader may reuse the
   * returned record object (e.g. an UnsafeRow) between calls.
   */
  private static class ParquetIterator<T> implements Iterator<T>, Closeable {
    private final ParquetReader<T> parquet;
    private boolean needsAdvance = false;
    private boolean hasNext = false;
    private T next = null;

    ParquetIterator(ParquetReader<T> parquet) {
      this.parquet = parquet;
      this.next = advance();
    }

    @Override
    public boolean hasNext() {
      // advance lazily so the previous record stays valid until the caller asks for more
      if (needsAdvance) {
        this.next = advance();
      }
      return hasNext;
    }

    @Override
    public T next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }

      // defer reading the following record until hasNext() is called again
      this.needsAdvance = true;
      return next;
    }

    private T advance() {
      // this must be called in hasNext because it reuses an UnsafeRow
      try {
        T record = parquet.read();
        this.needsAdvance = false;
        this.hasNext = (record != null);
        return record;
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove is not supported");
    }

    @Override
    public void close() throws IOException {
      parquet.close();
    }
  }
}
6,526
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvro.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.UUIDConversion;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.Conversion;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.specific.SpecificData;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;

/**
 * Support for writing Avro data through parquet-avro with a custom decimal logical type
 * that stores small decimals in int/long columns instead of fixed-length binary.
 */
class ParquetAvro {

  /** Rewrites standard Avro decimal logical types into {@link ParquetDecimal} types. */
  static Schema parquetAvroSchema(Schema avroSchema) {
    return AvroSchemaVisitor.visit(avroSchema, new ParquetDecimalSchemaConverter());
  }

  /**
   * A decimal logical type that allows int and long physical types (for precision &lt;= 9 and
   * &lt;= 18 respectively) in addition to fixed, unlike Avro's built-in decimal.
   */
  static class ParquetDecimal extends LogicalType {
    private static final String NAME = "parquet-decimal";

    private int precision;
    private int scale;

    ParquetDecimal(int precision, int scale) {
      super(NAME);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public String getName() {
      return NAME;
    }

    int precision() {
      return precision;
    }

    int scale() {
      return scale;
    }

    @Override
    public Schema addToSchema(Schema schema) {
      super.addToSchema(schema);
      schema.addProp("precision", String.valueOf(precision));
      schema.addProp("scale", String.valueOf(scale));
      return schema;
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      switch (schema.getType()) {
        case INT:
          Preconditions.checkArgument(precision <= 9,
              "Int cannot hold decimal precision: %s", precision);
          break;
        case LONG:
          Preconditions.checkArgument(precision <= 18,
              "Long cannot hold decimal precision: %s", precision);
          // fix: the original fell through to the FIXED case; benign (FIXED only breaks)
          // but fragile, so break explicitly
          break;
        case FIXED:
          break;
        default:
          throw new IllegalArgumentException("Invalid base type for decimal: " + schema);
      }
      Preconditions.checkArgument(scale >= 0,
          "Scale %s cannot be negative", scale);
      // fix: the message previously said "less than", contradicting the scale <= precision check
      Preconditions.checkArgument(scale <= precision,
          "Scale %s cannot be greater than precision %s", scale, precision);
    }
  }

  static {
    // register so schemas round-trip: parsing a schema with this type restores ParquetDecimal
    LogicalTypes.register(ParquetDecimal.NAME, schema -> {
      int precision = Integer.parseInt(schema.getProp("precision"));
      int scale = Integer.parseInt(schema.getProp("scale"));
      return new ParquetDecimal(precision, scale);
    });
  }

  /** Converts between BigDecimal and int-backed parquet-decimal values. */
  private static class IntDecimalConversion extends Conversion<BigDecimal> {
    @Override
    public Class<BigDecimal> getConvertedType() {
      return BigDecimal.class;
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromInt(Integer value, org.apache.avro.Schema schema, LogicalType type) {
      return BigDecimal.valueOf(value, ((ParquetDecimal) type).scale());
    }

    @Override
    public Integer toInt(BigDecimal value, org.apache.avro.Schema schema, LogicalType type) {
      return value.unscaledValue().intValue();
    }
  }

  /** Converts between BigDecimal and long-backed parquet-decimal values. */
  private static class LongDecimalConversion extends Conversion<BigDecimal> {
    @Override
    public Class<BigDecimal> getConvertedType() {
      return BigDecimal.class;
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromLong(Long value, org.apache.avro.Schema schema, LogicalType type) {
      return BigDecimal.valueOf(value, ((ParquetDecimal) type).scale());
    }

    @Override
    public Long toLong(BigDecimal value, org.apache.avro.Schema schema, LogicalType type) {
      return value.unscaledValue().longValue();
    }
  }

  /**
   * Converts between BigDecimal and fixed-backed parquet-decimal values by delegating to
   * Avro's DecimalConversion with a cached standard decimal type per scale.
   */
  private static class FixedDecimalConversion extends Conversions.DecimalConversion {
    private final LogicalType[] decimalsByScale = new LogicalType[39];

    private FixedDecimalConversion() {
      for (int i = 0; i < decimalsByScale.length; i += 1) {
        decimalsByScale[i] = LogicalTypes.decimal(i, i);
      }
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromFixed(GenericFixed value, Schema schema, LogicalType type) {
      return super.fromFixed(value, schema, decimalsByScale[((ParquetDecimal) type).scale()]);
    }

    @Override
    public GenericFixed toFixed(BigDecimal value, Schema schema, LogicalType type) {
      return super.toFixed(value, schema, decimalsByScale[((ParquetDecimal) type).scale()]);
    }
  }

  /**
   * Data model that resolves decimal conversions by precision: int for &lt;= 9, long for
   * &lt;= 18, fixed otherwise; also supports the Iceberg UUID conversion.
   */
  static GenericData DEFAULT_MODEL = new SpecificData() {
    private final Conversion<?> fixedDecimalConversion = new FixedDecimalConversion();
    private final Conversion<?> intDecimalConversion = new IntDecimalConversion();
    private final Conversion<?> longDecimalConversion = new LongDecimalConversion();
    private final Conversion<?> uuidConversion = new UUIDConversion();

    {
      addLogicalTypeConversion(fixedDecimalConversion);
      addLogicalTypeConversion(uuidConversion);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Conversion<T> getConversionByClass(Class<T> datumClass, LogicalType logicalType) {
      if (logicalType == null) {
        return null;
      }

      if (logicalType instanceof ParquetDecimal) {
        ParquetDecimal decimal = (ParquetDecimal) logicalType;
        if (decimal.precision() <= 9) {
          return (Conversion<T>) intDecimalConversion;
        } else if (decimal.precision() <= 18) {
          return (Conversion<T>) longDecimalConversion;
        } else {
          return (Conversion<T>) fixedDecimalConversion;
        }
      } else if ("uuid".equals(logicalType.getName())) {
        return (Conversion<T>) uuidConversion;
      }
      return super.getConversionByClass(datumClass, logicalType);
    }

    @Override
    @SuppressWarnings("unchecked")
    public Conversion<Object> getConversionFor(LogicalType logicalType) {
      if (logicalType == null) {
        return null;
      }

      if (logicalType instanceof LogicalTypes.Decimal) {
        LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
        if (decimal.getPrecision() <= 9) {
          return (Conversion<Object>) intDecimalConversion;
        } else if (decimal.getPrecision() <= 18) {
          return (Conversion<Object>) longDecimalConversion;
        } else {
          return (Conversion<Object>) fixedDecimalConversion;
        }
      } else if ("uuid".equals(logicalType.getName())) {
        return (Conversion<Object>) uuidConversion;
      }
      return super.getConversionFor(logicalType);
    }
  };

  /**
   * Schema visitor that replaces Avro decimal logical types with {@link ParquetDecimal},
   * choosing int, long, or fixed physical types by precision. Returns the original schema
   * object when nothing changed.
   */
  private static class ParquetDecimalSchemaConverter extends AvroSchemaVisitor<Schema> {
    @Override
    public Schema record(Schema record, List<String> names, List<Schema> types) {
      List<Schema.Field> fields = record.getFields();
      int length = fields.size();

      boolean hasChange = false;
      if (length != types.size()) {
        hasChange = true;
      }

      List<Schema.Field> newFields = Lists.newArrayListWithExpectedSize(length);
      for (int i = 0; i < length; i += 1) {
        Schema.Field field = fields.get(i);
        Schema type = types.get(i);
        newFields.add(copyField(field, type));
        if (field.schema() != type) {
          hasChange = true;
        }
      }

      if (hasChange) {
        return copyRecord(record, newFields);
      }

      return record;
    }

    @Override
    public Schema union(Schema union, List<Schema> options) {
      if (!isIdentical(union.getTypes(), options)) {
        return Schema.createUnion(options);
      }
      return union;
    }

    @Override
    public Schema array(Schema array, Schema element) {
      if (array.getElementType() != element) {
        return Schema.createArray(element);
      }
      return array;
    }

    @Override
    public Schema map(Schema map, Schema value) {
      if (map.getValueType() != value) {
        return Schema.createMap(value);
      }
      return map;
    }

    @Override
    public Schema primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      // fix: removed redundant null check before instanceof
      if (logicalType instanceof LogicalTypes.Decimal) {
        LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
        if (decimal.getPrecision() <= 9) {
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.create(Schema.Type.INT));
        } else if (decimal.getPrecision() <= 18) {
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.create(Schema.Type.LONG));
        } else {
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.createFixed(primitive.getName(), null, null,
                  TypeUtil.decimalRequriedBytes(decimal.getPrecision())));
        }
      }
      return primitive;
    }

    private boolean isIdentical(List<Schema> types, List<Schema> replacements) {
      if (types.size() != replacements.size()) {
        return false;
      }
      int length = types.size();
      for (int i = 0; i < length; i += 1) {
        if (types.get(i) != replacements.get(i)) {
          return false;
        }
      }
      return true;
    }

    private static Schema copyRecord(Schema record, List<Schema.Field> newFields) {
      Schema copy = Schema.createRecord(record.getName(), record.getDoc(),
          record.getNamespace(), record.isError(), newFields);
      // preserve custom schema properties on the copy
      for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
        copy.addProp(prop.getKey(), prop.getValue());
      }
      return copy;
    }

    private static Schema.Field copyField(Schema.Field field, Schema newSchema) {
      Schema.Field copy = new Schema.Field(field.name(), newSchema,
          field.doc(), field.defaultVal(), field.order());
      for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
        copy.addProp(prop.getKey(), prop.getValue());
      }
      return copy;
    }
  }
}
6,527
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ColumnIterator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.Dictionary;
import org.apache.parquet.column.page.DataPage;
import org.apache.parquet.column.page.DictionaryPage;
import org.apache.parquet.column.page.PageReader;
import org.apache.parquet.io.ParquetDecodingException;
import org.apache.parquet.io.api.Binary;
import java.io.IOException;

/**
 * Iterates over the (repetition level, definition level, value) triples of one Parquet
 * column across all of a row group's pages, delegating page decoding to a
 * {@link PageIterator}.
 */
public abstract class ColumnIterator<T> implements TripleIterator<T> {

  /** Creates a boxed-value iterator matching the column's physical type. */
  @SuppressWarnings("unchecked")
  static <T> ColumnIterator<T> newIterator(ColumnDescriptor desc, String writerVersion) {
    switch (desc.getType()) {
      case BOOLEAN:
        return (ColumnIterator<T>) new ColumnIterator<Boolean>(desc, writerVersion) {
          @Override
          public Boolean next() {
            return nextBoolean();
          }
        };
      case INT32:
        return (ColumnIterator<T>) new ColumnIterator<Integer>(desc, writerVersion) {
          @Override
          public Integer next() {
            return nextInteger();
          }
        };
      case INT64:
        return (ColumnIterator<T>) new ColumnIterator<Long>(desc, writerVersion) {
          @Override
          public Long next() {
            return nextLong();
          }
        };
      case FLOAT:
        return (ColumnIterator<T>) new ColumnIterator<Float>(desc, writerVersion) {
          @Override
          public Float next() {
            return nextFloat();
          }
        };
      case DOUBLE:
        return (ColumnIterator<T>) new ColumnIterator<Double>(desc, writerVersion) {
          @Override
          public Double next() {
            return nextDouble();
          }
        };
      case FIXED_LEN_BYTE_ARRAY:
      case BINARY:
        return (ColumnIterator<T>) new ColumnIterator<Binary>(desc, writerVersion) {
          @Override
          public Binary next() {
            return nextBinary();
          }
        };
      default:
        throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType());
    }
  }

  private final ColumnDescriptor desc;
  private final PageIterator<T> pageIterator;

  // state reset for each row group
  private PageReader pageSource = null;
  private long triplesCount = 0L;
  private long triplesRead = 0L;
  private long advanceNextPageCount = 0L;

  private ColumnIterator(ColumnDescriptor desc, String writerVersion) {
    this.desc = desc;
    this.pageIterator = PageIterator.newIterator(desc, writerVersion);
  }

  /** Resets this iterator to read the column's pages from a new row group. */
  public void setPageSource(PageReader source) {
    this.pageSource = source;
    this.triplesCount = source.getTotalValueCount();
    this.triplesRead = 0L;
    this.advanceNextPageCount = 0L;
    this.pageIterator.reset();
    this.pageIterator.setDictionary(readDictionary(desc, pageSource));
    advance();
  }

  /** Loads the next page when the current one is exhausted. */
  private void advance() {
    if (triplesRead >= advanceNextPageCount) {
      while (!pageIterator.hasNext()) {
        DataPage page = pageSource.readPage();
        if (page != null) {
          pageIterator.setPage(page);
          this.advanceNextPageCount += pageIterator.currentPageCount();
        } else {
          return;
        }
      }
    }
  }

  @Override
  public boolean hasNext() {
    return triplesRead < triplesCount;
  }

  @Override
  public int currentDefinitionLevel() {
    advance();
    return pageIterator.currentDefinitionLevel();
  }

  @Override
  public int currentRepetitionLevel() {
    advance();
    return pageIterator.currentRepetitionLevel();
  }

  @Override
  public boolean nextBoolean() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextBoolean();
  }

  @Override
  public int nextInteger() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextInteger();
  }

  @Override
  public long nextLong() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextLong();
  }

  @Override
  public float nextFloat() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextFloat();
  }

  @Override
  public double nextDouble() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextDouble();
  }

  @Override
  public Binary nextBinary() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextBinary();
  }

  @Override
  public <N> N nextNull() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextNull();
  }

  /** Reads and decodes the column's dictionary page, or returns null if there is none. */
  private static Dictionary readDictionary(ColumnDescriptor desc, PageReader pageSource) {
    DictionaryPage dictionaryPage = pageSource.readDictionaryPage();
    if (dictionaryPage != null) {
      try {
        return dictionaryPage.getEncoding().initDictionary(desc, dictionaryPage);
      } catch (IOException e) {
        throw new ParquetDecodingException("could not decode the dictionary for " + desc, e);
      }
    }
    return null;
  }
}
6,528
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import org.apache.parquet.column.ColumnWriteStore;
import java.util.List;

/**
 * Writes values of type {@code T} into one or more Parquet columns.
 *
 * @param <T> the Java type of values this writer accepts
 */
public interface ParquetValueWriter<T> {
  /** Writes a value at the given repetition level into this writer's columns. */
  void write(int repetitionLevel, T value);

  /** Returns the leaf column writers backing this value writer. */
  List<TripleWriter<?>> columns();

  /** Binds this writer's columns to a Parquet column write store. */
  void setColumnStore(ColumnWriteStore columnStore);
}
6,529
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriteSupport.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.io.api.RecordConsumer;
import org.apache.parquet.schema.MessageType;
import java.util.Map;

/**
 * A {@link WriteSupport} decorator that overrides the file schema and merges additional
 * key-value metadata into the wrapped write support's context.
 */
class ParquetWriteSupport<T> extends WriteSupport<T> {
  private final MessageType type;
  private final Map<String, String> keyValueMetadata;
  private final WriteSupport<T> wrapped;

  ParquetWriteSupport(MessageType type, Map<String, String> keyValueMetadata,
                      WriteSupport<T> writeSupport) {
    this.type = type;
    this.keyValueMetadata = keyValueMetadata;
    this.wrapped = writeSupport;
  }

  @Override
  public WriteContext init(Configuration configuration) {
    WriteContext wrappedContext = wrapped.init(configuration);

    // combine this support's metadata with whatever the wrapped support produced
    Map<String, String> metadata = ImmutableMap.<String, String>builder()
        .putAll(keyValueMetadata)
        .putAll(wrappedContext.getExtraMetaData())
        .build();

    // use this support's schema, not the wrapped context's
    return new WriteContext(type, metadata);
  }

  @Override
  public String getName() {
    return "Iceberg/" + wrapped.getName();
  }

  @Override
  public void prepareForWrite(RecordConsumer recordConsumer) {
    wrapped.prepareForWrite(recordConsumer);
  }

  @Override
  public void write(T t) {
    wrapped.write(t);
  }

  @Override
  public FinalizedWriteContext finalizeWrite() {
    return wrapped.finalizeWrite();
  }
}
6,530
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetMetrics.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.Schema; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.types.Conversions; import com.netflix.iceberg.types.Types; import org.apache.parquet.column.statistics.Statistics; import org.apache.parquet.hadoop.ParquetFileReader; import org.apache.parquet.hadoop.metadata.BlockMetaData; import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; import org.apache.parquet.hadoop.metadata.ParquetMetadata; import org.apache.parquet.schema.MessageType; import java.io.IOException; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.Set; import static com.netflix.iceberg.parquet.ParquetConversions.fromParquetPrimitive; public class ParquetMetrics implements Serializable { private ParquetMetrics() { } public static Metrics fromInputFile(InputFile file) { try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(file))) { return fromMetadata(reader.getFooter()); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to read footer of file: %s", file); } } public static Metrics fromMetadata(ParquetMetadata metadata) { long 
rowCount = 0; Map<Integer, Long> columnSizes = Maps.newHashMap(); Map<Integer, Long> valueCounts = Maps.newHashMap(); Map<Integer, Long> nullValueCounts = Maps.newHashMap(); Map<Integer, Literal<?>> lowerBounds = Maps.newHashMap(); Map<Integer, Literal<?>> upperBounds = Maps.newHashMap(); Set<Integer> missingStats = Sets.newHashSet(); MessageType parquetType = metadata.getFileMetaData().getSchema(); Schema fileSchema = ParquetSchemaUtil.convert(parquetType); List<BlockMetaData> blocks = metadata.getBlocks(); for (BlockMetaData block : blocks) { rowCount += block.getRowCount(); for (ColumnChunkMetaData column : block.getColumns()) { int fieldId = fileSchema.aliasToId(column.getPath().toDotString()); increment(columnSizes, fieldId, column.getTotalSize()); increment(valueCounts, fieldId, column.getValueCount()); Statistics stats = column.getStatistics(); if (stats == null) { missingStats.add(fieldId); } else if (!stats.isEmpty()) { increment(nullValueCounts, fieldId, stats.getNumNulls()); // only add min/max stats for top-level fields // TODO: allow struct nesting, but not maps or arrays Types.NestedField field = fileSchema.asStruct().field(fieldId); if (field != null && stats.hasNonNullValue()) { updateMin(lowerBounds, fieldId, fromParquetPrimitive(field.type(), stats.genericGetMin())); updateMax(upperBounds, fieldId, fromParquetPrimitive(field.type(), stats.genericGetMax())); } } } } // discard accumulated values if any stats were missing for (Integer fieldId : missingStats) { nullValueCounts.remove(fieldId); lowerBounds.remove(fieldId); upperBounds.remove(fieldId); } return new Metrics(rowCount, columnSizes, valueCounts, nullValueCounts, toBufferMap(fileSchema, lowerBounds), toBufferMap(fileSchema, upperBounds)); } private static void increment(Map<Integer, Long> columns, int fieldId, long amount) { if (columns != null) { if (columns.containsKey(fieldId)) { columns.put(fieldId, columns.get(fieldId) + amount); } else { columns.put(fieldId, amount); } } } 
@SuppressWarnings("unchecked") private static <T> void updateMin(Map<Integer, Literal<?>> lowerBounds, int id, Literal<T> min) { Literal<T> currentMin = (Literal<T>) lowerBounds.get(id); if (currentMin == null || min.comparator().compare(min.value(), currentMin.value()) < 0) { lowerBounds.put(id, min); } } @SuppressWarnings("unchecked") private static <T> void updateMax(Map<Integer, Literal<?>> upperBounds, int id, Literal<T> max) { Literal<T> currentMax = (Literal<T>) upperBounds.get(id); if (currentMax == null || max.comparator().compare(max.value(), currentMax.value()) > 0) { upperBounds.put(id, max); } } private static Map<Integer, ByteBuffer> toBufferMap(Schema schema, Map<Integer, Literal<?>> map) { Map<Integer, ByteBuffer> bufferMap = Maps.newHashMap(); for (Map.Entry<Integer, Literal<?>> entry : map.entrySet()) { bufferMap.put(entry.getKey(), Conversions.toByteBuffer(schema.findType(entry.getKey()), entry.getValue().value())); } return bufferMap; } }
6,531
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import org.apache.parquet.column.page.PageReadStore;
import java.util.List;

/**
 * Reads values of type {@code T} from Parquet column data.
 *
 * @param <T> the Java type produced by this reader
 */
public interface ParquetValueReader<T> {
  /**
   * Reads the next value, optionally reusing a previously-returned object.
   *
   * @param reuse an object that may be reused to hold the result; may be ignored
   * @return the value read
   */
  T read(T reuse);

  /**
   * Returns the primary column iterator for this reader (used to drive record boundaries).
   */
  TripleIterator<?> column();

  /**
   * Returns all column iterators consumed by this reader, including those of nested readers.
   */
  List<TripleIterator<?>> columns();

  /**
   * Binds this reader (and any nested readers) to the pages of a new row group.
   *
   * @param pageStore the page source for the next row group
   */
  void setPageSource(PageReadStore pageStore);
}
6,532
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetReadSupport.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumns;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumnsFallback;

/**
 * Parquet {@link ReadSupport} that handles column projection based on {@link Schema} column IDs.
 *
 * @param <T> Java type produced by this read support instance
 */
class ParquetReadSupport<T> extends ReadSupport<T> {
  // the Iceberg schema the caller expects records to match
  private final Schema expectedSchema;
  // the delegate read support that materializes records
  private final ReadSupport<T> wrapped;
  // whether to call the delegate's init (some read supports must not be initialized twice)
  private final boolean callInit;

  ParquetReadSupport(Schema expectedSchema, ReadSupport<T> readSupport, boolean callInit) {
    this.expectedSchema = expectedSchema;
    this.wrapped = readSupport;
    this.callInit = callInit;
  }

  /**
   * Builds the read context with a projection pruned from the file schema.
   * <p>
   * Also configures Avro read schemas in case the wrapped read support is Avro-based, and
   * optionally delegates init to the wrapped read support (falling back to the
   * {@link InitContext} variant when the deprecated signature is unsupported).
   */
  @Override
  @SuppressWarnings("deprecation")
  public ReadContext init(Configuration configuration, Map<String, String> keyValueMetaData,
                          MessageType fileSchema) {
    // Columns are selected from the Parquet file by taking the read context's message type and
    // matching to the file's columns by full path, so this must select columns by using the path
    // in the file's schema.
    // When the file carries field IDs, prune by ID; otherwise fall back to name-based pruning.
    MessageType projection = hasIds(fileSchema) ?
        pruneColumns(fileSchema, expectedSchema) :
        pruneColumnsFallback(fileSchema, expectedSchema);

    // override some known backward-compatibility options
    configuration.set("parquet.strict.typing", "false");
    configuration.set("parquet.avro.add-list-element-records", "false");
    configuration.set("parquet.avro.write-old-list-structure", "false");

    // set Avro schemas in case the reader is Avro
    AvroReadSupport.setRequestedProjection(configuration,
        AvroSchemaUtil.convert(expectedSchema, projection.getName()));
    org.apache.avro.Schema avroReadSchema = AvroSchemaUtil.buildAvroProjection(
        AvroSchemaUtil.convert(ParquetSchemaUtil.convert(projection), projection.getName()),
        expectedSchema, ImmutableMap.of());
    AvroReadSupport.setAvroReadSchema(configuration, ParquetAvro.parquetAvroSchema(avroReadSchema));

    // let the context set up read support metadata, but always use the correct projection
    ReadContext context = null;
    if (callInit) {
      try {
        context = wrapped.init(configuration, keyValueMetaData, projection);
      } catch (UnsupportedOperationException e) {
        // try the InitContext version
        context = wrapped.init(new InitContext(
            configuration, makeMultimap(keyValueMetaData), projection));
      }
    }

    return new ReadContext(
        projection, context != null ? context.getReadSupportMetadata() : ImmutableMap.of());
  }

  /**
   * Prepares the wrapped read support with a message type renamed to the expected schema's names.
   */
  @Override
  public RecordMaterializer<T> prepareForRead(Configuration configuration,
                                              Map<String, String> fileMetadata,
                                              MessageType fileMessageType,
                                              ReadContext readContext) {
    // This is the type created in init that was based on the file's schema. The schema that this
    // will pass to the wrapped ReadSupport needs to match the expected schema's names. Rather than
    // renaming the file's schema, convert the expected schema to Parquet. This relies on writing
    // files with the correct schema.
    // TODO: this breaks when columns are reordered.
    MessageType readSchema = ParquetSchemaUtil.convert(expectedSchema, fileMessageType.getName());
    return wrapped.prepareForRead(configuration, fileMetadata, readSchema, readContext);
  }

  /**
   * Wraps each metadata value in a single-element set, as required by {@link InitContext}.
   */
  private Map<String, Set<String>> makeMultimap(Map<String, String> map) {
    ImmutableMap.Builder<String, Set<String>> builder = ImmutableMap.builder();
    for (Map.Entry<String, String> entry : map.entrySet()) {
      builder.put(entry.getKey(), Sets.newHashSet(entry.getValue()));
    }
    return builder.build();
  }
}
6,533
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TypeWithSchemaVisitor.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.netflix.iceberg.types.Types; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.OriginalType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import java.util.LinkedList; import java.util.List; import static org.apache.parquet.schema.Type.Repetition.REPEATED; /** * Visitor for traversing a Parquet type with a companion Iceberg type. * * @param <T> the Java class returned by the visitor */ public class TypeWithSchemaVisitor<T> { protected LinkedList<String> fieldNames = Lists.newLinkedList(); public static <T> T visit(com.netflix.iceberg.types.Type iType, Type type, TypeWithSchemaVisitor<T> visitor) { if (type instanceof MessageType) { Types.StructType struct = iType != null ? iType.asStructType() : null; return visitor.message(struct, (MessageType) type, visitFields(struct, type.asGroupType(), visitor)); } else if (type.isPrimitive()) { com.netflix.iceberg.types.Type.PrimitiveType iPrimitive = iType != null ? 
iType.asPrimitiveType() : null; return visitor.primitive(iPrimitive, type.asPrimitiveType()); } else { // if not a primitive, the typeId must be a group GroupType group = type.asGroupType(); OriginalType annotation = group.getOriginalType(); if (annotation != null) { switch (annotation) { case LIST: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid list: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid list: does not contain single repeated field: " + group); GroupType repeatedElement = group.getFields().get(0).asGroupType(); Preconditions.checkArgument(repeatedElement.isRepetition(REPEATED), "Invalid list: inner group is not repeated"); Preconditions.checkArgument(repeatedElement.getFieldCount() <= 1, "Invalid list: repeated group is not a single field: " + group); Types.ListType list = null; Types.NestedField element = null; if (iType != null) { list = iType.asListType(); element = list.fields().get(0); } visitor.fieldNames.push(repeatedElement.getName()); try { T elementResult = null; if (repeatedElement.getFieldCount() > 0) { elementResult = visitField(element, repeatedElement.getType(0), visitor); } return visitor.list(list, group, elementResult); } finally { visitor.fieldNames.pop(); } case MAP: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid map: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid map: does not contain single repeated field: " + group); GroupType repeatedKeyValue = group.getType(0).asGroupType(); Preconditions.checkArgument(repeatedKeyValue.isRepetition(REPEATED), "Invalid map: inner group is not repeated"); Preconditions.checkArgument(repeatedKeyValue.getFieldCount() <= 2, "Invalid map: repeated group does not have 2 fields"); Types.MapType map = null; Types.NestedField keyField = null; Types.NestedField valueField = null; if (iType != null) { map = iType.asMapType(); keyField = 
map.fields().get(0); valueField = map.fields().get(1); } visitor.fieldNames.push(repeatedKeyValue.getName()); try { T keyResult = null; T valueResult = null; switch (repeatedKeyValue.getFieldCount()) { case 2: // if there are 2 fields, both key and value are projected keyResult = visitField(keyField, repeatedKeyValue.getType(0), visitor); valueResult = visitField(valueField, repeatedKeyValue.getType(1), visitor); case 1: // if there is just one, use the name to determine what it is Type keyOrValue = repeatedKeyValue.getType(0); if (keyOrValue.getName().equalsIgnoreCase("key")) { keyResult = visitField(keyField, keyOrValue, visitor); // value result remains null } else { valueResult = visitField(valueField, keyOrValue, visitor); // key result remains null } default: // both results will remain null } return visitor.map(map, group, keyResult, valueResult); } finally { visitor.fieldNames.pop(); } default: } } Types.StructType struct = iType != null ? iType.asStructType() : null; return visitor.struct(struct, group, visitFields(struct, group, visitor)); } } private static <T> T visitField(Types.NestedField iField, Type field, TypeWithSchemaVisitor<T> visitor) { visitor.fieldNames.push(field.getName()); try { return visit(iField != null ? iField.type() : null, field, visitor); } finally { visitor.fieldNames.pop(); } } private static <T> List<T> visitFields(Types.StructType struct, GroupType group, TypeWithSchemaVisitor<T> visitor) { List<T> results = Lists.newArrayListWithExpectedSize(group.getFieldCount()); for (Type field : group.getFields()) { int id = -1; if (field.getId() != null) { id = field.getId().intValue(); } Types.NestedField iField = (struct != null && id >= 0) ? 
struct.field(id) : null; results.add(visitField(iField, field, visitor)); } return results; } public T message(Types.StructType iStruct, MessageType message, List<T> fields) { return null; } public T struct(Types.StructType iStruct, GroupType struct, List<T> fields) { return null; } public T list(Types.ListType iList, GroupType array, T element) { return null; } public T map(Types.MapType iMap, GroupType map, T key, T value) { return null; } public T primitive(com.netflix.iceberg.types.Type.PrimitiveType iPrimitive, PrimitiveType primitive) { return null; } }
6,534
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetTypeVisitor.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.OriginalType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import java.util.LinkedList; import java.util.List; import static org.apache.parquet.schema.Type.Repetition.REPEATED; public class ParquetTypeVisitor<T> { protected LinkedList<String> fieldNames = Lists.newLinkedList(); public static <T> T visit(Type type, ParquetTypeVisitor<T> visitor) { if (type instanceof MessageType) { return visitor.message((MessageType) type, visitFields(type.asGroupType(), visitor)); } else if (type.isPrimitive()) { return visitor.primitive(type.asPrimitiveType()); } else { // if not a primitive, the typeId must be a group GroupType group = type.asGroupType(); OriginalType annotation = group.getOriginalType(); if (annotation != null) { switch (annotation) { case LIST: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid list: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid list: does not contain single repeated field: " + group); GroupType repeatedElement = group.getFields().get(0).asGroupType(); Preconditions.checkArgument(repeatedElement.isRepetition(REPEATED), 
"Invalid list: inner group is not repeated"); Preconditions.checkArgument(repeatedElement.getFieldCount() <= 1, "Invalid list: repeated group is not a single field: " + group); visitor.fieldNames.push(repeatedElement.getName()); try { T elementResult = null; if (repeatedElement.getFieldCount() > 0) { elementResult = visitField(repeatedElement.getType(0), visitor); } return visitor.list(group, elementResult); } finally { visitor.fieldNames.pop(); } case MAP: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid map: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid map: does not contain single repeated field: " + group); GroupType repeatedKeyValue = group.getType(0).asGroupType(); Preconditions.checkArgument(repeatedKeyValue.isRepetition(REPEATED), "Invalid map: inner group is not repeated"); Preconditions.checkArgument(repeatedKeyValue.getFieldCount() <= 2, "Invalid map: repeated group does not have 2 fields"); visitor.fieldNames.push(repeatedKeyValue.getName()); try { T keyResult = null; T valueResult = null; switch (repeatedKeyValue.getFieldCount()) { case 2: // if there are 2 fields, both key and value are projected keyResult = visitField(repeatedKeyValue.getType(0), visitor); valueResult = visitField(repeatedKeyValue.getType(1), visitor); case 1: // if there is just one, use the name to determine what it is Type keyOrValue = repeatedKeyValue.getType(0); if (keyOrValue.getName().equalsIgnoreCase("key")) { keyResult = visitField(keyOrValue, visitor); // value result remains null } else { valueResult = visitField(keyOrValue, visitor); // key result remains null } default: // both results will remain null } return visitor.map(group, keyResult, valueResult); } finally { visitor.fieldNames.pop(); } default: } } return visitor.struct(group, visitFields(group, visitor)); } } private static <T> T visitField(Type field, ParquetTypeVisitor<T> visitor) { visitor.fieldNames.push(field.getName()); try { 
return visit(field, visitor); } finally { visitor.fieldNames.pop(); } } private static <T> List<T> visitFields(GroupType group, ParquetTypeVisitor<T> visitor) { List<T> results = Lists.newArrayListWithExpectedSize(group.getFieldCount()); for (Type field : group.getFields()) { results.add(visitField(field, visitor)); } return results; } public T message(MessageType message, List<T> fields) { return null; } public T struct(GroupType struct, List<T> fields) { return null; } public T list(GroupType array, T element) { return null; } public T map(GroupType map, T key, T value) { return null; } public T primitive(PrimitiveType primitive) { return null; } }
6,535
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TripleIterator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import org.apache.parquet.io.api.Binary;
import java.util.Iterator;

/**
 * An iterator over Parquet triples (repetition level, definition level, value).
 * <p>
 * The level accessors peek at the current triple without advancing; the typed {@code nextXxx}
 * methods consume a triple like {@link #next()} while avoiding boxing. Defaults throw
 * {@link UnsupportedOperationException} so implementations only override the accessor matching
 * their column's physical type.
 *
 * @param <T> the boxed Java type of the column's values
 */
interface TripleIterator<T> extends Iterator<T> {
  /**
   * Returns the definition level from the current triple.
   * <p>
   * This method does not advance this iterator.
   *
   * @return the definition level of the current triple.
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  int currentDefinitionLevel();

  /**
   * Returns the repetition level from the current triple or 0 if there are no more elements.
   * <p>
   * This method does not advance this iterator.
   *
   * @return the repetition level of the current triple, or 0 if there is no current triple.
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  int currentRepetitionLevel();

  /**
   * Returns the next value as an un-boxed boolean.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed boolean
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not booleans
   */
  default boolean nextBoolean() {
    throw new UnsupportedOperationException("Not a boolean column");
  }

  /**
   * Returns the next value as an un-boxed int.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed int
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not ints
   */
  default int nextInteger() {
    throw new UnsupportedOperationException("Not an integer column");
  }

  /**
   * Returns the next value as an un-boxed long.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed long
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not longs
   */
  default long nextLong() {
    throw new UnsupportedOperationException("Not a long column");
  }

  /**
   * Returns the next value as an un-boxed float.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed float
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not floats
   */
  default float nextFloat() {
    throw new UnsupportedOperationException("Not a float column");
  }

  /**
   * Returns the next value as an un-boxed double.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed double
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not doubles
   */
  default double nextDouble() {
    throw new UnsupportedOperationException("Not a double column");
  }

  /**
   * Returns the next value as a Binary.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as a Binary
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not binary
   */
  default Binary nextBinary() {
    throw new UnsupportedOperationException("Not a binary column");
  }

  /**
   * Returns null and advances the iterator.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return null
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  <N> N nextNull();
}
6,536
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.Lists;
import com.netflix.iceberg.parquet.ParquetValueWriters.PrimitiveWriter;
import com.netflix.iceberg.parquet.ParquetValueWriters.StructWriter;
import org.apache.avro.generic.GenericData.Fixed;
import org.apache.avro.generic.IndexedRecord;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.util.Iterator;
import java.util.List;

import static com.netflix.iceberg.parquet.ParquetValueWriters.collections;
import static com.netflix.iceberg.parquet.ParquetValueWriters.maps;
import static com.netflix.iceberg.parquet.ParquetValueWriters.option;

/**
 * Builds a {@link ParquetValueWriter} tree for writing Avro {@link IndexedRecord}s to a Parquet
 * {@link MessageType}.
 */
public class ParquetAvroWriter {
  private ParquetAvroWriter() {
  }

  /**
   * Builds a writer for {@code type} by visiting its structure bottom-up.
   *
   * @param type the full Parquet file schema
   * @return a writer for records matching {@code type}
   */
  @SuppressWarnings("unchecked")
  public static <T> ParquetValueWriter<T> buildWriter(MessageType type) {
    return (ParquetValueWriter<T>) ParquetTypeVisitor.visit(type, new WriteBuilder(type));
  }

  /**
   * Visitor that assembles writers for each schema node; definition/repetition levels are looked
   * up from the root message type using the visitor's field-name stack.
   */
  private static class WriteBuilder extends ParquetTypeVisitor<ParquetValueWriter<?>> {
    // the root message type, used to resolve max definition/repetition levels by column path
    private final MessageType type;

    WriteBuilder(MessageType type) {
      this.type = type;
    }

    @Override
    public ParquetValueWriter<?> message(MessageType message,
                                         List<ParquetValueWriter<?>> fieldWriters) {
      // the root message is written like a struct
      return struct(message.asGroupType(), fieldWriters);
    }

    @Override
    public ParquetValueWriter<?> struct(GroupType struct,
                                        List<ParquetValueWriter<?>> fieldWriters) {
      List<Type> fields = struct.getFields();
      List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size());
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = struct.getType(i);
        // wrap each field writer so optional fields can write nulls at the right level
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()));
        writers.add(option(fieldType, fieldD, fieldWriters.get(i)));
      }

      return new RecordWriter(writers);
    }

    @Override
    public ParquetValueWriter<?> list(GroupType array, ParquetValueWriter<?> elementWriter) {
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      // levels of the repeated group drive when list boundaries are written
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);

      org.apache.parquet.schema.Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName()));

      return collections(repeatedD, repeatedR, option(elementType, elementD, elementWriter));
    }

    @Override
    public ParquetValueWriter<?> map(GroupType map,
                                     ParquetValueWriter<?> keyWriter,
                                     ParquetValueWriter<?> valueWriter) {
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      // levels of the repeated key/value group drive when map boundaries are written
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);

      org.apache.parquet.schema.Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName()));
      org.apache.parquet.schema.Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName()));

      return maps(repeatedD, repeatedR,
          option(keyType, keyD, keyWriter),
          option(valueType, valueD, valueWriter));
    }

    @Override
    public ParquetValueWriter<?> primitive(PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());

      // logical (original) type takes precedence over the physical type
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            return ParquetValueWriters.strings(desc);
          case DATE:
          case INT_8:
          case INT_16:
          case INT_32:
          case INT_64:
          case TIME_MICROS:
          case TIMESTAMP_MICROS:
            return ParquetValueWriters.unboxed(desc);
          case DECIMAL:
            // decimal writer depends on the physical storage of the unscaled value
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case INT32:
                return ParquetValueWriters.decimalAsInteger(
                    desc, decimal.getPrecision(), decimal.getScale());
              case INT64:
                return ParquetValueWriters.decimalAsLong(
                    desc, decimal.getPrecision(), decimal.getScale());
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return ParquetValueWriters.decimalAsFixed(
                    desc, decimal.getPrecision(), decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return ParquetValueWriters.byteBuffers(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      // no logical type: dispatch on the physical type
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          return new FixedWriter(desc);
        case BINARY:
          return ParquetValueWriters.byteBuffers(desc);
        case BOOLEAN:
        case INT32:
        case INT64:
        case FLOAT:
        case DOUBLE:
          return ParquetValueWriters.unboxed(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    /**
     * Returns the column path for the current position (root-first order).
     */
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        // fieldNames is a stack (top is the deepest field), so iterate in reverse
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }

      return path;
    }

    /**
     * Returns the column path for {@code name} nested under the current position.
     */
    private String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;

      if (!fieldNames.isEmpty()) {
        // fieldNames is a stack (top is the deepest field), so iterate in reverse
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }

      return path;
    }
  }

  /**
   * Writes Avro {@link Fixed} values as Parquet binary, reusing the backing byte array.
   */
  private static class FixedWriter extends PrimitiveWriter<Fixed> {
    private FixedWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, Fixed buffer) {
      column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(buffer.bytes()));
    }
  }

  /**
   * Struct writer that reads field values positionally from an Avro {@link IndexedRecord}.
   */
  private static class RecordWriter extends StructWriter<IndexedRecord> {
    private RecordWriter(List<ParquetValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(IndexedRecord struct, int index) {
      return struct.get(index);
    }
  }
}
6,537
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.common.DynConstructors;
import com.netflix.iceberg.common.DynMethods;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.bytes.ByteBufferAllocator;
import org.apache.parquet.column.ColumnWriteStore;
import org.apache.parquet.column.ParquetProperties;
import org.apache.parquet.column.page.PageWriteStore;
import org.apache.parquet.hadoop.CodecFactory;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import java.util.function.Function;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.convert;
import static java.lang.Math.max;
import static java.lang.Math.min;
import static org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_1_0;

/**
 * A {@link FileAppender} that writes records to a Parquet file, managing row-group
 * boundaries itself so it can target a configured row-group size.
 * <p>
 * Parquet's ColumnChunkPageWriteStore is package-private, so this class reaches it via
 * dynamic construction/method binding ({@link DynConstructors}/{@link DynMethods}).
 * Not thread-safe.
 */
class ParquetWriter<T> implements FileAppender<T>, Closeable {
  // reflective constructor for the package-private ColumnChunkPageWriteStore
  private static final DynConstructors.Ctor<PageWriteStore> pageStoreCtor = DynConstructors
      .builder(PageWriteStore.class)
      .hiddenImpl("org.apache.parquet.hadoop.ColumnChunkPageWriteStore",
          CodecFactory.BytesCompressor.class,
          MessageType.class,
          ByteBufferAllocator.class)
      .build();

  // reflective handle for ColumnChunkPageWriteStore#flushToFileWriter(ParquetFileWriter)
  private static final DynMethods.UnboundMethod flushToWriter = DynMethods
      .builder("flushToFileWriter")
      .hiddenImpl("org.apache.parquet.hadoop.ColumnChunkPageWriteStore", ParquetFileWriter.class)
      .build();

  private final OutputFile output;
  private final long targetRowGroupSize;
  private final Map<String, String> metadata;
  private final ParquetProperties props = ParquetProperties.builder()
      .withWriterVersion(PARQUET_1_0)
      .build();
  private final CodecFactory.BytesCompressor compressor;
  private final MessageType parquetSchema;
  private final ParquetValueWriter<T> model;
  private final ParquetFileWriter writer;

  // bound to the current row group's page store; rebound in startRowGroup()
  private DynMethods.BoundMethod flushPageStoreToWriter;
  private ColumnWriteStore writeStore;
  private long nextRowGroupSize = 0;
  // number of records written to the current row group
  private long recordCount = 0;
  // next record count at which to re-estimate buffered size (see checkSize())
  private long nextCheckRecordCount = 10;

  /**
   * Creates a writer; opens the output file and writes the Parquet header immediately.
   *
   * @param conf Hadoop configuration used for I/O and codec setup
   * @param output destination file
   * @param schema Iceberg schema, converted to a Parquet message type named "table"
   * @param rowGroupSize target row-group size in bytes
   * @param metadata key/value metadata written to the file footer on close
   * @param createWriterFunc builds the value-writer tree for the Parquet schema
   * @param codec compression codec for page data
   * @throws RuntimeIOException if the file cannot be created or started
   */
  @SuppressWarnings("unchecked")
  ParquetWriter(Configuration conf, OutputFile output, Schema schema, long rowGroupSize,
                Map<String, String> metadata,
                Function<MessageType, ParquetValueWriter<?>> createWriterFunc,
                CompressionCodecName codec) {
    this.output = output;
    this.targetRowGroupSize = rowGroupSize;
    this.metadata = ImmutableMap.copyOf(metadata);
    this.compressor = new CodecFactory(conf, props.getPageSizeThreshold()).getCompressor(codec);
    this.parquetSchema = convert(schema, "table");
    this.model = (ParquetValueWriter<T>) createWriterFunc.apply(parquetSchema);
    try {
      this.writer = new ParquetFileWriter(ParquetIO.file(output, conf), parquetSchema,
          ParquetFileWriter.Mode.OVERWRITE, rowGroupSize, 0);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create Parquet file");
    }
    try {
      writer.start();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to start Parquet file writer");
    }
    startRowGroup();
  }

  /** Writes one record and checks whether the current row group should be flushed. */
  @Override
  public void add(T value) {
    recordCount += 1;
    model.write(0, value);
    writeStore.endRecord();
    checkSize();
  }

  /** Returns metrics derived from the footer accumulated so far. */
  @Override
  public Metrics metrics() {
    return ParquetMetrics.fromMetadata(writer.getFooter());
  }

  // Estimates buffered size per record and either flushes the row group (when within
  // ~2 records of the target) or schedules the next size check to avoid measuring on
  // every record.
  private void checkSize() {
    if (recordCount >= nextCheckRecordCount) {
      long bufferedSize = writeStore.getBufferedSize();
      double avgRecordSize = ((double) bufferedSize) / recordCount;

      if (bufferedSize > (nextRowGroupSize - 2 * avgRecordSize)) {
        flushRowGroup(false);
      } else {
        long remainingSpace = nextRowGroupSize - bufferedSize;
        long remainingRecords = (long) (remainingSpace / avgRecordSize);
        // check again roughly halfway through the remaining space, clamped to [100, 10000]
        this.nextCheckRecordCount = recordCount + min(max(remainingRecords / 2, 100), 10000);
      }
    }
  }

  // Flushes the buffered row group to the file. When finished is false, a new row
  // group is started; empty row groups (recordCount == 0) are never written.
  private void flushRowGroup(boolean finished) {
    try {
      if (recordCount > 0) {
        writer.startBlock(recordCount);
        writeStore.flush();
        flushPageStoreToWriter.invoke(writer);
        writer.endBlock();
        if (!finished) {
          startRowGroup();
        }
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to flush row group");
    }
  }

  // Resets per-row-group state: size target, check schedule, record count, and a fresh
  // page store / column write store bound to the value-writer tree.
  private void startRowGroup() {
    try {
      this.nextRowGroupSize = min(writer.getNextRowGroupSize(), targetRowGroupSize);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
    // recordCount still holds the previous group's size here; use it to seed the
    // first size check for the new group before resetting it to 0
    this.nextCheckRecordCount = min(max(recordCount / 2, 100), 10000);
    this.recordCount = 0;

    PageWriteStore pageStore = pageStoreCtor.newInstance(
        compressor, parquetSchema, props.getAllocator());

    this.flushPageStoreToWriter = flushToWriter.bind(pageStore);
    this.writeStore = props.newColumnWriteStore(parquetSchema, pageStore);

    model.setColumnStore(writeStore);
  }

  /** Flushes the last row group, closes the write store, and writes the footer. */
  @Override
  public void close() throws IOException {
    flushRowGroup(true);
    writeStore.close();
    writer.end(metadata);
  }
}
6,538
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/PruneColumns.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Types;
import org.apache.parquet.schema.Type;
import java.util.List;
import java.util.Set;

/**
 * Projects a Parquet schema down to the fields whose ids are in a selected set.
 * <p>
 * Visitor contract: each callback receives the original node plus the already-pruned
 * results for its children; returning null means "nothing under this node is selected".
 * A field whose id is selected is kept whole; otherwise, if a pruned child projection
 * exists, that projection is kept.
 */
class PruneColumns extends ParquetTypeVisitor<Type> {
  private final Set<Integer> selectedIds;

  PruneColumns(Set<Integer> selectedIds) {
    this.selectedIds = selectedIds;
  }

  /**
   * Prunes the top-level message: keeps selected fields whole and pruned projections of
   * partially-selected fields. Returns the original message when nothing changed.
   */
  @Override
  public Type message(MessageType message, List<Type> fields) {
    Types.MessageTypeBuilder builder = Types.buildMessage();

    boolean hasChange = false;
    int fieldCount = 0;
    for (int i = 0; i < fields.size(); i += 1) {
      Type originalField = message.getType(i);
      Type field = fields.get(i);
      if (selectedIds.contains(getId(originalField))) {
        builder.addField(originalField);
        fieldCount += 1;
      } else if (field != null) {
        // keep the pruned projection of this field's subtree
        builder.addField(field);
        fieldCount += 1;
        hasChange = true;
      }
    }

    if (hasChange) {
      return builder.named(message.getName());
    } else if (message.getFieldCount() == fieldCount) {
      // every field was selected whole; reuse the original
      return message;
    }

    // some fields were dropped entirely
    return builder.named(message.getName());
  }

  /**
   * Prunes a struct the same way as {@link #message(MessageType, List)}; returns null
   * when no descendant field is selected.
   */
  @Override
  public Type struct(GroupType struct, List<Type> fields) {
    boolean hasChange = false;
    List<Type> filteredFields = Lists.newArrayListWithExpectedSize(fields.size());
    for (int i = 0; i < fields.size(); i += 1) {
      Type originalField = struct.getType(i);
      Type field = fields.get(i);
      if (selectedIds.contains(getId(originalField))) {
        filteredFields.add(originalField);
      } else if (field != null) {
        // FIX: add the pruned child projection, not the full original field. Adding
        // originalField discarded the child's projection and disagreed with the
        // handling in message(...) above.
        filteredFields.add(field);
        hasChange = true;
      }
    }

    if (hasChange) {
      return struct.withNewFields(filteredFields);
    } else if (struct.getFieldCount() == filteredFields.size()) {
      return struct;
    } else if (!filteredFields.isEmpty()) {
      return struct.withNewFields(filteredFields);
    }

    return null;
  }

  /**
   * Prunes a list: kept whole when the element id is selected; rebuilt around a
   * projected element type when only part of the element is selected; null otherwise.
   */
  @Override
  public Type list(GroupType list, Type element) {
    GroupType repeated = list.getType(0).asGroupType();
    Type originalElement = repeated.getType(0);
    int elementId = getId(originalElement);

    if (selectedIds.contains(elementId)) {
      return list;
    } else if (element != null) {
      if (element != originalElement) {
        // the element type was projected
        return Types.list(list.getRepetition())
            .element(element)
            .id(getId(list))
            .named(list.getName());
      }
      return list;
    }

    return null;
  }

  /**
   * Prunes a map: kept whole when the key or value id is selected; rebuilt with a
   * projected value type when only part of the value is selected; null otherwise.
   * Keys are always kept whole because a projected map must remain keyed.
   */
  @Override
  public Type map(GroupType map, Type key, Type value) {
    GroupType repeated = map.getType(0).asGroupType();
    Type originalKey = repeated.getType(0);
    Type originalValue = repeated.getType(1);

    int keyId = getId(originalKey);
    int valueId = getId(originalValue);

    if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) {
      return map;
    } else if (value != null) {
      if (value != originalValue) {
        return Types.map(map.getRepetition())
            .key(originalKey)
            .value(value)
            .id(getId(map))
            .named(map.getName());
      }
      return map;
    }

    return null;
  }

  /** Primitives carry no children; selection is decided by the parent via getId. */
  @Override
  public Type primitive(PrimitiveType primitive) {
    return null;
  }

  /**
   * Returns the field id for a type.
   *
   * @throws NullPointerException if the type has no id (ids are required for pruning)
   */
  private int getId(Type type) {
    // use a message template so the error string is only built on failure
    Preconditions.checkNotNull(type.getId(), "Missing id for type: %s", type);
    return type.getId().intValue();
  }
}
6,539
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import org.apache.parquet.ParquetReadOptions;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
import java.util.function.Function;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.addFallbackIds;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumns;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumnsFallback;

/**
 * Iterable over records in a Parquet file, with projection to an expected schema and
 * row-group filtering by column statistics and dictionaries.
 * <p>
 * The expensive setup (opening the file, evaluating row-group filters) is done once in
 * {@link ReadConf} on the first call to {@link #iterator()} and reused afterwards.
 */
public class ParquetReader<T> extends CloseableGroup implements CloseableIterable<T> {
  private final InputFile input;
  private final Schema expectedSchema;
  private final ParquetReadOptions options;
  private final Function<MessageType, ParquetValueReader<?>> readerFunc;
  private final Expression filter;
  private final boolean reuseContainers;

  /**
   * @param input file to read
   * @param expectedSchema Iceberg schema to project onto the file schema
   * @param options Parquet read options
   * @param readerFunc builds the value-reader tree for the (id-annotated) file schema
   * @param filter row filter used to skip whole row groups; may be alwaysTrue
   * @param reuseContainers when true, record containers are reused between reads
   */
  public ParquetReader(InputFile input, Schema expectedSchema, ParquetReadOptions options,
                       Function<MessageType, ParquetValueReader<?>> readerFunc,
                       Expression filter, boolean reuseContainers) {
    this.input = input;
    this.expectedSchema = expectedSchema;
    this.options = options;
    this.readerFunc = readerFunc;
    // replace alwaysTrue with null to avoid extra work evaluating a trivial filter
    this.filter = filter == Expressions.alwaysTrue() ? null : filter;
    this.reuseContainers = reuseContainers;
  }

  /**
   * One-time read configuration: open reader, projection, value-reader model, and the
   * per-row-group skip decisions. copy() produces a lightweight clone without the open
   * reader so the original reader can be handed to the first iterator.
   */
  private static class ReadConf<T> {
    private final ParquetFileReader reader;
    private final InputFile file;
    private final ParquetReadOptions options;
    private final MessageType projection;
    private final ParquetValueReader<T> model;
    private final List<BlockMetaData> rowGroups;
    private final boolean[] shouldSkip;
    private final long totalValues;
    private final boolean reuseContainers;

    @SuppressWarnings("unchecked")
    ReadConf(InputFile file, ParquetReadOptions options, Schema expectedSchema,
             Expression filter, Function<MessageType, ParquetValueReader<?>> readerFunc,
             boolean reuseContainers) {
      this.file = file;
      this.options = options;
      this.reader = newReader(file, options);

      MessageType fileSchema = reader.getFileMetaData().getSchema();

      // files written without field ids get position-based fallback ids for projection
      boolean hasIds = hasIds(fileSchema);
      MessageType typeWithIds = hasIds ? fileSchema : addFallbackIds(fileSchema);

      this.projection = hasIds ?
          pruneColumns(fileSchema, expectedSchema) :
          pruneColumnsFallback(fileSchema, expectedSchema);
      this.model = (ParquetValueReader<T>) readerFunc.apply(typeWithIds);
      this.rowGroups = reader.getRowGroups();
      this.shouldSkip = new boolean[rowGroups.size()];

      ParquetMetricsRowGroupFilter statsFilter = null;
      ParquetDictionaryRowGroupFilter dictFilter = null;
      if (filter != null) {
        statsFilter = new ParquetMetricsRowGroupFilter(expectedSchema, filter);
        dictFilter = new ParquetDictionaryRowGroupFilter(expectedSchema, filter);
      }

      // decide once per row group; totalValues counts only rows in groups to be read
      long totalValues = 0L;
      for (int i = 0; i < shouldSkip.length; i += 1) {
        BlockMetaData rowGroup = rowGroups.get(i);
        boolean shouldRead = filter == null || (
            statsFilter.shouldRead(typeWithIds, rowGroup) &&
            dictFilter.shouldRead(typeWithIds, rowGroup, reader.getDictionaryReader(rowGroup)));
        this.shouldSkip[i] = !shouldRead;
        if (shouldRead) {
          totalValues += rowGroup.getRowCount();
        }
      }

      this.totalValues = totalValues;
      this.reuseContainers = reuseContainers;
    }

    // copy constructor: shares immutable state but deliberately drops the open reader
    ReadConf(ReadConf<T> toCopy) {
      this.reader = null;
      this.file = toCopy.file;
      this.options = toCopy.options;
      this.projection = toCopy.projection;
      this.model = toCopy.model;
      this.rowGroups = toCopy.rowGroups;
      this.shouldSkip = toCopy.shouldSkip;
      this.totalValues = toCopy.totalValues;
      this.reuseContainers = toCopy.reuseContainers;
    }

    // returns the original open reader once; later calls open a fresh reader
    ParquetFileReader reader() {
      if (reader != null) {
        reader.setRequestedSchema(projection);
        return reader;
      }

      ParquetFileReader newReader = newReader(file, options);
      newReader.setRequestedSchema(projection);
      return newReader;
    }

    ParquetValueReader<T> model() {
      return model;
    }

    boolean[] shouldSkip() {
      return shouldSkip;
    }

    long totalValues() {
      return totalValues;
    }

    boolean reuseContainers() {
      return reuseContainers;
    }

    ReadConf<T> copy() {
      return new ReadConf<>(this);
    }

    private static ParquetFileReader newReader(InputFile file, ParquetReadOptions options) {
      try {
        return ParquetFileReader.open(ParquetIO.file(file), options);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to open Parquet file: %s", file.location());
      }
    }
  }

  private ReadConf<T> conf = null;

  // builds ReadConf lazily; the first caller gets the instance with the open reader,
  // while the cached copy (no open reader) is used for subsequent iterators
  private ReadConf<T> init() {
    if (conf == null) {
      ReadConf<T> readConf = new ReadConf<>(
          input, options, expectedSchema, filter, readerFunc, reuseContainers);
      this.conf = readConf.copy();
      return readConf;
    }
    return conf;
  }

  @Override
  public Iterator<T> iterator() {
    FileIterator<T> iter = new FileIterator<>(init());
    addCloseable(iter);
    return iter;
  }

  /**
   * Iterates over the file's rows, skipping filtered row groups. Closing the iterator
   * closes its reader.
   */
  private static class FileIterator<T> implements Iterator<T>, Closeable {
    private final ParquetFileReader reader;
    private final boolean[] shouldSkip;
    private final ParquetValueReader<T> model;
    private final long totalValues;
    private final boolean reuseContainers;

    // index of the next row group to open (aligned with the reader's position)
    private int nextRowGroup = 0;
    // cumulative row count at which the current row group is exhausted
    private long nextRowGroupStart = 0;
    private long valuesRead = 0;
    private T last = null;

    FileIterator(ReadConf<T> conf) {
      this.reader = conf.reader();
      this.shouldSkip = conf.shouldSkip();
      this.model = conf.model();
      this.totalValues = conf.totalValues();
      this.reuseContainers = conf.reuseContainers();
    }

    @Override
    public boolean hasNext() {
      return valuesRead < totalValues;
    }

    @Override
    public T next() {
      if (valuesRead >= nextRowGroupStart) {
        advance();
      }

      if (reuseContainers) {
        this.last = model.read(last);
      } else {
        this.last = model.read(null);
      }
      valuesRead += 1;

      return last;
    }

    private void advance() {
      while (shouldSkip[nextRowGroup]) {
        nextRowGroup += 1;
        reader.skipNextRowGroup();
      }

      PageReadStore pages;
      try {
        pages = reader.readNextRowGroup();
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }

      // FIX: account for the row group just read. Without this increment, the next
      // call to advance() re-checks shouldSkip for the already-consumed group, so the
      // skip bookkeeping drifts out of sync with the reader's position and row groups
      // that were filtered out can be read.
      nextRowGroup += 1;

      nextRowGroupStart += pages.getRowCount();
      model.setPageSource(pages);
    }

    @Override
    public void close() throws IOException {
      reader.close();
    }
  }
}
6,540
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TypeToMessageType.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.netflix.iceberg.Schema; import com.netflix.iceberg.types.Type.NestedType; import com.netflix.iceberg.types.Type.PrimitiveType; import com.netflix.iceberg.types.TypeUtil; import com.netflix.iceberg.types.Types.DecimalType; import com.netflix.iceberg.types.Types.FixedType; import com.netflix.iceberg.types.Types.ListType; import com.netflix.iceberg.types.Types.MapType; import com.netflix.iceberg.types.Types.NestedField; import com.netflix.iceberg.types.Types.StructType; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.Type; import org.apache.parquet.schema.Types; import static org.apache.parquet.schema.OriginalType.DATE; import static org.apache.parquet.schema.OriginalType.DECIMAL; import static org.apache.parquet.schema.OriginalType.TIMESTAMP_MICROS; import static org.apache.parquet.schema.OriginalType.TIME_MICROS; import static org.apache.parquet.schema.OriginalType.UTF8; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BOOLEAN; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.DOUBLE; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.FLOAT; 
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32; import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64; public class TypeToMessageType { public static final int DECIMAL_INT32_MAX_DIGITS = 9; public static final int DECIMAL_INT64_MAX_DIGITS = 18; public MessageType convert(Schema schema, String name) { Types.MessageTypeBuilder builder = Types.buildMessage(); for (NestedField field : schema.columns()) { builder.addField(field(field)); } return builder.named(name); } public GroupType struct(StructType struct, Type.Repetition repetition, int id, String name) { Types.GroupBuilder<GroupType> builder = Types.buildGroup(repetition); for (NestedField field : struct.fields()) { builder.addField(field(field)); } return builder.id(id).named(name); } public Type field(NestedField field) { Type.Repetition repetition = field.isOptional() ? Type.Repetition.OPTIONAL : Type.Repetition.REQUIRED; int id = field.fieldId(); String name = field.name(); if (field.type().isPrimitiveType()) { return primitive(field.type().asPrimitiveType(), repetition, id, name); } else { NestedType nested = field.type().asNestedType(); if (nested.isStructType()) { return struct(nested.asStructType(), repetition, id, name); } else if (nested.isMapType()) { return map(nested.asMapType(), repetition, id, name); } else if (nested.isListType()) { return list(nested.asListType(), repetition, id, name); } throw new UnsupportedOperationException("Can't convert unknown type: " + nested); } } public GroupType list(ListType list, Type.Repetition repetition, int id, String name) { NestedField elementField = list.fields().get(0); return Types.list(repetition) .element(field(elementField)) .id(id) .named(name); } public GroupType map(MapType map, Type.Repetition repetition, int id, String name) { NestedField keyField = map.fields().get(0); NestedField valueField = map.fields().get(1); return Types.map(repetition) .key(field(keyField)) .value(field(valueField)) 
.id(id) .named(name); } public Type primitive(PrimitiveType primitive, Type.Repetition repetition, int id, String name) { switch (primitive.typeId()) { case BOOLEAN: return Types.primitive(BOOLEAN, repetition).id(id).named(name); case INTEGER: return Types.primitive(INT32, repetition).id(id).named(name); case LONG: return Types.primitive(INT64, repetition).id(id).named(name); case FLOAT: return Types.primitive(FLOAT, repetition).id(id).named(name); case DOUBLE: return Types.primitive(DOUBLE, repetition).id(id).named(name); case DATE: return Types.primitive(INT32, repetition).as(DATE).id(id).named(name); case TIME: return Types.primitive(INT64, repetition).as(TIME_MICROS).id(id).named(name); case TIMESTAMP: return Types.primitive(INT64, repetition).as(TIMESTAMP_MICROS).id(id).named(name); case STRING: return Types.primitive(BINARY, repetition).as(UTF8).id(id).named(name); case BINARY: return Types.primitive(BINARY, repetition).id(id).named(name); case FIXED: FixedType fixed = (FixedType) primitive; return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(fixed.length()) .id(id) .named(name); case DECIMAL: DecimalType decimal = (DecimalType) primitive; if (decimal.precision() <= DECIMAL_INT32_MAX_DIGITS) { // store as an int return Types.primitive(INT32, repetition) .as(DECIMAL) .precision(decimal.precision()) .scale(decimal.scale()) .id(id) .named(name); } else if (decimal.precision() <= DECIMAL_INT64_MAX_DIGITS) { // store as a long return Types.primitive(INT64, repetition) .as(DECIMAL) .precision(decimal.precision()) .scale(decimal.scale()) .id(id) .named(name); } else { // store as a fixed-length array int minLength = TypeUtil.decimalRequriedBytes(decimal.precision()); return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(minLength) .as(DECIMAL) .precision(decimal.precision()) .scale(decimal.scale()) .id(id) .named(name); } case UUID: return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(16).id(id).named(name); default: throw new 
UnsupportedOperationException("Unsupported type for Parquet: " + primitive); } } }
6,541
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; public class ParquetAvroReader { }
6,542
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TripleWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.io.api.Binary; public interface TripleWriter<T> { // TODO: should definition level be included, or should it be part of the column? /** * Write a value. * * @param rl repetition level * @param value the value */ void write(int rl, T value); /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeBoolean(int rl, boolean value) { throw new UnsupportedOperationException("Not a boolean column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeInteger(int rl, int value) { throw new UnsupportedOperationException("Not an integer column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeLong(int rl, long value) { throw new UnsupportedOperationException("Not an long column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeFloat(int rl, float value) { throw new UnsupportedOperationException("Not an float column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeDouble(int rl, double value) { throw new UnsupportedOperationException("Not an double column"); } /** * Write a triple. 
* * @param rl repetition level * @param value the boolean value */ default void writeBinary(int rl, Binary value) { throw new UnsupportedOperationException("Not an binary column"); } /** * Write a triple for a null value. * * @param rl repetition level * @param dl definition level */ void writeNull(int rl, int dl); }
6,543
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetMetricsRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.BoundReference;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.parquet.column.statistics.Statistics;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import java.util.Map;
import java.util.function.Function;
import static com.netflix.iceberg.expressions.Expressions.rewriteNot;
import static com.netflix.iceberg.parquet.ParquetConversions.converterFromParquet;

/**
 * Decides whether a Parquet row group can be skipped for a filter expression by
 * evaluating the filter against the row group's column statistics (min/max, null
 * counts). The answer is conservative: true means the group MAY contain matching
 * rows, false means it definitely cannot.
 */
public class ParquetMetricsRowGroupFilter {
  private final Schema schema;
  private final StructType struct;
  // filter bound to the schema, with NOT pushed down so the visitor needs no negation
  private final Expression expr;
  // per-thread visitor because it carries mutable per-evaluation state; lazily built
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  private MetricsEvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  /**
   * @param schema table schema the filter references
   * @param unbound filter expression; NOT is rewritten away before binding
   */
  public ParquetMetricsRowGroupFilter(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param fileSchema schema for the Parquet file
   * @param rowGroup metadata for a row group
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean shouldRead(MessageType fileSchema, BlockMetaData rowGroup) {
    return visitor().eval(fileSchema, rowGroup);
  }

  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Visitor that evaluates the bound filter over a row group's statistics. Each eval
   * re-indexes stats, value counts, and Parquet-to-Iceberg value converters by field id.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    private Map<Integer, Statistics> stats = null;
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Function<Object, Object>> conversions = null;

    private boolean eval(MessageType fileSchema, BlockMetaData rowGroup) {
      if (rowGroup.getRowCount() <= 0) {
        // empty row group: nothing can match
        return ROWS_CANNOT_MATCH;
      }

      this.stats = Maps.newHashMap();
      this.valueCounts = Maps.newHashMap();
      this.conversions = Maps.newHashMap();
      for (ColumnChunkMetaData col : rowGroup.getColumns()) {
        PrimitiveType colType = fileSchema.getType(col.getPath().toArray()).asPrimitiveType();
        if (colType.getId() != null) {
          int id = colType.getId().intValue();
          stats.put(id, col.getStatistics());
          valueCounts.put(id, col.getValueCount());
          conversions.put(id, converterFromParquet(colType));
        }
      }

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_MIGHT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty() && colStats.getNumNulls() == 0) {
        // there are stats and no values are null => all values are non-null
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no non-null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && valueCount - colStats.getNumNulls() == 0) {
        // (num nulls == value count) => all values are null => no non-null values
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the column minimum is already >= the literal, no value can be < it
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the column minimum is strictly > the literal, no value can be <= it
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the column maximum is already <= the literal, no value can be > it
        T upper = max(colStats, id);
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the column maximum is strictly < the literal, no value can be >= it
        T upper = max(colStats, id);
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // the literal must fall inside [min, max] for a match to be possible
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }

        T upper = max(colStats, id);
        cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      // min/max bounds cannot prove set membership is impossible; be conservative
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // see notEq: bounds cannot rule out a match
      return ROWS_MIGHT_MATCH;
    }

    // converts the Parquet-native min into the comparable Iceberg value for this field
    @SuppressWarnings("unchecked")
    private <T> T min(Statistics<?> stats, int id) {
      return (T) conversions.get(id).apply(stats.genericGetMin());
    }

    // converts the Parquet-native max into the comparable Iceberg value for this field
    @SuppressWarnings("unchecked")
    private <T> T max(Statistics<?> stats, int id) {
      return (T) conversions.get(id).apply(stats.genericGetMax());
    }
  }
}
6,544
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetFilters.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.BoundPredicate; import com.netflix.iceberg.expressions.BoundReference; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.expressions.Expression.Operation; import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.expressions.UnboundPredicate; import com.netflix.iceberg.types.Types; import org.apache.parquet.filter2.compat.FilterCompat; import org.apache.parquet.filter2.predicate.FilterApi; import org.apache.parquet.filter2.predicate.FilterPredicate; import org.apache.parquet.filter2.predicate.Operators; import org.apache.parquet.io.api.Binary; import java.nio.ByteBuffer; import static com.netflix.iceberg.expressions.ExpressionVisitors.visit; class ParquetFilters { static FilterCompat.Filter convert(Schema schema, Expression expr) { FilterPredicate pred = visit(expr, new ConvertFilterToParquet(schema)); // TODO: handle AlwaysFalse.INSTANCE if (pred != null && pred != AlwaysTrue.INSTANCE) { // FilterCompat will apply LogicalInverseRewriter return FilterCompat.get(pred); } else { return FilterCompat.NOOP; } } static FilterCompat.Filter convertColumnFilter(Schema schema, String column, Expression expr) { FilterPredicate 
pred = visit(expr, new ConvertColumnFilterToParquet(schema, column)); // TODO: handle AlwaysFalse.INSTANCE if (pred != null && pred != AlwaysTrue.INSTANCE) { // FilterCompat will apply LogicalInverseRewriter return FilterCompat.get(pred); } else { return FilterCompat.NOOP; } } private static class ConvertFilterToParquet extends ExpressionVisitor<FilterPredicate> { private final Schema schema; private ConvertFilterToParquet(Schema schema) { this.schema = schema; } @Override public FilterPredicate alwaysTrue() { return AlwaysTrue.INSTANCE; } @Override public FilterPredicate alwaysFalse() { return AlwaysFalse.INSTANCE; } @Override public FilterPredicate not(FilterPredicate child) { if (child == AlwaysTrue.INSTANCE) { return AlwaysFalse.INSTANCE; } else if (child == AlwaysFalse.INSTANCE) { return AlwaysTrue.INSTANCE; } return FilterApi.not(child); } @Override public FilterPredicate and(FilterPredicate left, FilterPredicate right) { if (left == AlwaysFalse.INSTANCE || right == AlwaysFalse.INSTANCE) { return AlwaysFalse.INSTANCE; } else if (left == AlwaysTrue.INSTANCE) { return right; } else if (right == AlwaysTrue.INSTANCE) { return left; } return FilterApi.and(left, right); } @Override public FilterPredicate or(FilterPredicate left, FilterPredicate right) { if (left == AlwaysTrue.INSTANCE || right == AlwaysTrue.INSTANCE) { return AlwaysTrue.INSTANCE; } else if (left == AlwaysFalse.INSTANCE) { return right; } else if (right == AlwaysFalse.INSTANCE) { return left; } return FilterApi.or(left, right); } @Override public <T> FilterPredicate predicate(BoundPredicate<T> pred) { Operation op = pred.op(); BoundReference<T> ref = pred.ref(); Literal<T> lit = pred.literal(); String path = schema.idToAlias(ref.fieldId()); switch (ref.type().typeId()) { case BOOLEAN: Operators.BooleanColumn col = FilterApi.booleanColumn(schema.idToAlias(ref.fieldId())); switch (op) { case EQ: return FilterApi.eq(col, getParquetPrimitive(lit)); case NOT_EQ: return FilterApi.eq(col, 
getParquetPrimitive(lit)); } case INTEGER: return pred(op, FilterApi.intColumn(path), getParquetPrimitive(lit)); case LONG: return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit)); case FLOAT: return pred(op, FilterApi.floatColumn(path), getParquetPrimitive(lit)); case DOUBLE: return pred(op, FilterApi.doubleColumn(path), getParquetPrimitive(lit)); case DATE: return pred(op, FilterApi.intColumn(path), getParquetPrimitive(lit)); case TIME: return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit)); case TIMESTAMP: return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit)); case STRING: return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit)); case UUID: return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit)); case FIXED: return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit)); case BINARY: return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit)); case DECIMAL: return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit)); } throw new UnsupportedOperationException("Cannot convert to Parquet filter: " + pred); } protected Expression bind(UnboundPredicate<?> pred) { return pred.bind(schema.asStruct()); } @Override @SuppressWarnings("unchecked") public <T> FilterPredicate predicate(UnboundPredicate<T> pred) { Expression bound = bind(pred); if (bound instanceof BoundPredicate) { return predicate((BoundPredicate<?>) bound); } else if (bound == Expressions.alwaysTrue()) { return AlwaysTrue.INSTANCE; } else if (bound == Expressions.alwaysFalse()) { return AlwaysFalse.INSTANCE; } throw new UnsupportedOperationException("Cannot convert to Parquet filter: " + pred); } } private static class ConvertColumnFilterToParquet extends ConvertFilterToParquet { private final Types.StructType partitionStruct; private ConvertColumnFilterToParquet(Schema schema, String column) { super(schema); this.partitionStruct = schema.findField(column).type().asNestedType().asStructType(); } 
protected Expression bind(UnboundPredicate<?> pred) { // instead of binding the predicate using the top-level schema, bind it to the partition data return pred.bind(partitionStruct); } } private static <C extends Comparable<C>, COL extends Operators.Column<C> & Operators.SupportsLtGt> FilterPredicate pred(Operation op, COL col, C value) { switch (op) { case IS_NULL: return FilterApi.eq(col, null); case NOT_NULL: return FilterApi.notEq(col, null); case EQ: return FilterApi.eq(col, value); case NOT_EQ: return FilterApi.notEq(col, value); case GT: return FilterApi.gt(col, value); case GT_EQ: return FilterApi.gtEq(col, value); case LT: return FilterApi.lt(col, value); case LT_EQ: return FilterApi.ltEq(col, value); default: throw new UnsupportedOperationException("Unsupported predicate operation: " + op); } } @SuppressWarnings("unchecked") private static <C extends Comparable<C>> C getParquetPrimitive(Literal<?> lit) { if (lit == null) { return null; } // TODO: this needs to convert to handle BigDecimal and UUID Object value = lit.value(); if (value instanceof Number) { return (C) lit.value(); } else if (value instanceof CharSequence) { return (C) Binary.fromString(value.toString()); } else if (value instanceof ByteBuffer) { return (C) Binary.fromReusedByteBuffer((ByteBuffer) value); } throw new UnsupportedOperationException( "Type not supported yet: " + value.getClass().getName()); } private static class AlwaysTrue implements FilterPredicate { static final AlwaysTrue INSTANCE = new AlwaysTrue(); @Override public <R> R accept(Visitor<R> visitor) { throw new UnsupportedOperationException("AlwaysTrue is a placeholder only"); } } private static class AlwaysFalse implements FilterPredicate { static final AlwaysFalse INSTANCE = new AlwaysFalse(); @Override public <R> R accept(Visitor<R> visitor) { throw new UnsupportedOperationException("AlwaysTrue is a placeholder only"); } } }
6,545
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/Parquet.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.SchemaParser;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.HadoopReadOptions;
import org.apache.parquet.ParquetReadOptions;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.avro.AvroWriteSupport;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

import static com.netflix.iceberg.TableProperties.PARQUET_COMPRESSION;
import static com.netflix.iceberg.TableProperties.PARQUET_COMPRESSION_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT;

/**
 * Entry point for reading and writing Iceberg data as Parquet files.
 * <p>
 * Use {@link #write(OutputFile)} to obtain a {@link WriteBuilder} and
 * {@link #read(InputFile)} to obtain a {@link ReadBuilder}.
 */
public class Parquet {
  private Parquet() {
  }

  public static WriteBuilder write(OutputFile file) {
    return new WriteBuilder(file);
  }

  /** Builder for Parquet {@link FileAppender}s. */
  public static class WriteBuilder {
    private final OutputFile file;
    private Schema schema = null;
    private String name = "table";
    private WriteSupport<?> writeSupport = null;
    private Map<String, String> metadata = Maps.newLinkedHashMap();
    private Map<String, String> config = Maps.newLinkedHashMap();
    private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null;

    private WriteBuilder(OutputFile file) {
      this.file = file;
    }

    /** Configures schema and properties from an existing table. */
    public WriteBuilder forTable(Table table) {
      schema(table.schema());
      setAll(table.properties());
      return this;
    }

    public WriteBuilder schema(Schema schema) {
      this.schema = schema;
      return this;
    }

    public WriteBuilder named(String name) {
      this.name = name;
      return this;
    }

    public WriteBuilder writeSupport(WriteSupport<?> writeSupport) {
      this.writeSupport = writeSupport;
      return this;
    }

    /** Sets a writer configuration property (forwarded to the Parquet writer). */
    public WriteBuilder set(String property, String value) {
      config.put(property, value);
      return this;
    }

    public WriteBuilder setAll(Map<String, String> properties) {
      config.putAll(properties);
      return this;
    }

    /** Adds a key/value entry to the file's key-value metadata. */
    public WriteBuilder meta(String property, String value) {
      metadata.put(property, value);
      return this;
    }

    public WriteBuilder createWriterFunc(
        Function<MessageType, ParquetValueWriter<?>> createWriterFunc) {
      this.createWriterFunc = createWriterFunc;
      return this;
    }

    // Returns the configured write support, or an Avro-based default built from the schema.
    @SuppressWarnings("unchecked")
    private <T> WriteSupport<T> getWriteSupport(MessageType type) {
      if (writeSupport != null) {
        return (WriteSupport<T>) writeSupport;
      } else {
        return new AvroWriteSupport<>(
            type,
            ParquetAvro.parquetAvroSchema(AvroSchemaUtil.convert(schema, name)),
            ParquetAvro.DEFAULT_MODEL);
      }
    }

    // Resolves the compression codec from table properties; rejects unknown codec names.
    private CompressionCodecName codec() {
      String codec = config.getOrDefault(PARQUET_COMPRESSION, PARQUET_COMPRESSION_DEFAULT);
      try {
        return CompressionCodecName.valueOf(codec.toUpperCase(Locale.ENGLISH));
      } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Unsupported compression codec: " + codec);
      }
    }

    // Copies an Iceberg table property into the Parquet config under the Parquet property name.
    void forwardConfig(String parquetProperty, String icebergProperty, String defaultValue) {
      String value = config.getOrDefault(icebergProperty, defaultValue);
      if (value != null) {
        set(parquetProperty, value);
      }
    }

    /**
     * Builds the file appender.
     *
     * @throws IOException if the underlying Parquet writer cannot be created
     */
    public <D> FileAppender<D> build() throws IOException {
      Preconditions.checkNotNull(schema, "Schema is required");
      Preconditions.checkNotNull(name, "Table name is required and cannot be null");

      // add the Iceberg schema to keyValueMetadata
      meta("iceberg.schema", SchemaParser.toJson(schema));

      // add Parquet configuration
      forwardConfig("parquet.block.size",
          PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT);
      forwardConfig("parquet.page.size",
          PARQUET_PAGE_SIZE_BYTES, PARQUET_PAGE_SIZE_BYTES_DEFAULT);
      forwardConfig("parquet.dictionary.page.size",
          PARQUET_DICT_SIZE_BYTES, PARQUET_DICT_SIZE_BYTES_DEFAULT);

      set("parquet.avro.write-old-list-structure", "false");
      MessageType type = ParquetSchemaUtil.convert(schema, name);

      if (createWriterFunc != null) {
        Preconditions.checkArgument(writeSupport == null,
            "Cannot write with both write support and Parquet value writer");
        Configuration conf;
        // NOTE(review): 'file' is an OutputFile, so this instanceof HadoopInputFile check
        // looks like it can never be true (a Hadoop *output* file type seems intended).
        // As written, a fresh Configuration is always created here — confirm and fix upstream.
        if (file instanceof HadoopInputFile) {
          conf = ((HadoopInputFile) file).getConf();
        } else {
          conf = new Configuration();
        }

        for (Map.Entry<String, String> entry : config.entrySet()) {
          conf.set(entry.getKey(), entry.getValue());
        }

        long rowGroupSize = Long.parseLong(config.getOrDefault(
            PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT));
        return new com.netflix.iceberg.parquet.ParquetWriter<>(
            conf, file, schema, rowGroupSize, metadata, createWriterFunc, codec());
      } else {
        return new ParquetWriteAdapter<>(new ParquetWriteBuilder<D>(ParquetIO.file(file))
            .setType(type)
            .setConfig(config)
            .setKeyValueMetadata(metadata)
            .setWriteSupport(getWriteSupport(type))
            .withCompressionCodec(codec())
            .withWriteMode(ParquetFileWriter.Mode.OVERWRITE) // TODO: support modes
            .build());
      }
    }
  }

  // Adapter that bridges this builder's config/metadata into Parquet's own writer builder.
  private static class ParquetWriteBuilder<T> extends ParquetWriter.Builder<T, ParquetWriteBuilder<T>> {
    private Map<String, String> keyValueMetadata = Maps.newHashMap();
    private Map<String, String> config = Maps.newHashMap();
    private MessageType type;
    private WriteSupport<T> writeSupport;

    private ParquetWriteBuilder(org.apache.parquet.io.OutputFile path) {
      super(path);
    }

    @Override
    protected ParquetWriteBuilder<T> self() {
      return this;
    }

    public ParquetWriteBuilder<T> setKeyValueMetadata(Map<String, String> keyValueMetadata) {
      this.keyValueMetadata = keyValueMetadata;
      return self();
    }

    public ParquetWriteBuilder<T> setConfig(Map<String, String> config) {
      this.config = config;
      return self();
    }

    public ParquetWriteBuilder<T> setType(MessageType type) {
      this.type = type;
      return self();
    }

    public ParquetWriteBuilder<T> setWriteSupport(WriteSupport<T> writeSupport) {
      this.writeSupport = writeSupport;
      return self();
    }

    @Override
    protected WriteSupport<T> getWriteSupport(Configuration configuration) {
      // forward the accumulated config into the Hadoop Configuration Parquet will use
      for (Map.Entry<String, String> entry : config.entrySet()) {
        configuration.set(entry.getKey(), entry.getValue());
      }
      return new ParquetWriteSupport<>(type, keyValueMetadata, writeSupport);
    }
  }

  public static ReadBuilder read(InputFile file) {
    return new ReadBuilder(file);
  }

  /** Builder for Parquet read iterables. */
  public static class ReadBuilder {
    private final InputFile file;
    private Long start = null;
    private Long length = null;
    private Schema schema = null;
    private Expression filter = null;
    private ReadSupport<?> readSupport = null;
    private Function<MessageType, ParquetValueReader<?>> readerFunc = null;
    private boolean filterRecords = true;
    private Map<String, String> properties = Maps.newHashMap();
    private boolean callInit = false;
    private boolean reuseContainers = false;

    private ReadBuilder(InputFile file) {
      this.file = file;
    }

    /**
     * Restricts the read to the given range: [start, start + length).
     *
     * @param start the start position for this read
     * @param length the length of the range this read should scan
     * @return this builder for method chaining
     */
    public ReadBuilder split(long start, long length) {
      this.start = start;
      this.length = length;
      return this;
    }

    public ReadBuilder project(Schema schema) {
      this.schema = schema;
      return this;
    }

    public ReadBuilder filterRecords(boolean filterRecords) {
      this.filterRecords = filterRecords;
      return this;
    }

    public ReadBuilder filter(Expression filter) {
      this.filter = filter;
      return this;
    }

    public ReadBuilder readSupport(ReadSupport<?> readSupport) {
      this.readSupport = readSupport;
      return this;
    }

    public ReadBuilder createReaderFunc(Function<MessageType, ParquetValueReader<?>> readerFunc) {
      this.readerFunc = readerFunc;
      return this;
    }

    public ReadBuilder set(String key, String value) {
      properties.put(key, value);
      return this;
    }

    public ReadBuilder callInit() {
      this.callInit = true;
      return this;
    }

    public ReadBuilder reuseContainers() {
      this.reuseContainers = true;
      return this;
    }

    /**
     * Builds the iterable. Uses the value-reader path when a reader function was supplied;
     * otherwise falls back to Parquet's ReadSupport-based reader.
     */
    @SuppressWarnings("unchecked")
    public <D> CloseableIterable<D> build() {
      if (readerFunc != null) {
        ParquetReadOptions.Builder optionsBuilder;
        if (file instanceof HadoopInputFile) {
          // reuse the Hadoop conf carried by the input file
          optionsBuilder = HadoopReadOptions.builder(((HadoopInputFile) file).getConf());
        } else {
          optionsBuilder = ParquetReadOptions.builder();
        }

        for (Map.Entry<String, String> entry : properties.entrySet()) {
          optionsBuilder.set(entry.getKey(), entry.getValue());
        }

        // NOTE(review): assumes 'length' is non-null whenever 'start' is set — both are only
        // assigned together in split(), so this holds for callers using the builder API.
        if (start != null) {
          optionsBuilder.withRange(start, start + length);
        }

        ParquetReadOptions options = optionsBuilder.build();

        return new com.netflix.iceberg.parquet.ParquetReader<>(
            file, schema, options, readerFunc, filter, reuseContainers);
      }

      ParquetReadBuilder<D> builder = new ParquetReadBuilder<>(ParquetIO.file(file));

      builder.project(schema);

      if (readSupport != null) {
        builder.readSupport((ReadSupport<D>) readSupport);
      } else {
        builder.readSupport(new AvroReadSupport<>(ParquetAvro.DEFAULT_MODEL));
      }

      // default options for readers
      builder.set("parquet.strict.typing", "false") // allow type promotion
          .set("parquet.avro.compatible", "false") // use the new RecordReader with Utf8 support
          .set("parquet.avro.add-list-element-records", "false"); // assume that lists use a 3-level schema

      for (Map.Entry<String, String> entry : properties.entrySet()) {
        builder.set(entry.getKey(), entry.getValue());
      }

      if (filter != null) {
        // TODO: should not need to get the schema to push down before opening the file.
        // Parquet should allow setting a filter inside its read support
        MessageType type;
        try (ParquetFileReader schemaReader = ParquetFileReader.open(ParquetIO.file(file))) {
          type = schemaReader.getFileMetaData().getSchema();
        } catch (IOException e) {
          throw new RuntimeIOException(e);
        }
        Schema fileSchema = ParquetSchemaUtil.convert(type);
        builder.useStatsFilter()
            .useDictionaryFilter()
            .useRecordFilter(filterRecords)
            .withFilter(ParquetFilters.convert(fileSchema, filter));
      } else {
        // turn off filtering
        builder.useStatsFilter(false)
            .useDictionaryFilter(false)
            .useRecordFilter(false);
      }

      if (callInit) {
        builder.callInit();
      }

      if (start != null) {
        builder.withFileRange(start, start + length);
      }

      return new ParquetIterable<>(builder);
    }
  }

  // Adapter around Parquet's reader builder that injects the Iceberg projection schema.
  private static class ParquetReadBuilder<T> extends ParquetReader.Builder<T> {
    private Schema schema = null;
    private ReadSupport<T> readSupport = null;
    private boolean callInit = false;

    private ParquetReadBuilder(org.apache.parquet.io.InputFile file) {
      super(file);
    }

    public ParquetReadBuilder<T> project(Schema schema) {
      this.schema = schema;
      return this;
    }

    public ParquetReadBuilder<T> readSupport(ReadSupport<T> readSupport) {
      this.readSupport = readSupport;
      return this;
    }

    public ParquetReadBuilder<T> callInit() {
      this.callInit = true;
      return this;
    }

    @Override
    protected ReadSupport<T> getReadSupport() {
      return new ParquetReadSupport<>(schema, readSupport, callInit);
    }
  }
}
6,546
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetDictionaryRowGroupFilter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import avro.shaded.com.google.common.collect.Sets; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.netflix.iceberg.Schema; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.expressions.Binder; import com.netflix.iceberg.expressions.BoundReference; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.expressions.ExpressionVisitors; import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.StructType; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Dictionary; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.EncodingStats; import org.apache.parquet.column.page.DictionaryPage; import org.apache.parquet.column.page.DictionaryPageReadStore; import org.apache.parquet.hadoop.metadata.BlockMetaData; import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.PrimitiveType; import java.io.IOException; import java.util.Comparator; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.function.Function; import static 
com.netflix.iceberg.expressions.Expressions.rewriteNot; import static com.netflix.iceberg.parquet.ParquetConversions.converterFromParquet; public class ParquetDictionaryRowGroupFilter { private final Schema schema; private final StructType struct; private final Expression expr; private transient ThreadLocal<EvalVisitor> visitors = null; private EvalVisitor visitor() { if (visitors == null) { this.visitors = ThreadLocal.withInitial(EvalVisitor::new); } return visitors.get(); } public ParquetDictionaryRowGroupFilter(Schema schema, Expression unbound) { this.schema = schema; this.struct = schema.asStruct(); this.expr = Binder.bind(struct, rewriteNot(unbound)); } /** * Test whether the dictionaries for a row group may contain records that match the expression. * * @param fileSchema schema for the Parquet file * @param dictionaries a dictionary page read store * @return false if the file cannot contain rows that match the expression, true otherwise. */ public boolean shouldRead(MessageType fileSchema, BlockMetaData rowGroup, DictionaryPageReadStore dictionaries) { return visitor().eval(fileSchema, rowGroup, dictionaries); } private static final boolean ROWS_MIGHT_MATCH = true; private static final boolean ROWS_CANNOT_MATCH = false; private class EvalVisitor extends BoundExpressionVisitor<Boolean> { private DictionaryPageReadStore dictionaries = null; private Map<Integer, Set<?>> dictCache = null; private Map<Integer, Boolean> isFallback = null; private Map<Integer, ColumnDescriptor> cols = null; private Map<Integer, Function<Object, Object>> conversions = null; private boolean eval(MessageType fileSchema, BlockMetaData rowGroup, DictionaryPageReadStore dictionaries) { this.dictionaries = dictionaries; this.dictCache = Maps.newHashMap(); this.isFallback = Maps.newHashMap(); this.cols = Maps.newHashMap(); this.conversions = Maps.newHashMap(); for (ColumnDescriptor desc : fileSchema.getColumns()) { PrimitiveType colType = 
fileSchema.getType(desc.getPath()).asPrimitiveType(); if (colType.getId() != null) { int id = colType.getId().intValue(); cols.put(id, desc); conversions.put(id, converterFromParquet(colType)); } } for (ColumnChunkMetaData meta : rowGroup.getColumns()) { PrimitiveType colType = fileSchema.getType(meta.getPath().toArray()).asPrimitiveType(); if (colType.getId() != null) { int id = colType.getId().intValue(); isFallback.put(id, hasNonDictionaryPages(meta)); } } return ExpressionVisitors.visit(expr, this); } @Override public Boolean alwaysTrue() { return ROWS_MIGHT_MATCH; // all rows match } @Override public Boolean alwaysFalse() { return ROWS_CANNOT_MATCH; // all rows fail } @Override public Boolean not(Boolean result) { return !result; } @Override public Boolean and(Boolean leftResult, Boolean rightResult) { return leftResult && rightResult; } @Override public Boolean or(Boolean leftResult, Boolean rightResult) { return leftResult || rightResult; } @Override public <T> Boolean isNull(BoundReference<T> ref) { // dictionaries only contain non-nulls and cannot eliminate based on isNull or NotNull return ROWS_MIGHT_MATCH; } @Override public <T> Boolean notNull(BoundReference<T> ref) { // dictionaries only contain non-nulls and cannot eliminate based on isNull or NotNull return ROWS_MIGHT_MATCH; } @Override public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) { Integer id = ref.fieldId(); Types.NestedField field = struct.field(id); Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id)); Boolean hasNonDictPage = isFallback.get(id); if (hasNonDictPage == null || hasNonDictPage) { return ROWS_MIGHT_MATCH; } Set<T> dictionary = dict(id, lit.comparator()); // if any item in the dictionary matches the predicate, then at least one row does for (T item : dictionary) { int cmp = lit.comparator().compare(item, lit.value()); if (cmp < 0) { return ROWS_MIGHT_MATCH; } } return ROWS_CANNOT_MATCH; } @Override public <T> Boolean 
ltEq(BoundReference<T> ref, Literal<T> lit) {
  Integer id = ref.fieldId();
  Types.NestedField field = struct.field(id);
  Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

  Boolean hasNonDictPage = isFallback.get(id);
  if (hasNonDictPage == null || hasNonDictPage) {
    // unknown fallback state, or some pages are not dictionary-encoded: cannot prune
    return ROWS_MIGHT_MATCH;
  }

  Set<T> dictionary = dict(id, lit.comparator());
  if (dictionary == null) {
    // fix: dict() returns null when no dictionary page is readable; be conservative
    // instead of throwing NullPointerException
    return ROWS_MIGHT_MATCH;
  }

  // if any item in the dictionary matches the predicate, then at least one row does
  for (T item : dictionary) {
    int cmp = lit.comparator().compare(item, lit.value());
    if (cmp <= 0) {
      return ROWS_MIGHT_MATCH;
    }
  }

  return ROWS_CANNOT_MATCH;
}

@Override
public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
  Integer id = ref.fieldId();
  Types.NestedField field = struct.field(id);
  Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

  Boolean hasNonDictPage = isFallback.get(id);
  if (hasNonDictPage == null || hasNonDictPage) {
    return ROWS_MIGHT_MATCH;
  }

  Set<T> dictionary = dict(id, lit.comparator());
  if (dictionary == null) {
    // fix: avoid NPE when the dictionary page is missing; cannot prune
    return ROWS_MIGHT_MATCH;
  }

  // if any item in the dictionary matches the predicate, then at least one row does
  for (T item : dictionary) {
    int cmp = lit.comparator().compare(item, lit.value());
    if (cmp > 0) {
      return ROWS_MIGHT_MATCH;
    }
  }

  return ROWS_CANNOT_MATCH;
}

@Override
public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
  Integer id = ref.fieldId();
  Types.NestedField field = struct.field(id);
  Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

  Boolean hasNonDictPage = isFallback.get(id);
  if (hasNonDictPage == null || hasNonDictPage) {
    return ROWS_MIGHT_MATCH;
  }

  Set<T> dictionary = dict(id, lit.comparator());
  if (dictionary == null) {
    // fix: avoid NPE when the dictionary page is missing; cannot prune
    return ROWS_MIGHT_MATCH;
  }

  // if any item in the dictionary matches the predicate, then at least one row does
  for (T item : dictionary) {
    int cmp = lit.comparator().compare(item, lit.value());
    if (cmp >= 0) {
      return ROWS_MIGHT_MATCH;
    }
  }

  return ROWS_CANNOT_MATCH;
}

@Override
public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
  Integer id = ref.fieldId();
  Types.NestedField field = struct.field(id);
  Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

  Boolean hasNonDictPage = isFallback.get(id);
  if (hasNonDictPage == null || hasNonDictPage) {
    return ROWS_MIGHT_MATCH;
  }

  Set<T> dictionary = dict(id, lit.comparator());
  if (dictionary == null) {
    // fix: avoid NPE when the dictionary page is missing; cannot prune
    return ROWS_MIGHT_MATCH;
  }

  return dictionary.contains(lit.value()) ? ROWS_MIGHT_MATCH : ROWS_CANNOT_MATCH;
}

@Override
public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
  Integer id = ref.fieldId();
  Types.NestedField field = struct.field(id);
  Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

  Boolean hasNonDictPage = isFallback.get(id);
  if (hasNonDictPage == null || hasNonDictPage) {
    return ROWS_MIGHT_MATCH;
  }

  Set<T> dictionary = dict(id, lit.comparator());
  if (dictionary == null || dictionary.size() > 1) {
    // more than one distinct value (or unknown dictionary): some row may differ
    return ROWS_MIGHT_MATCH;
  }

  return dictionary.contains(lit.value()) ? ROWS_CANNOT_MATCH : ROWS_MIGHT_MATCH;
}

@Override
public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
  // IN is not used for dictionary pruning here; always conservative
  return ROWS_MIGHT_MATCH;
}

@Override
public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
  // NOT IN is not used for dictionary pruning here; always conservative
  return ROWS_MIGHT_MATCH;
}

/**
 * Returns the decoded dictionary values for a column, sorted by the given comparator,
 * or null if the column has no readable dictionary page. Results are cached per column id.
 */
@SuppressWarnings("unchecked")
private <T> Set<T> dict(int id, Comparator<T> comparator) {
  Set<?> cached = dictCache.get(id);
  if (cached != null) {
    return (Set<T>) cached;
  }

  ColumnDescriptor col = cols.get(id);
  DictionaryPage page = dictionaries.readDictionaryPage(col);
  if (page == null) {
    // may not be dictionary-encoded
    return null;
  }

  Function<Object, Object> conversion = conversions.get(id);

  Dictionary dict;
  try {
    dict = page.getEncoding().initDictionary(col, page);
  } catch (IOException e) {
    // fix: preserve the underlying cause instead of silently dropping it
    throw new RuntimeIOException(e, "Failed to create reader for dictionary page");
  }

  Set<T> dictSet = Sets.newTreeSet(comparator); // fixed stray ';;'

  for (int i = 0; i <= dict.getMaxId(); i += 1) {
    switch (col.getType()) {
      case BINARY:
        dictSet.add((T) conversion.apply(dict.decodeToBinary(i)));
        break;
      case INT32:
        dictSet.add((T) conversion.apply(dict.decodeToInt(i)));
        break;
      case INT64:
        dictSet.add((T) conversion.apply(dict.decodeToLong(i)));
        break;
      case FLOAT:
        dictSet.add((T) conversion.apply(dict.decodeToFloat(i)));
        break;
      case DOUBLE:
        dictSet.add((T) conversion.apply(dict.decodeToDouble(i)));
        break;
      default:
        throw new IllegalArgumentException(
            "Cannot decode dictionary of type: " + col.getType());
    }
  }

  dictCache.put(id, dictSet);

  return dictSet;
}
} // end of visitor class

/**
 * Returns true if the column chunk may contain pages that are not dictionary-encoded.
 * Uses EncodingStats when present; otherwise falls back to inspecting the encoding list,
 * which is only conclusive for Parquet 1.0 encodings.
 */
@SuppressWarnings("deprecation")
private static boolean hasNonDictionaryPages(ColumnChunkMetaData meta) {
  EncodingStats stats = meta.getEncodingStats();
  if (stats != null) {
    return stats.hasNonDictionaryEncodedPages();
  }

  // without EncodingStats, fall back to testing the encoding list
  Set<Encoding> encodings = new HashSet<>(meta.getEncodings());
  if (encodings.remove(Encoding.PLAIN_DICTIONARY)) {
    // if remove returned true, PLAIN_DICTIONARY was present, which means at
    // least one page was dictionary encoded and 1.0 encodings are used
    // RLE and BIT_PACKED are only used for repetition or definition levels
    encodings.remove(Encoding.RLE);
    encodings.remove(Encoding.BIT_PACKED);
    // true only if a data encoding other than the dictionary encoding remains
    return !encodings.isEmpty();
  } else {
    // if PLAIN_DICTIONARY wasn't present, then either the column is not
    // dictionary-encoded, or the 2.0 encoding, RLE_DICTIONARY, was used.
    // for 2.0, this cannot determine whether a page fell back without
    // page encoding stats
    return true;
  }
}
}
6,547
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/MessageTypeToType.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.Preconditions;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type.Repetition;

import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Converts a Parquet {@link MessageType} to an Iceberg {@link Type}, assigning field ids
 * from the Parquet type when present and generating fresh ids otherwise. Also records a
 * mapping from dotted field names to ids, exposed via {@link #getAliases()}.
 */
class MessageTypeToType extends ParquetTypeVisitor<Type> {
  private static final Joiner DOT = Joiner.on(".");

  private final Map<String, Integer> aliasToId = Maps.newHashMap();
  private final GroupType root;
  private int nextId = 1;

  public MessageTypeToType(GroupType root) {
    this.root = root;
    this.nextId = 1_000; // use ids that won't match other than for root
  }

  /**
   * Returns the mapping of dotted Parquet field names to assigned Iceberg field ids.
   */
  public Map<String, Integer> getAliases() {
    return aliasToId;
  }

  @Override
  public Type message(MessageType message, List<Type> fields) {
    return struct(message, fields);
  }

  @Override
  public Type struct(GroupType struct, List<Type> fieldTypes) {
    if (struct == root) {
      nextId = 1; // use the reserved IDs for the root struct
    }

    List<org.apache.parquet.schema.Type> parquetFields = struct.getFields();
    List<Types.NestedField> fields = Lists.newArrayListWithExpectedSize(fieldTypes.size());
    for (int i = 0; i < parquetFields.size(); i += 1) {
      org.apache.parquet.schema.Type field = parquetFields.get(i);

      // fix: Parquet's Preconditions uses String.format placeholders, so "{}" was
      // never substituted; use "%s" instead
      Preconditions.checkArgument(
          !field.isRepetition(Repetition.REPEATED),
          "Fields cannot have repetition REPEATED: %s", field);

      int fieldId = getId(field);

      addAlias(field.getName(), fieldId);

      if (parquetFields.get(i).isRepetition(Repetition.OPTIONAL)) {
        fields.add(optional(fieldId, field.getName(), fieldTypes.get(i)));
      } else {
        fields.add(required(fieldId, field.getName(), fieldTypes.get(i)));
      }
    }

    return Types.StructType.of(fields);
  }

  @Override
  public Type list(GroupType array, Type elementType) {
    GroupType repeated = array.getType(0).asGroupType();
    org.apache.parquet.schema.Type element = repeated.getType(0);

    // fix: "%s" placeholder (see struct)
    Preconditions.checkArgument(
        !element.isRepetition(Repetition.REPEATED),
        "Elements cannot have repetition REPEATED: %s", element);

    int elementFieldId = getId(element);

    addAlias(element.getName(), elementFieldId);

    if (element.isRepetition(Repetition.OPTIONAL)) {
      return Types.ListType.ofOptional(elementFieldId, elementType);
    } else {
      return Types.ListType.ofRequired(elementFieldId, elementType);
    }
  }

  @Override
  public Type map(GroupType map, Type keyType, Type valueType) {
    GroupType keyValue = map.getType(0).asGroupType();
    org.apache.parquet.schema.Type key = keyValue.getType(0);
    org.apache.parquet.schema.Type value = keyValue.getType(1);

    // fix: "%s" placeholder (see struct)
    Preconditions.checkArgument(
        !value.isRepetition(Repetition.REPEATED),
        "Values cannot have repetition REPEATED: %s", value);

    int keyFieldId = getId(key);
    int valueFieldId = getId(value);

    addAlias(key.getName(), keyFieldId);
    addAlias(value.getName(), valueFieldId);

    if (value.isRepetition(Repetition.OPTIONAL)) {
      return Types.MapType.ofOptional(keyFieldId, valueFieldId, keyType, valueType);
    } else {
      return Types.MapType.ofRequired(keyFieldId, valueFieldId, keyType, valueType);
    }
  }

  @Override
  public Type primitive(PrimitiveType primitive) {
    // logical type annotations take precedence over the physical type
    OriginalType annotation = primitive.getOriginalType();
    if (annotation != null) {
      switch (annotation) {
        case INT_8:
        case UINT_8:
        case INT_16:
        case UINT_16:
        case INT_32:
          return Types.IntegerType.get();
        case INT_64:
          return Types.LongType.get();
        case DATE:
          return Types.DateType.get();
        case TIME_MILLIS:
        case TIME_MICROS:
          return Types.TimeType.get();
        case TIMESTAMP_MILLIS:
        case TIMESTAMP_MICROS:
          return Types.TimestampType.withZone();
        case JSON:
        case BSON:
        case ENUM:
        case UTF8:
          return Types.StringType.get();
        case DECIMAL:
          DecimalMetadata decimal = primitive.getDecimalMetadata();
          return Types.DecimalType.of(
              decimal.getPrecision(), decimal.getScale());
        default:
          throw new UnsupportedOperationException("Unsupported logical type: " + annotation);
      }
    }

    switch (primitive.getPrimitiveTypeName()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case INT32:
        return Types.IntegerType.get();
      case INT64:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case FIXED_LEN_BYTE_ARRAY:
        return Types.FixedType.ofLength(primitive.getTypeLength());
      case BINARY:
        return Types.BinaryType.get();
    }

    // INT96 and any future physical types are not supported
    throw new UnsupportedOperationException(
        "Cannot convert unknown primitive type: " + primitive);
  }

  // note: removed an unused private addAlias(int) overload that was never called

  /**
   * Records the id for a field under its full dotted path (fieldNames is the
   * enclosing-path stack maintained by ParquetTypeVisitor).
   */
  private void addAlias(String name, int fieldId) {
    String fullName = name;
    if (!fieldNames.isEmpty()) {
      fullName = DOT.join(DOT.join(fieldNames.descendingIterator()), name);
    }
    aliasToId.put(fullName, fieldId);
  }

  /**
   * Returns the next generated field id.
   */
  protected int nextId() {
    int current = nextId;
    nextId += 1;
    return current;
  }

  /**
   * Returns the field id embedded in the Parquet type, or a newly generated id.
   */
  private int getId(org.apache.parquet.schema.Type type) {
    org.apache.parquet.schema.Type.ID id = type.getId();
    if (id != null) {
      return id.intValue();
    } else {
      return nextId();
    }
  }
}
6,548
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetConversions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.commons.io.Charsets;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.PrimitiveType;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.UUID;
import java.util.function.Function;

/**
 * Helpers that convert Parquet primitive values into Iceberg literals and into
 * the object representations used by readers.
 */
class ParquetConversions {
  private ParquetConversions() {
  }

  /**
   * Wraps a raw Parquet primitive value as an Iceberg {@link Literal} of the given type.
   *
   * @throws IllegalArgumentException if the value's class or the target type is unsupported
   */
  static <T> Literal<T> fromParquetPrimitive(Type type, Object value) {
    if (value instanceof Boolean) {
      return Literal.of((Boolean) value).to(type);
    }
    if (value instanceof Integer) {
      return Literal.of((Integer) value).to(type);
    }
    if (value instanceof Long) {
      return Literal.of((Long) value).to(type);
    }
    if (value instanceof Float) {
      return Literal.of((Float) value).to(type);
    }
    if (value instanceof Double) {
      return Literal.of((Double) value).to(type);
    }
    if (value instanceof Binary) {
      return binaryLiteral(type, (Binary) value);
    }
    throw new IllegalArgumentException("Unsupported primitive value: " + value);
  }

  /**
   * Converts a Parquet {@link Binary} into a literal of the given Iceberg type.
   */
  private static <T> Literal<T> binaryLiteral(Type type, Binary binary) {
    switch (type.typeId()) {
      case STRING:
        return Literal.of(Charsets.UTF_8.decode(binary.toByteBuffer())).to(type);
      case UUID: {
        // UUIDs are stored as 16 big-endian bytes: high 8 then low 8
        ByteBuffer buffer = binary.toByteBuffer().order(ByteOrder.BIG_ENDIAN);
        long mostSigBits = buffer.getLong();
        long leastSigBits = buffer.getLong();
        return Literal.of(new UUID(mostSigBits, leastSigBits)).to(type);
      }
      case FIXED:
      case BINARY:
        return Literal.of(binary.toByteBuffer()).to(type);
      case DECIMAL: {
        Types.DecimalType decimalType = (Types.DecimalType) type;
        BigDecimal decimal =
            new BigDecimal(new BigInteger(binary.getBytes()), decimalType.scale());
        return Literal.of(decimal).to(type);
      }
      default:
        throw new IllegalArgumentException("Unsupported primitive type: " + type);
    }
  }

  /**
   * Returns a function that normalizes raw reader values for the given Parquet column
   * type: UTF8 binaries become CharSequences, decimals become BigDecimal, and other
   * binary columns become ByteBuffers. Everything else passes through unchanged.
   */
  static Function<Object, Object> converterFromParquet(PrimitiveType type) {
    if (type.getOriginalType() != null) {
      switch (type.getOriginalType()) {
        case UTF8:
          // decode to CharSequence to avoid copying into a new String
          return b -> Charsets.UTF_8.decode(((Binary) b).toByteBuffer());
        case DECIMAL:
          int scale = type.getDecimalMetadata().getScale();
          switch (type.getPrimitiveTypeName()) {
            case INT32:
            case INT64:
              return num -> BigDecimal.valueOf(((Number) num).longValue(), scale);
            case FIXED_LEN_BYTE_ARRAY:
            case BINARY:
              return b -> new BigDecimal(new BigInteger(((Binary) b).getBytes()), scale);
            default:
              throw new IllegalArgumentException(
                  "Unsupported primitive type for decimal: " + type.getPrimitiveTypeName());
          }
        default:
          // other annotations use the physical representation below
          break;
      }
    }

    switch (type.getPrimitiveTypeName()) {
      case FIXED_LEN_BYTE_ARRAY:
      case BINARY:
        return b -> ByteBuffer.wrap(((Binary) b).getBytes());
      default:
        return obj -> obj;
    }
  }
}
6,549
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroValueReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.parquet.ParquetValueReaders.BytesReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.FloatAsDoubleReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.ListReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.MapReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData.Fixed;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.util.Utf8;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import static com.netflix.iceberg.parquet.ParquetValueReaders.option;

/**
 * Builds {@link ParquetValueReader} trees that materialize Parquet data as Avro
 * {@link Record} instances matching an expected Iceberg schema.
 */
public class ParquetAvroValueReaders {
  private ParquetAvroValueReaders() {
  }

  /**
   * Returns a reader that produces Avro records for the expected schema, reading
   * from a Parquet file with the given file schema.
   */
  @SuppressWarnings("unchecked")
  public static ParquetValueReader<Record> buildReader(com.netflix.iceberg.Schema expectedSchema,
                                                       MessageType fileSchema) {
    return (ParquetValueReader<Record>)
        TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
            new ReadBuilder(expectedSchema, fileSchema));
  }

  /**
   * Visitor that pairs each expected Iceberg type with the file's Parquet type and
   * builds the matching value reader.
   */
  private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
    private final com.netflix.iceberg.Schema schema;
    private final Map<com.netflix.iceberg.types.Type, Schema> avroSchemas;
    private final MessageType type;

    ReadBuilder(com.netflix.iceberg.Schema schema, MessageType type) {
      this.schema = schema;
      this.avroSchemas = AvroSchemaUtil.convertTypes(schema.asStruct(), type.getName());
      this.type = type;
    }

    @Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      return struct(expected, message.asGroupType(), fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      Schema avroSchema = avroSchemas.get(expected);

      // match the expected struct's order
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        int id = fieldType.getId().intValue();
        readersById.put(id, option(fieldType, fieldD, fieldReaders.get(i)));
        typesById.put(id, fieldType);
      }

      // missing expected fields read as nulls
      List<Types.NestedField> expectedFields = expected != null ?
          expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
          expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
      for (Types.NestedField field : expectedFields) {
        int id = field.fieldId();
        ParquetValueReader<?> reader = readersById.get(id);
        if (reader != null) {
          reorderedFields.add(reader);
          types.add(typesById.get(id));
        } else {
          reorderedFields.add(ParquetValueReaders.nulls());
          types.add(null);
        }
      }

      return new RecordReader(types, reorderedFields, avroSchema);
    }

    @Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
                                      ParquetValueReader<?> elementReader) {
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;

      return new ListReader<>(repeatedD, repeatedR, option(elementType, elementD, elementReader));
    }

    @Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
                                     ParquetValueReader<?> keyReader,
                                     ParquetValueReader<?> valueReader) {
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
      Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;

      return new MapReader<>(repeatedD, repeatedR,
          option(keyType, keyD, keyReader), option(valueType, valueD, valueReader));
    }

    @Override
    public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected,
                                           PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());
      // map keys are materialized as String; everything else as Utf8
      boolean isMapKey = fieldNames.contains("key");

      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            if (isMapKey) {
              return new StringReader(desc);
            }
            return new Utf8Reader(desc);
          case DATE:
          case INT_8:
          case INT_16:
          case INT_32:
          case INT_64:
          case TIME_MICROS:
          case TIMESTAMP_MICROS:
            return new UnboxedReader<>(desc);
          case TIME_MILLIS:
            return new TimeMillisReader(desc);
          case TIMESTAMP_MILLIS:
            return new TimestampMillisReader(desc);
          case DECIMAL:
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return new DecimalReader(desc, decimal.getScale());
              case INT32:
                // fix: was LongAsDecimalReader; INT32 columns must use the int reader
                return new IntegerAsDecimalReader(desc, decimal.getScale());
              case INT64:
                // fix: was IntegerAsDecimalReader; INT64 columns must use the long reader
                return new LongAsDecimalReader(desc, decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return new BytesReader(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          int fieldId = primitive.getId().intValue();
          Schema avroSchema = AvroSchemaUtil.convert(schema.findType(fieldId));
          return new FixedReader(desc, avroSchema);
        case BINARY:
          return new BytesReader(desc);
        case INT32:
          if (expected != null && expected.typeId() == TypeID.LONG) {
            return new IntAsLongReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case FLOAT:
          if (expected != null && expected.typeId() == TypeID.DOUBLE) {
            return new FloatAsDoubleReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case BOOLEAN:
        case INT64:
        case DOUBLE:
          return new UnboxedReader<>(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    /**
     * Returns the current field path as an array (fieldNames is the enclosing-path
     * stack maintained by the visitor, deepest first).
     */
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }

    /**
     * Returns the current field path extended with the given name.
     */
    private String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
  }

  /**
   * Reads BINARY/FIXED-backed decimals with a fixed scale.
   */
  static class DecimalReader extends ParquetValueReaders.PrimitiveReader<BigDecimal> {
    private final int scale;

    DecimalReader(ColumnDescriptor desc, int scale) {
      super(desc);
      this.scale = scale;
    }

    @Override
    public BigDecimal read(BigDecimal ignored) {
      return new BigDecimal(new BigInteger(column.nextBinary().getBytesUnsafe()), scale);
    }
  }

  /**
   * Reads UTF8 values as String (used for map keys).
   */
  static class StringReader extends ParquetValueReaders.PrimitiveReader<String> {
    StringReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public String read(String ignored) {
      return column.nextBinary().toStringUsingUTF8();
    }
  }

  /**
   * Reads UTF8 values as reusable Avro {@link Utf8} instances.
   */
  static class Utf8Reader extends ParquetValueReaders.PrimitiveReader<Utf8> {
    Utf8Reader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public Utf8 read(Utf8 reuse) {
      Utf8 utf8;
      if (reuse != null) {
        utf8 = reuse;
      } else {
        utf8 = new Utf8();
      }

      // use a byte buffer because it never results in a copy
      ByteBuffer buffer = column.nextBinary().toByteBuffer();

      // always copy the bytes into the Utf8. for constant binary data backed by an array starting
      // at 0, it is possible to wrap the bytes in a Utf8, but reusing that Utf8 could corrupt the
      // constant binary if its backing buffer is copied to.
      utf8.setByteLength(buffer.remaining());
      buffer.get(utf8.getBytes(), 0, buffer.remaining());

      return utf8;
    }
  }

  /**
   * Reads 16-byte big-endian UUID values.
   * NOTE(review): not referenced from ReadBuilder.primitive — confirm whether UUID
   * columns should be routed here.
   */
  static class UUIDReader extends ParquetValueReaders.PrimitiveReader<UUID> {
    UUIDReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public UUID read(UUID ignored) {
      ByteBuffer buffer = column.nextBinary().toByteBuffer();
      buffer.order(ByteOrder.BIG_ENDIAN);
      long mostSigBits = buffer.getLong();
      long leastSigBits = buffer.getLong();
      return new UUID(mostSigBits, leastSigBits);
    }
  }

  /**
   * Reads FIXED_LEN_BYTE_ARRAY values into Avro {@link Fixed} instances.
   */
  static class FixedReader extends ParquetValueReaders.PrimitiveReader<Fixed> {
    private final Schema schema;

    FixedReader(ColumnDescriptor desc, Schema schema) {
      super(desc);
      this.schema = schema;
    }

    @Override
    public Fixed read(Fixed reuse) {
      Fixed fixed;
      if (reuse != null) {
        fixed = reuse;
      } else {
        fixed = new Fixed(schema);
      }

      column.nextBinary().toByteBuffer().get(fixed.bytes());

      return fixed;
    }
  }

  /**
   * Reads TIME_MILLIS values and scales them to microseconds.
   */
  public static class TimeMillisReader extends UnboxedReader<Long> {
    TimeMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public long readLong() {
      // NOTE(review): TIME_MILLIS annotates an INT32 physical column; confirm that
      // column.nextLong() is valid here rather than column.nextInteger()
      return 1000 * column.nextLong();
    }
  }

  /**
   * Reads TIMESTAMP_MILLIS values and scales them to microseconds.
   */
  public static class TimestampMillisReader extends UnboxedReader<Long> {
    TimestampMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public long readLong() {
      return 1000 * column.nextLong();
    }
  }

  /**
   * Struct reader that materializes Avro {@link Record} instances, reusing the
   * passed-in record when possible.
   */
  static class RecordReader extends StructReader<Record, Record> {
    private final Schema schema;

    RecordReader(List<Type> types, List<ParquetValueReader<?>> readers, Schema schema) {
      super(types, readers);
      this.schema = schema;
    }

    @Override
    protected Record newStructData(Record reuse) {
      if (reuse != null) {
        return reuse;
      } else {
        return new Record(schema);
      }
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Object getField(Record intermediate, int pos) {
      return intermediate.get(pos);
    }

    @Override
    protected Record buildStruct(Record struct) {
      return struct;
    }

    @Override
    protected void set(Record struct, int pos, Object value) {
      struct.put(pos, value);
    }
  }
}
6,550
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueReaders.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.page.PageReadStore; import org.apache.parquet.io.api.Binary; import org.apache.parquet.schema.Type; import java.lang.reflect.Array; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; import java.util.Map; import static java.util.Collections.emptyIterator; public class ParquetValueReaders { private ParquetValueReaders() { } public static <T> ParquetValueReader<T> option(Type type, int definitionLevel, ParquetValueReader<T> reader) { if (type.isRepetition(Type.Repetition.OPTIONAL)) { return new OptionReader<>(definitionLevel, reader); } return reader; } @SuppressWarnings("unchecked") public static <T> ParquetValueReader<T> nulls() { return (ParquetValueReader<T>) NullReader.INSTANCE; } private static class NullReader<T> implements ParquetValueReader<T> { private static final NullReader<Void> INSTANCE = new NullReader<>(); private static final List<TripleIterator<?>> COLUMNS = ImmutableList.of(); private static final TripleIterator<?> NULL_COLUMN = new TripleIterator<Object> () { @Override public int currentDefinitionLevel() { return 0; } @Override public int 
currentRepetitionLevel() { return 0; } @Override public <N> N nextNull() { return null; } @Override public boolean hasNext() { return false; } @Override public Object next() { return null; } }; private NullReader() { } @Override public T read(T reuse) { return null; } @Override public TripleIterator<?> column() { return NULL_COLUMN; } @Override public List<TripleIterator<?>> columns() { return COLUMNS; } @Override public void setPageSource(PageReadStore pageStore) { } } public abstract static class PrimitiveReader<T> implements ParquetValueReader<T> { private final ColumnDescriptor desc; protected final ColumnIterator<?> column; private final List<TripleIterator<?>> children; protected PrimitiveReader(ColumnDescriptor desc) { this.desc = desc; this.column = ColumnIterator.newIterator(desc, ""); this.children = ImmutableList.of(column); } @Override public void setPageSource(PageReadStore pageStore) { column.setPageSource(pageStore.getPageReader(desc)); } @Override public TripleIterator<?> column() { return column; } @Override public List<TripleIterator<?>> columns() { return children; } } public static class UnboxedReader<T> extends PrimitiveReader<T> { public UnboxedReader(ColumnDescriptor desc) { super(desc); } @Override @SuppressWarnings("unchecked") public T read(T ignored) { return (T) column.next(); } public boolean readBoolean() { return column.nextBoolean(); } public int readInteger() { return column.nextInteger(); } public long readLong() { return column.nextLong(); } public float readFloat() { return column.nextFloat(); } public double readDouble() { return column.nextDouble(); } public Binary readBinary() { return column.nextBinary(); } } public static class StringReader extends PrimitiveReader<String> { public StringReader(ColumnDescriptor desc) { super(desc); } @Override public String read(String reuse) { return column.nextBinary().toStringUsingUTF8(); } } public static class IntAsLongReader extends UnboxedReader<Long> { public 
IntAsLongReader(ColumnDescriptor desc) { super(desc); } @Override public Long read(Long ignored) { return readLong(); } @Override public long readLong() { return super.readInteger(); } } public static class FloatAsDoubleReader extends UnboxedReader<Double> { public FloatAsDoubleReader(ColumnDescriptor desc) { super(desc); } @Override public Double read(Double ignored) { return readDouble(); } @Override public double readDouble() { return super.readFloat(); } } public static class IntegerAsDecimalReader extends PrimitiveReader<BigDecimal> { private final int scale; public IntegerAsDecimalReader(ColumnDescriptor desc, int scale) { super(desc); this.scale = scale; } @Override public BigDecimal read(BigDecimal ignored) { return new BigDecimal(BigInteger.valueOf(column.nextInteger()), scale); } } public static class LongAsDecimalReader extends PrimitiveReader<BigDecimal> { private final int scale; public LongAsDecimalReader(ColumnDescriptor desc, int scale) { super(desc); this.scale = scale; } @Override public BigDecimal read(BigDecimal ignored) { return new BigDecimal(BigInteger.valueOf(column.nextLong()), scale); } } public static class BinaryAsDecimalReader extends PrimitiveReader<BigDecimal> { private int scale; public BinaryAsDecimalReader(ColumnDescriptor desc, int scale) { super(desc); this.scale = scale; } @Override public BigDecimal read(BigDecimal reuse) { byte[] bytes = column.nextBinary().getBytesUnsafe(); return new BigDecimal(new BigInteger(bytes), scale); } } public static class BytesReader extends PrimitiveReader<ByteBuffer> { public BytesReader(ColumnDescriptor desc) { super(desc); } @Override public ByteBuffer read(ByteBuffer reuse) { Binary binary = column.nextBinary(); ByteBuffer data = binary.toByteBuffer(); if (reuse != null && reuse.hasArray() && reuse.capacity() >= data.remaining()) { data.get(reuse.array(), reuse.arrayOffset(), data.remaining()); reuse.position(0); reuse.limit(data.remaining()); return reuse; } else { byte[] array = new 
byte[data.remaining()]; data.get(array, 0, data.remaining()); return ByteBuffer.wrap(array); } } } private static class OptionReader<T> implements ParquetValueReader<T> { private final int definitionLevel; private final ParquetValueReader<T> reader; private final TripleIterator<?> column; private final List<TripleIterator<?>> children; OptionReader(int definitionLevel, ParquetValueReader<T> reader) { this.definitionLevel = definitionLevel; this.reader = reader; this.column = reader.column(); this.children = reader.columns(); } @Override public void setPageSource(PageReadStore pageStore) { reader.setPageSource(pageStore); } @Override public TripleIterator<?> column() { return column; } @Override public T read(T reuse) { if (column.currentDefinitionLevel() > definitionLevel) { return reader.read(reuse); } for (TripleIterator<?> column : children) { column.nextNull(); } return null; } @Override public List<TripleIterator<?>> columns() { return children; } } public abstract static class RepeatedReader<T, I, E> implements ParquetValueReader<T> { private final int definitionLevel; private final int repetitionLevel; private final ParquetValueReader<E> reader; private final TripleIterator<?> column; private final List<TripleIterator<?>> children; protected RepeatedReader(int definitionLevel, int repetitionLevel, ParquetValueReader<E> reader) { this.definitionLevel = definitionLevel; this.repetitionLevel = repetitionLevel; this.reader = reader; this.column = reader.column(); this.children = reader.columns(); } @Override public void setPageSource(PageReadStore pageStore) { reader.setPageSource(pageStore); } @Override public TripleIterator<?> column() { return column; } @Override public T read(T reuse) { I intermediate = newListData(reuse); do { if (column.currentDefinitionLevel() > definitionLevel) { addElement(intermediate, reader.read(getElement(intermediate))); } else { // consume the empty list triple for (TripleIterator<?> column : children) { column.nextNull(); } // if 
the current definition level is equal to the definition level of this repeated type, // then the result is an empty list and the repetition level will always be <= rl. break; } } while (column.currentRepetitionLevel() > repetitionLevel); return buildList(intermediate); } @Override public List<TripleIterator<?>> columns() { return children; } protected abstract I newListData(T reuse); protected abstract E getElement(I list); protected abstract void addElement(I list, E element); protected abstract T buildList(I list); } public static class ListReader<E> extends RepeatedReader<List<E>, List<E>, E> { private List<E> lastList = null; private Iterator<E> elements = null; public ListReader(int definitionLevel, int repetitionLevel, ParquetValueReader<E> reader) { super(definitionLevel, repetitionLevel, reader); } @Override protected List<E> newListData(List<E> reuse) { List<E> list; if (lastList != null) { lastList.clear(); list = lastList; } else { list = Lists.newArrayList(); } if (reuse != null) { this.lastList = reuse; this.elements = reuse.iterator(); } else { this.lastList = null; this.elements = emptyIterator(); } return list; } @Override protected E getElement(List<E> reuse) { if (elements.hasNext()) { return elements.next(); } return null; } @Override protected void addElement(List<E> list, E element) { list.add(element); } @Override protected List<E> buildList(List<E> list) { return list; } } public abstract static class RepeatedKeyValueReader<M, I, K, V> implements ParquetValueReader<M> { private final int definitionLevel; private final int repetitionLevel; private final ParquetValueReader<K> keyReader; private final ParquetValueReader<V> valueReader; private final TripleIterator<?> column; private final List<TripleIterator<?>> children; protected RepeatedKeyValueReader(int definitionLevel, int repetitionLevel, ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) { this.definitionLevel = definitionLevel; this.repetitionLevel = repetitionLevel; 
this.keyReader = keyReader; this.valueReader = valueReader; this.column = keyReader.column(); this.children = ImmutableList.<TripleIterator<?>>builder() .addAll(keyReader.columns()) .addAll(valueReader.columns()) .build(); } @Override public void setPageSource(PageReadStore pageStore) { keyReader.setPageSource(pageStore); valueReader.setPageSource(pageStore); } @Override public TripleIterator<?> column() { return column; } @Override public M read(M reuse) { I intermediate = newMapData(reuse); do { if (column.currentDefinitionLevel() > definitionLevel) { Map.Entry<K, V> pair = getPair(intermediate); addPair(intermediate, keyReader.read(pair.getKey()), valueReader.read(pair.getValue())); } else { // consume the empty map triple for (TripleIterator<?> column : children) { column.nextNull(); } // if the current definition level is equal to the definition level of this repeated type, // then the result is an empty list and the repetition level will always be <= rl. break; } } while (column.currentRepetitionLevel() > repetitionLevel); return buildMap(intermediate); } @Override public List<TripleIterator<?>> columns() { return children; } protected abstract I newMapData(M reuse); protected abstract Map.Entry<K, V> getPair(I map); protected abstract void addPair(I map, K key, V value); protected abstract M buildMap(I map); } public static class MapReader<K, V> extends RepeatedKeyValueReader<Map<K, V>, Map<K, V>, K, V> { private final ReusableEntry<K, V> nullEntry = new ReusableEntry<>(); private Map<K, V> lastMap = null; private Iterator<Map.Entry<K, V>> pairs = null; public MapReader(int definitionLevel, int repetitionLevel, ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) { super(definitionLevel, repetitionLevel, keyReader, valueReader); } @Override protected Map<K, V> newMapData(Map<K, V> reuse) { Map<K, V> map; if (lastMap != null) { lastMap.clear(); map = lastMap; } else { map = Maps.newLinkedHashMap(); } if (reuse != null) { this.lastMap = reuse; 
this.pairs = reuse.entrySet().iterator(); } else { this.lastMap = null; this.pairs = emptyIterator(); } return map; } @Override protected Map.Entry<K, V> getPair(Map<K, V> map) { if (pairs.hasNext()) { return pairs.next(); } else { return nullEntry; } } @Override protected void addPair(Map<K, V> map, K key, V value) { map.put(key, value); } @Override protected Map<K, V> buildMap(Map<K, V> map) { return map; } } public static class ReusableEntry<K, V> implements Map.Entry<K, V> { private K key = null; private V value = null; public void set(K key, V value) { this.key = key; this.value = value; } @Override public K getKey() { return key; } @Override public V getValue() { return value; } @Override public V setValue(V value) { V lastValue = this.value; this.value = value; return lastValue; } } public abstract static class StructReader<T, I> implements ParquetValueReader<T> { private interface Setter<R> { void set(R record, int pos, Object reuse); } private final ParquetValueReader<?>[] readers; private final TripleIterator<?> column; private final TripleIterator<?>[] columns; private final Setter<I>[] setters; private final List<TripleIterator<?>> children; @SuppressWarnings("unchecked") protected StructReader(List<Type> types, List<ParquetValueReader<?>> readers) { this.readers = (ParquetValueReader<?>[]) Array.newInstance( ParquetValueReader.class, readers.size()); this.columns = (TripleIterator<?>[]) Array.newInstance(TripleIterator.class, readers.size()); this.setters = (Setter<I>[]) Array.newInstance(Setter.class, readers.size()); ImmutableList.Builder<TripleIterator<?>> columnsBuilder = ImmutableList.builder(); for (int i = 0; i < readers.size(); i += 1) { ParquetValueReader<?> reader = readers.get(i); this.readers[i] = readers.get(i); this.columns[i] = reader.column(); this.setters[i] = newSetter(reader, types.get(i)); columnsBuilder.addAll(reader.columns()); } this.children = columnsBuilder.build(); if (children.size() > 0) { this.column = children.get(0); } 
else { this.column = NullReader.NULL_COLUMN; } } @Override public final void setPageSource(PageReadStore pageStore) { for (int i = 0; i < readers.length; i += 1) { readers[i].setPageSource(pageStore); } } @Override public final TripleIterator<?> column() { return column; } @Override public final T read(T reuse) { I intermediate = newStructData(reuse); for (int i = 0; i < readers.length; i += 1) { set(intermediate, i, readers[i].read(get(intermediate, i))); //setters[i].set(intermediate, i, get(intermediate, i)); } return buildStruct(intermediate); } @Override public List<TripleIterator<?>> columns() { return children; } @SuppressWarnings("unchecked") private <E> Setter<I> newSetter(ParquetValueReader<E> reader, Type type) { if (reader instanceof UnboxedReader && type.isPrimitive()) { UnboxedReader<?> unboxed = (UnboxedReader<?>) reader; switch (type.asPrimitiveType().getPrimitiveTypeName()) { case BOOLEAN: return (record, pos, ignored) -> setBoolean(record, pos, unboxed.readBoolean()); case INT32: return (record, pos, ignored) -> setInteger(record, pos, unboxed.readInteger()); case INT64: return (record, pos, ignored) -> setLong(record, pos, unboxed.readLong()); case FLOAT: return (record, pos, ignored) -> setFloat(record, pos, unboxed.readFloat()); case DOUBLE: return (record, pos, ignored) -> setDouble(record, pos, unboxed.readDouble()); case FIXED_LEN_BYTE_ARRAY: case BINARY: return (record, pos, ignored) -> set(record, pos, unboxed.readBinary()); default: throw new UnsupportedOperationException("Unsupported type: " + type); } } // TODO: Add support for options to avoid the null check return (record, pos, reuse) -> { Object obj = reader.read((E) reuse); if (obj != null) { set(record, pos, obj); } else { setNull(record, pos); } }; } @SuppressWarnings("unchecked") private <E> E get(I intermediate, int pos) { return (E) getField(intermediate, pos); } protected abstract I newStructData(T reuse); protected abstract Object getField(I intermediate, int pos); protected 
abstract T buildStruct(I struct); /** * Used to set a struct value by position. * <p> * To avoid boxing, override {@link #setInteger(Object, int, int)} and similar methods. * * @param struct a struct object created by {@link #newStructData(Object)} * @param pos the position in the struct to set * @param value the value to set */ protected abstract void set(I struct, int pos, Object value); protected void setNull(I struct, int pos) { set(struct, pos, null); } protected void setBoolean(I struct, int pos, boolean value) { set(struct, pos, value); } protected void setInteger(I struct, int pos, int value) { set(struct, pos, value); } protected void setLong(I struct, int pos, long value) { set(struct, pos, value); } protected void setFloat(I struct, int pos, float value) { set(struct, pos, value); } protected void setDouble(I struct, int pos, double value) { set(struct, pos, value); } } }
6,551
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/PageIterator.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import org.apache.parquet.CorruptDeltaByteArrays; import org.apache.parquet.bytes.ByteBufferInputStream; import org.apache.parquet.bytes.BytesInput; import org.apache.parquet.bytes.BytesUtils; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Dictionary; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.page.DataPage; import org.apache.parquet.column.page.DataPageV1; import org.apache.parquet.column.page.DataPageV2; import org.apache.parquet.column.values.RequiresPreviousReader; import org.apache.parquet.column.values.ValuesReader; import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridDecoder; import org.apache.parquet.io.ParquetDecodingException; import org.apache.parquet.io.api.Binary; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import static java.lang.String.format; import static org.apache.parquet.column.ValuesType.DEFINITION_LEVEL; import static org.apache.parquet.column.ValuesType.REPETITION_LEVEL; import static org.apache.parquet.column.ValuesType.VALUES; abstract class PageIterator<T> implements TripleIterator<T> { private static final Logger LOG = LoggerFactory.getLogger(PageIterator.class); @SuppressWarnings("unchecked") static <T> PageIterator<T> newIterator(ColumnDescriptor desc, String 
writerVersion) { switch (desc.getType()) { case BOOLEAN: return (PageIterator<T>) new PageIterator<Boolean>(desc, writerVersion) { @Override public Boolean next() { return nextBoolean(); } }; case INT32: return (PageIterator<T>) new PageIterator<Integer>(desc, writerVersion) { @Override public Integer next() { return nextInteger(); } }; case INT64: return (PageIterator<T>) new PageIterator<Long>(desc, writerVersion) { @Override public Long next() { return nextLong(); } }; case FLOAT: return (PageIterator<T>) new PageIterator<Float>(desc, writerVersion) { @Override public Float next() { return nextFloat(); } }; case DOUBLE: return (PageIterator<T>) new PageIterator<Double>(desc, writerVersion) { @Override public Double next() { return nextDouble(); } }; case FIXED_LEN_BYTE_ARRAY: case BINARY: return (PageIterator<T>) new PageIterator<Binary>(desc, writerVersion) { @Override public Binary next() { return nextBinary(); } }; default: throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType()); } } private final ColumnDescriptor desc; private final String writerVersion; // iterator state private boolean hasNext = false; private int triplesRead = 0; private int currentDL = 0; private int currentRL = 0; // page bookkeeping private Dictionary dict = null; private DataPage page = null; private int triplesCount = 0; private Encoding valueEncoding = null; private IntIterator definitionLevels = null; private IntIterator repetitionLevels = null; private ValuesReader values = null; private PageIterator(ColumnDescriptor desc, String writerVersion) { this.desc = desc; this.writerVersion = writerVersion; } public void setPage(DataPage page) { Preconditions.checkNotNull(page, "Cannot read from null page"); this.page = page; this.page.accept(new DataPage.Visitor<ValuesReader>() { @Override public ValuesReader visit(DataPageV1 dataPageV1) { initFromPage(dataPageV1); return null; } @Override public ValuesReader visit(DataPageV2 dataPageV2) { 
initFromPage(dataPageV2); return null; } }); this.triplesRead = 0; advance(); } public void setDictionary(Dictionary dict) { this.dict = dict; } public void reset() { this.page = null; this.triplesCount = 0; this.triplesRead = 0; this.definitionLevels = null; this.repetitionLevels = null; this.values = null; this.hasNext = false; } public int currentPageCount() { return triplesCount; } @Override public boolean hasNext() { return hasNext; } @Override public int currentDefinitionLevel() { Preconditions.checkArgument(currentDL >= 0, "Should not read definition, past page end"); return currentDL; } @Override public int currentRepetitionLevel() { // Preconditions.checkArgument(currentDL >= 0, "Should not read repetition, past page end"); return currentRL; } @Override public boolean nextBoolean() { advance(); try { return values.readBoolean(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public int nextInteger() { advance(); try { return values.readInteger(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public long nextLong() { advance(); try { return values.readLong(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public float nextFloat() { advance(); try { return values.readFloat(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public double nextDouble() { advance(); try { return values.readDouble(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public Binary nextBinary() { advance(); try { return values.readBytes(); } catch (RuntimeException e) { throw handleRuntimeException(e); } } @Override public <V> V nextNull() { advance(); // values do not contain nulls return null; } private void advance() { if (triplesRead < triplesCount) { this.currentDL = definitionLevels.nextInt(); this.currentRL = repetitionLevels.nextInt(); this.triplesRead += 1; this.hasNext = true; } else { this.currentDL = -1; this.currentRL = 
-1; this.hasNext = false; } } RuntimeException handleRuntimeException(RuntimeException e) { if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, valueEncoding) && e instanceof ArrayIndexOutOfBoundsException) { // this is probably PARQUET-246, which may happen if reading data with // MR because this can't be detected without reading all footers throw new ParquetDecodingException("Read failure possibly due to " + "PARQUET-246: try setting parquet.split.files to false", new ParquetDecodingException( format("Can't read value in column %s at value %d out of %d in current page. " + "repetition level: %d, definition level: %d", desc, triplesRead, triplesCount, currentRL, currentDL), e)); } throw new ParquetDecodingException( format("Can't read value in column %s at value %d out of %d in current page. " + "repetition level: %d, definition level: %d", desc, triplesRead, triplesCount, currentRL, currentDL), e); } private void initDataReader(Encoding dataEncoding, ByteBufferInputStream in, int valueCount) { ValuesReader previousReader = values; this.valueEncoding = dataEncoding; // TODO: May want to change this so that this class is not dictionary-aware. 
// For dictionary columns, this class could rely on wrappers to correctly handle dictionaries // This isn't currently possible because RLE must be read by getDictionaryBasedValuesReader if (dataEncoding.usesDictionary()) { if (dict == null) { throw new ParquetDecodingException( "could not read page in col " + desc + " as the dictionary was missing for encoding " + dataEncoding); } this.values = dataEncoding.getDictionaryBasedValuesReader(desc, VALUES, dict); } else { this.values = dataEncoding.getValuesReader(desc, VALUES); } // if (dataEncoding.usesDictionary() && converter.hasDictionarySupport()) { // bindToDictionary(dictionary); // } else { // bind(path.getType()); // } try { values.initFromPage(valueCount, in); } catch (IOException e) { throw new ParquetDecodingException("could not read page in col " + desc, e); } if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, dataEncoding) && previousReader != null && previousReader instanceof RequiresPreviousReader) { // previous reader can only be set if reading sequentially ((RequiresPreviousReader) values).setPreviousReader(previousReader); } } private void initFromPage(DataPageV1 page) { this.triplesCount = page.getValueCount(); ValuesReader rlReader = page.getRlEncoding().getValuesReader(desc, REPETITION_LEVEL); ValuesReader dlReader = page.getDlEncoding().getValuesReader(desc, DEFINITION_LEVEL); this.repetitionLevels = new ValuesReaderIntIterator(rlReader); this.definitionLevels = new ValuesReaderIntIterator(dlReader); try { BytesInput bytes = page.getBytes(); LOG.debug("page size {} bytes and {} records", bytes.size(), triplesCount); LOG.debug("reading repetition levels at 0"); ByteBufferInputStream in = bytes.toInputStream(); rlReader.initFromPage(triplesCount, in); LOG.debug("reading definition levels at {}", in.position()); dlReader.initFromPage(triplesCount, in); LOG.debug("reading data at {}", in.position()); initDataReader(page.getValueEncoding(), in, page.getValueCount()); } catch 
(IOException e) { throw new ParquetDecodingException("could not read page " + page + " in col " + desc, e); } } private void initFromPage(DataPageV2 page) { this.triplesCount = page.getValueCount(); this.repetitionLevels = newRLEIterator(desc.getMaxRepetitionLevel(), page.getRepetitionLevels()); this.definitionLevels = newRLEIterator(desc.getMaxDefinitionLevel(), page.getDefinitionLevels()); LOG.debug("page data size {} bytes and {} records", page.getData().size(), triplesCount); try { initDataReader(page.getDataEncoding(), page.getData().toInputStream(), triplesCount); } catch (IOException e) { throw new ParquetDecodingException("could not read page " + page + " in col " + desc, e); } } private IntIterator newRLEIterator(int maxLevel, BytesInput bytes) { try { if (maxLevel == 0) { return new NullIntIterator(); } return new RLEIntIterator( new RunLengthBitPackingHybridDecoder( BytesUtils.getWidthFromMaxInt(maxLevel), bytes.toInputStream())); } catch (IOException e) { throw new ParquetDecodingException("could not read levels in page for col " + desc, e); } } static abstract class IntIterator { abstract int nextInt(); } static class ValuesReaderIntIterator extends IntIterator { ValuesReader delegate; ValuesReaderIntIterator(ValuesReader delegate) { super(); this.delegate = delegate; } @Override int nextInt() { return delegate.readInteger(); } } static class RLEIntIterator extends IntIterator { RunLengthBitPackingHybridDecoder delegate; RLEIntIterator(RunLengthBitPackingHybridDecoder delegate) { this.delegate = delegate; } @Override int nextInt() { try { return delegate.readInt(); } catch (IOException e) { throw new ParquetDecodingException(e); } } } private static final class NullIntIterator extends IntIterator { @Override int nextInt() { return 0; } } }
6,552
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriteAdapter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.io.FileAppender; import org.apache.parquet.hadoop.ParquetWriter; import java.io.IOException; public class ParquetWriteAdapter<D> implements FileAppender<D> { private ParquetWriter<D> writer = null; private long numRecords = 0L; public ParquetWriteAdapter(ParquetWriter<D> writer) throws IOException { this.writer = writer; } @Override public void add(D datum) { try { numRecords += 1L; writer.write(datum); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to write record %s", datum); } } @Override public Metrics metrics() { return new Metrics(numRecords, null, null, null); } @Override public void close() throws IOException { if (writer != null) { writer.close(); this.writer = null; } } }
6,553
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetIO.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.hadoop.HadoopOutputFile; import com.netflix.iceberg.io.DelegatingInputStream; import com.netflix.iceberg.io.DelegatingOutputStream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.parquet.hadoop.util.HadoopStreams; import org.apache.parquet.io.InputFile; import org.apache.parquet.io.OutputFile; import org.apache.parquet.io.DelegatingSeekableInputStream; import org.apache.parquet.io.DelegatingPositionOutputStream; import org.apache.parquet.io.PositionOutputStream; import org.apache.parquet.io.SeekableInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import static org.apache.parquet.hadoop.util.HadoopOutputFile.fromPath; import static org.apache.parquet.hadoop.util.HadoopInputFile.fromStatus; /** * Methods in this class translate from the IO API to Parquet's IO API. 
*/ class ParquetIO { private ParquetIO() { } static InputFile file(com.netflix.iceberg.io.InputFile file) { // TODO: use reflection to avoid depending on classes from iceberg-hadoop // TODO: use reflection to avoid depending on classes from hadoop if (file instanceof HadoopInputFile) { HadoopInputFile hfile = (HadoopInputFile) file; try { return fromStatus(hfile.getStat(), hfile.getConf()); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to create Parquet input file for %s", file); } } return new ParquetInputFile(file); } static OutputFile file(com.netflix.iceberg.io.OutputFile file) { if (file instanceof HadoopOutputFile) { HadoopOutputFile hfile = (HadoopOutputFile) file; try { return fromPath(hfile.getPath(), hfile.getConf()); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to create Parquet output file for %s", file); } } return new ParquetOutputFile(file); } static OutputFile file(com.netflix.iceberg.io.OutputFile file, Configuration conf) { if (file instanceof HadoopOutputFile) { HadoopOutputFile hfile = (HadoopOutputFile) file; try { return fromPath(hfile.getPath(), conf); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to create Parquet output file for %s", file); } } return new ParquetOutputFile(file); } static SeekableInputStream stream(com.netflix.iceberg.io.SeekableInputStream stream) { if (stream instanceof DelegatingInputStream) { InputStream wrapped = ((DelegatingInputStream) stream).getDelegate(); if (wrapped instanceof FSDataInputStream) { return HadoopStreams.wrap((FSDataInputStream) wrapped); } } return new ParquetInputStreamAdapter(stream); } static PositionOutputStream stream(com.netflix.iceberg.io.PositionOutputStream stream) { if (stream instanceof DelegatingOutputStream) { OutputStream wrapped = ((DelegatingOutputStream) stream).getDelegate(); if (wrapped instanceof FSDataOutputStream) { return HadoopStreams.wrap((FSDataOutputStream) wrapped); } } return new 
ParquetOutputStreamAdapter(stream); } private static class ParquetInputStreamAdapter extends DelegatingSeekableInputStream { private final com.netflix.iceberg.io.SeekableInputStream delegate; private ParquetInputStreamAdapter(com.netflix.iceberg.io.SeekableInputStream delegate) { super(delegate); this.delegate = delegate; } @Override public long getPos() throws IOException { return delegate.getPos(); } @Override public void seek(long newPos) throws IOException { delegate.seek(newPos); } } private static class ParquetOutputStreamAdapter extends DelegatingPositionOutputStream { private final com.netflix.iceberg.io.PositionOutputStream delegate; private ParquetOutputStreamAdapter(com.netflix.iceberg.io.PositionOutputStream delegate) { super(delegate); this.delegate = delegate; } @Override public long getPos() throws IOException { return delegate.getPos(); } } private static class ParquetOutputFile implements OutputFile { private final com.netflix.iceberg.io.OutputFile file; private ParquetOutputFile(com.netflix.iceberg.io.OutputFile file) { this.file = file; } @Override public PositionOutputStream create(long ignored) throws IOException { return stream(file.create()); } @Override public PositionOutputStream createOrOverwrite(long ignored) throws IOException { return stream(file.createOrOverwrite()); } @Override public boolean supportsBlockSize() { return false; } @Override public long defaultBlockSize() { return 0; } } private static class ParquetInputFile implements InputFile { private final com.netflix.iceberg.io.InputFile file; private ParquetInputFile(com.netflix.iceberg.io.InputFile file) { this.file = file; } @Override public long getLength() throws IOException { return file.getLength(); } @Override public SeekableInputStream newStream() throws IOException { return stream(file.newStream()); } } }
6,554
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/DataTestHelpers.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import org.junit.Assert; import java.util.List; import java.util.Map; public class DataTestHelpers { public static void assertEquals(Types.StructType struct, Record expected, Record actual) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = expected.get(i); Object actualValue = actual.get(i); assertEquals(fieldType, expectedValue, actualValue); } } public static void assertEquals(Types.ListType list, List<?> expected, List<?> actual) { Type elementType = list.elementType(); Assert.assertEquals("List size should match", expected.size(), actual.size()); for (int i = 0; i < expected.size(); i += 1) { Object expectedValue = expected.get(i); Object actualValue = actual.get(i); assertEquals(elementType, expectedValue, actualValue); } } public static void assertEquals(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) { Type valueType = map.valueType(); Assert.assertEquals("Map size should match", expected.size(), actual.size()); for (Object expectedKey : expected.keySet()) { Object expectedValue = expected.get(expectedKey); Object actualValue = actual.get(expectedKey); assertEquals(valueType, expectedValue, actualValue); } } private static void assertEquals(Type type, Object 
expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DATE: case TIME: case TIMESTAMP: case UUID: case BINARY: case DECIMAL: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case FIXED: Assert.assertTrue("Expected should be a byte[]", expected instanceof byte[]); Assert.assertTrue("Actual should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Array contents should be equal", (byte[]) expected, (byte[]) actual); break; case STRUCT: Assert.assertTrue("Expected should be a Record", expected instanceof Record); Assert.assertTrue("Actual should be a Record", actual instanceof Record); assertEquals(type.asStructType(), (Record) expected, (Record) actual); break; case LIST: Assert.assertTrue("Expected should be a List", expected instanceof List); Assert.assertTrue("Actual should be a List", actual instanceof List); assertEquals(type.asListType(), (List) expected, (List) actual); break; case MAP: Assert.assertTrue("Expected should be a Map", expected instanceof Map); Assert.assertTrue("Actual should be a Map", actual instanceof Map); assertEquals(type.asMapType(), (Map<?, ?>) expected, (Map<?, ?>) actual); break; default: throw new IllegalArgumentException("Not a supported type: " + type); } } }
6,555
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/TestLocalScan.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.netflix.iceberg.AppendFiles; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.TableProperties; import com.netflix.iceberg.Tables; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.data.avro.DataWriter; import com.netflix.iceberg.data.parquet.GenericParquetWriter; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.hadoop.HadoopTables; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.types.Types; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; import 
java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Set; import static com.google.common.collect.Iterables.concat; import static com.google.common.collect.Iterables.filter; import static com.google.common.collect.Iterables.transform; import static com.netflix.iceberg.DataFiles.fromInputFile; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.hadoop.HadoopOutputFile.fromPath; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; @RunWith(Parameterized.class) public class TestLocalScan { private static final Schema SCHEMA = new Schema( required(1, "id", Types.LongType.get()), optional(2, "data", Types.StringType.get())); private static final Configuration CONF = new Configuration(); private static final Tables TABLES = new HadoopTables(CONF); @Rule public final TemporaryFolder temp = new TemporaryFolder(); @Parameterized.Parameters public static Object[][] parameters() { return new Object[][] { new Object[] { "parquet" }, new Object[] { "avro" } }; } private final FileFormat format; public TestLocalScan(String format) { this.format = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH)); } private String sharedTableLocation = null; private Table sharedTable = null; private List<Record> file1Records = null; private List<Record> file2Records = null; private List<Record> file3Records = null; @Before public void createTables() throws IOException { File location = temp.newFolder("shared"); Assert.assertTrue(location.delete()); this.sharedTableLocation = location.toString(); this.sharedTable = TABLES.create( SCHEMA, PartitionSpec.unpartitioned(), ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()), sharedTableLocation); Record record = GenericRecord.create(SCHEMA); this.file1Records = Lists.newArrayList( 
record.copy(ImmutableMap.of("id", 0L, "data", "clarification")), record.copy(ImmutableMap.of("id", 1L, "data", "risky")), record.copy(ImmutableMap.of("id", 2L, "data", "falafel")) ); InputFile file1 = writeFile(sharedTableLocation, format.addExtension("file-1"), file1Records); Record nullData = record.copy(); nullData.setField("id", 11L); nullData.setField("data", null); this.file2Records = Lists.newArrayList( record.copy(ImmutableMap.of("id", 10L, "data", "clammy")), record.copy(ImmutableMap.of("id", 11L, "data", "evacuate")), record.copy(ImmutableMap.of("id", 12L, "data", "tissue")) ); InputFile file2 = writeFile(sharedTableLocation, format.addExtension("file-2"), file2Records); this.file3Records = Lists.newArrayList( record.copy(ImmutableMap.of("id", 20L, "data", "ocean")), record.copy(ImmutableMap.of("id", 21L, "data", "holistic")), record.copy(ImmutableMap.of("id", 22L, "data", "preventative")) ); InputFile file3 = writeFile(sharedTableLocation, format.addExtension("file-3"), file3Records); // commit the test data sharedTable.newAppend() .appendFile(DataFiles.builder(PartitionSpec.unpartitioned()) .withInputFile(file1) .withMetrics(new Metrics(3L, null, // no column sizes ImmutableMap.of(1, 3L), // value count ImmutableMap.of(1, 0L), // null count ImmutableMap.of(1, longToBuffer(0L)), // lower bounds ImmutableMap.of(1, longToBuffer(2L)))) // upper bounds) .build()) .appendFile(DataFiles.builder(PartitionSpec.unpartitioned()) .withInputFile(file2) .withMetrics(new Metrics(3L, null, // no column sizes ImmutableMap.of(1, 3L), // value count ImmutableMap.of(1, 0L), // null count ImmutableMap.of(1, longToBuffer(10L)), // lower bounds ImmutableMap.of(1, longToBuffer(12L)))) // upper bounds) .build()) .appendFile(DataFiles.builder(PartitionSpec.unpartitioned()) .withInputFile(file3) .withMetrics(new Metrics(3L, null, // no column sizes ImmutableMap.of(1, 3L), // value count ImmutableMap.of(1, 0L), // null count ImmutableMap.of(1, longToBuffer(20L)), // lower bounds 
ImmutableMap.of(1, longToBuffer(22L)))) // upper bounds) .build()) .commit(); } @Test public void testRandomData() throws IOException { List<Record> expected = RandomGenericData.generate(SCHEMA, 1000, 435691832918L); File location = temp.newFolder(format.name()); Assert.assertTrue(location.delete()); Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()), location.toString()); AppendFiles append = table.newAppend(); int fileNum = 0; int recordsPerFile = 200; Iterator<Record> iter = expected.iterator(); while (iter.hasNext()) { Path path = new Path(location.toString(), format.addExtension("file-" + fileNum)); int numRecords; List<Record> records = Lists.newArrayList(); for (numRecords = 0; numRecords < recordsPerFile && iter.hasNext(); numRecords += 1) { records.add(iter.next()); } writeFile(location.toString(), format.addExtension("file-" + fileNum), records); append.appendFile(fromInputFile(HadoopInputFile.fromPath(path, CONF), numRecords)); fileNum += 1; } append.commit(); Set<Record> records = Sets.newHashSet(IcebergGenerics.read(table).build()); Assert.assertEquals("Should produce correct number of records", expected.size(), records.size()); Assert.assertEquals("Random record set should match", Sets.newHashSet(expected), records); } @Test public void testFullScan() { Iterable<Record> results = IcebergGenerics.read(sharedTable).build(); Set<Record> expected = Sets.newHashSet(); expected.addAll(file1Records); expected.addAll(file2Records); expected.addAll(file3Records); Set<Record> records = Sets.newHashSet(results); Assert.assertEquals("Should produce correct number of records", expected.size(), records.size()); Assert.assertEquals("Random record set should match", Sets.newHashSet(expected), records); } @Test public void testFilter() { Iterable<Record> result = IcebergGenerics.read(sharedTable).where(lessThan("id", 3)).build(); Assert.assertEquals("Records should match file 1", 
Sets.newHashSet(file1Records), Sets.newHashSet(result)); result = IcebergGenerics.read(sharedTable).where(lessThanOrEqual("id", 1)).build(); Assert.assertEquals("Records should match file 1 without id 2", Sets.newHashSet(filter(file1Records, r -> (Long) r.getField("id") <= 1)), Sets.newHashSet(result)); } @Test public void testProject() { Iterable<Record> results = IcebergGenerics.read(sharedTable).select("id").build(); Set<Long> expected = Sets.newHashSet(); expected.addAll(Lists.transform(file1Records, record -> (Long) record.getField("id"))); expected.addAll(Lists.transform(file2Records, record -> (Long) record.getField("id"))); expected.addAll(Lists.transform(file3Records, record -> (Long) record.getField("id"))); results.forEach(record -> Assert.assertEquals("Record should have one projected field", 1, record.size())); Assert.assertEquals("Should project only id columns", expected, Sets.newHashSet(transform(results, record -> (Long) record.getField("id")))); } @Test public void testProjectWithMissingFilterColumn() { Iterable<Record> results = IcebergGenerics.read(sharedTable) .where(Expressions.greaterThanOrEqual("id", 1)) .where(Expressions.lessThan("id", 21)) .select("data").build(); Set<String> expected = Sets.newHashSet(); for (Record record : concat(file1Records, file2Records, file3Records)) { Long id = (Long) record.getField("id"); if (id >= 1 && id < 21) { expected.add(record.getField("data").toString()); } } results.forEach(record -> Assert.assertEquals("Record should have two projected fields", 2, record.size())); Assert.assertEquals("Should project correct rows", expected, Sets.newHashSet(transform(results, record -> record.getField("data").toString()))); } private InputFile writeFile(String location, String filename, List<Record> records) throws IOException { Path path = new Path(location, filename); FileFormat format = FileFormat.fromFileName(filename); Preconditions.checkNotNull(format, "Cannot determine format for file: %s", filename); switch 
(format) { case AVRO: try (FileAppender<Record> appender = Avro.write(fromPath(path, CONF)) .schema(SCHEMA) .createWriterFunc(DataWriter::create) .named(format.name()) .build()) { appender.addAll(records); } return HadoopInputFile.fromPath(path, CONF); case PARQUET: try (FileAppender<Record> appender = Parquet.write(fromPath(path, CONF)) .schema(SCHEMA) .createWriterFunc(GenericParquetWriter::buildWriter) .build()) { appender.addAll(records); } return HadoopInputFile.fromPath(path, CONF); default: throw new UnsupportedOperationException("Cannot write format: " + format); } } private static ByteBuffer longToBuffer(long value) { return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, value); } }
6,556
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/TestReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Format-agnostic tests for read-time projection: records written with one schema are read
 * back with a (possibly reordered, renamed, or pruned) read schema.
 * <p>
 * Subclasses implement {@link #writeAndRead} for a concrete file format (e.g. Avro, Parquet).
 */
public abstract class TestReadProjection {
  /**
   * Writes a single record using writeSchema, then reads it back projected with readSchema.
   *
   * @param desc a short name for the round-trip, usable as a file name component
   * @return the record as read back under the projection
   */
  protected abstract Record writeAndRead(String desc,
                                         Schema writeSchema,
                                         Schema readSchema,
                                         Record record) throws IOException;

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  /** Projecting with the identical schema should return all field values unchanged. */
  @Test
  public void testFullProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(schema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Record projected = writeAndRead("full_projection", schema, schema, record);

    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));

    // compare as CharSequence: the reader may return Utf8 or another CharSequence impl
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.getField("data"));
    Assert.assertTrue("Should contain the correct data value", cmp == 0);
  }

  /** Reordering the read schema's columns should reorder positional access accordingly. */
  @Test
  public void testReorderedFullProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(schema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("full_projection", schema, reordered, record);

    Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
    Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
  }

  /** Fields present only in the read schema should come back as nulls in their positions. */
  @Test
  public void testReorderedProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(schema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "missing_2", Types.LongType.get())
    );

    Record projected = writeAndRead("full_projection", schema, reordered, record);

    Assert.assertNull("Should contain the correct 0 value", projected.get(0));
    Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
    Assert.assertNull("Should contain the correct 2 value", projected.get(2));
  }

  /** Projecting an empty schema should yield a record with no accessible positions. */
  @Test
  public void testEmptyProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(schema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Record projected = writeAndRead("empty_projection", schema, schema.select(), record);

    Assert.assertNotNull("Should read a non-null record", projected);
    try {
      projected.get(0);
      Assert.fail("Should not retrieve value with ordinal 0");
    } catch (ArrayIndexOutOfBoundsException e) {
      // this is expected because there are no values
    }
  }

  /** Selecting a single top-level column should drop the other column entirely. */
  @Test
  public void testBasicProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record);
    Assert.assertNull("Should not project data", projected.getField("data"));
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));

    Schema dataOnly = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record);

    Assert.assertNull("Should not project id", projected.getField("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.getField("data"));
    Assert.assertTrue("Should contain the correct data value", cmp == 0);
  }

  /** Renamed fields should resolve by field id, not by name. */
  @Test
  public void testRename() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    record.setField("data", "test");

    Schema readSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        // same field id (1) as "data", so the value should be carried over under the new name
        Types.NestedField.optional(1, "renamed", Types.StringType.get())
    );

    Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);

    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.getField("renamed"));
    Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0);
  }

  /** Projection should prune fields inside nested structs, not just top-level columns. */
  @Test
  public void testNestedStructProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get()),
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    Record location = GenericRecord.create(writeSchema.findType("location").asStructType());
    location.setField("lat", 52.995143f);
    location.setField("long", -1.539054f);
    record.setField("location", location);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Record projectedLocation = (Record) projected.getField("location");
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    Assert.assertNull("Should not project location", projectedLocation);

    Schema latOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
    projectedLocation = (Record) projected.getField("location");
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project location", projected.getField("location"));
    Assert.assertNull("Should not project longitude", projectedLocation.getField("long"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.getField("lat"), 0.000001f);

    Schema longOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
    projectedLocation = (Record) projected.getField("location");
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project location", projected.getField("location"));
    Assert.assertNull("Should not project latitutde", projectedLocation.getField("lat"));
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.getField("long"), 0.000001f);

    Schema locationOnly = writeSchema.select("location");
    projected = writeAndRead("location_only", writeSchema, locationOnly, record);
    projectedLocation = (Record) projected.getField("location");
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project location", projected.getField("location"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.getField("lat"), 0.000001f);
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.getField("long"), 0.000001f);
  }

  /** Selecting only a map's key or value path should still return the whole map. */
  @Test
  public void testMapProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "properties",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get()))
    );

    Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B");

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    record.setField("properties", properties);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    Assert.assertNull("Should not project properties map",
        projected.getField("properties"));

    Schema keyOnly = writeSchema.select("properties.key");
    projected = writeAndRead("key_only", writeSchema, keyOnly, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.getField("properties")));

    Schema valueOnly = writeSchema.select("properties.value");
    projected = writeAndRead("value_only", writeSchema, valueOnly, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.getField("properties")));

    Schema mapOnly = writeSchema.select("properties");
    projected = writeAndRead("map_only", writeSchema, mapOnly, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.getField("properties")));
  }

  /**
   * Normalizes a read-back map for comparison: keys are stringified and CharSequence values
   * (which may be Utf8 etc.) are converted to String; other values are kept as-is.
   */
  private Map<String, ?> toStringMap(Map<?, ?> map) {
    Map<String, Object> stringMap = Maps.newHashMap();
    for (Map.Entry<?, ?> entry : map.entrySet()) {
      if (entry.getValue() instanceof CharSequence) {
        stringMap.put(entry.getKey().toString(), entry.getValue().toString());
      } else {
        stringMap.put(entry.getKey().toString(), entry.getValue());
      }
    }
    return stringMap;
  }

  /** Projection inside map values (structs) should prune and rename struct fields by id. */
  @Test
  public void testMapOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "locations",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of(
                Types.NestedField.required(1, "lat", Types.FloatType.get()),
                Types.NestedField.required(2, "long", Types.FloatType.get())
            ))
        )
    );

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    Record l1 = GenericRecord.create(
        writeSchema.findType("locations").asMapType().valueType().asStructType());
    l1.setField("lat", 53.992811f);
    l1.setField("long", -1.542616f);
    Record l2 = GenericRecord.create(
        writeSchema.findType("locations").asMapType().valueType().asStructType());
    l2.setField("lat", 52.995143f);
    l2.setField("long", -1.539054f);
    record.setField("locations", ImmutableMap.of("L1", l1, "L2", l2));

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    Assert.assertNull("Should not project locations map",
        projected.getField("locations"));

    projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project locations map",
        record.getField("locations"), toStringMap((Map) projected.getField("locations")));

    projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Map<String, ?> locations = toStringMap((Map) projected.getField("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    Record projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain lat",
        53.992811f, (float) projectedL1.getField("lat"), 0.000001);
    Assert.assertNull("L1 should not contain long", projectedL1.getField("long"));
    Record projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain lat",
        52.995143f, (float) projectedL2.getField("lat"), 0.000001);
    Assert.assertNull("L2 should not contain long", projectedL2.getField("long"));

    projected = writeAndRead("long_only",
        writeSchema, writeSchema.select("locations.long"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    locations = toStringMap((Map) projected.getField("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertNull("L1 should not contain lat", projectedL1.getField("lat"));
    Assert.assertEquals("L1 should contain long",
        -1.542616f, (float) projectedL1.getField("long"), 0.000001);
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertNull("L2 should not contain lat", projectedL2.getField("lat"));
    Assert.assertEquals("L2 should contain long",
        -1.539054f, (float) projectedL2.getField("long"), 0.000001);

    // rename lat -> latitude via matching field id (1) inside the map's value struct
    Schema latitiudeRenamed = new Schema(
        Types.NestedField.optional(5, "locations",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of(
                Types.NestedField.required(1, "latitude", Types.FloatType.get())
            ))
        )
    );

    projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    locations = toStringMap((Map) projected.getField("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain latitude",
        53.992811f, (float) projectedL1.getField("latitude"), 0.000001);
    Assert.assertNull("L1 should not contain lat", projectedL1.getField("lat"));
    Assert.assertNull("L1 should not contain long", projectedL1.getField("long"));
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain latitude",
        52.995143f, (float) projectedL2.getField("latitude"), 0.000001);
    Assert.assertNull("L2 should not contain lat", projectedL2.getField("lat"));
    Assert.assertNull("L2 should not contain long", projectedL2.getField("long"));
  }

  /** Selecting a list's element path should still return the whole list. */
  @Test
  public void testListProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(10, "values",
            Types.ListType.ofOptional(11, Types.LongType.get()))
    );

    List<Long> values = ImmutableList.of(56L, 57L, 58L);

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    record.setField("values", values);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    Assert.assertNull("Should not project values list", projected.getField("values"));

    Schema elementOnly = writeSchema.select("values.element");
    projected = writeAndRead("element_only", writeSchema, elementOnly, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project entire list", values, projected.getField("values"));

    Schema listOnly = writeSchema.select("values");
    projected = writeAndRead("list_only", writeSchema, listOnly, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project entire list", values, projected.getField("values"));
  }

  /** Projection inside list elements (structs) should prune and rename struct fields by id. */
  @Test
  @SuppressWarnings("unchecked")
  public void testListOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(22, "points",
            Types.ListType.ofOptional(21, Types.StructType.of(
                Types.NestedField.required(19, "x", Types.IntegerType.get()),
                Types.NestedField.optional(18, "y", Types.IntegerType.get())
            ))
        )
    );

    Record record = GenericRecord.create(writeSchema.asStruct());
    record.setField("id", 34L);
    Record p1 = GenericRecord.create(
        writeSchema.findType("points").asListType().elementType().asStructType());
    p1.setField("x", 1);
    p1.setField("y", 2);
    Record p2 = GenericRecord.create(
        writeSchema.findType("points").asListType().elementType().asStructType());
    p2.setField("x", 3);
    p2.setField("y", null);
    record.setField("points", ImmutableList.of(p1, p2));

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value",
        34L, (long) projected.getField("id"));
    Assert.assertNull("Should not project points list", projected.getField("points"));

    projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertEquals("Should project points list",
        record.getField("points"), projected.getField("points"));

    projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project points list", projected.getField("points"));
    List<Record> points = (List<Record>) projected.getField("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    Record projectedP1 = points.get(0);
    Assert.assertEquals("Should project x", 1, (int) projectedP1.getField("x"));
    Assert.assertNull("Should not project y", projectedP1.getField("y"));
    Record projectedP2 = points.get(1);
    Assert.assertEquals("Should project x", 3, (int) projectedP2.getField("x"));
    Assert.assertNull("Should not project y", projectedP2.getField("y"));

    projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project points list", projected.getField("points"));
    points = (List<Record>) projected.getField("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    projectedP1 = points.get(0);
    Assert.assertNull("Should not project x", projectedP1.getField("x"));
    Assert.assertEquals("Should project y", 2, (int) projectedP1.getField("y"));
    projectedP2 = points.get(1);
    Assert.assertNull("Should not project x", projectedP2.getField("x"));
    Assert.assertEquals("Should project null y", null, projectedP2.getField("y"));

    // rename y -> z via matching field id (18) inside the list's element struct
    Schema yRenamed = new Schema(
        Types.NestedField.optional(22, "points",
            Types.ListType.ofOptional(21, Types.StructType.of(
                Types.NestedField.optional(18, "z", Types.IntegerType.get())
            ))
        )
    );

    projected = writeAndRead("y_renamed", writeSchema, yRenamed, record);
    Assert.assertNull("Should not project id", projected.getField("id"));
    Assert.assertNotNull("Should project points list", projected.getField("points"));
    points = (List<Record>) projected.getField("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    projectedP1 = points.get(0);
    Assert.assertNull("Should not project x", projectedP1.getField("x"));
    Assert.assertNull("Should not project y", projectedP1.getField("y"));
    Assert.assertEquals("Should project z", 2, (int) projectedP1.getField("z"));
    projectedP2 = points.get(1);
    Assert.assertNull("Should not project x", projectedP2.getField("x"));
    Assert.assertNull("Should not project y", projectedP2.getField("y"));
    Assert.assertEquals("Should project null z", null, projectedP2.getField("z"));
  }
}
6,557
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/RandomGenericData.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.function.Supplier;

import static java.time.temporal.ChronoUnit.MICROS;

/**
 * Test helper that produces random {@link Record} instances conforming to an Iceberg
 * {@link Schema}.
 * <p>
 * Generation is driven by a single seeded {@link Random}, so a given (schema, numRecords, seed)
 * triple always yields the same data within one run. Note that the order in which random values
 * are drawn is part of that determinism: reordering any call into {@code random} changes every
 * subsequent value.
 */
public class RandomGenericData {
  /**
   * Generates {@code numRecords} random records for {@code schema}.
   *
   * @param schema the Iceberg schema to generate data for
   * @param numRecords number of records to produce
   * @param seed seed for the underlying {@link Random}; the same seed reproduces the same data
   * @return a list of randomly generated records
   */
  public static List<Record> generate(Schema schema, int numRecords, long seed) {
    RandomDataGenerator generator = new RandomDataGenerator(seed);
    List<Record> records = Lists.newArrayListWithExpectedSize(numRecords);
    for (int i = 0; i < numRecords; i += 1) {
      // each visit walks the full schema and materializes one top-level record
      records.add((Record) TypeUtil.visit(schema, generator));
    }

    return records;
  }

  /**
   * Schema visitor that builds a random value for each schema node it visits.
   * <p>
   * Uses {@code Supplier}-based results so child values are generated lazily, which lets
   * optional fields/elements/values be skipped (returned as null) without consuming randomness
   * for the skipped value's content.
   */
  private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
    private final Random random;

    private RandomDataGenerator(long seed) {
      this.random = new Random(seed);
    }

    @Override
    public Record schema(Schema schema, Supplier<Object> structResult) {
      // the top-level struct result is the record itself
      return (Record) structResult.get();
    }

    @Override
    public Record struct(Types.StructType struct, Iterable<Object> fieldResults) {
      Record rec = GenericRecord.create(struct);

      List<Object> values = Lists.newArrayList(fieldResults);
      for (int i = 0; i < values.size(); i += 1) {
        rec.set(i, values.get(i));
      }

      return rec;
    }

    @Override
    public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
      // return null 5% of the time when the value is optional
      if (field.isOptional() && random.nextInt(20) == 1) {
        return null;
      }
      return fieldResult.get();
    }

    @Override
    public Object list(Types.ListType list, Supplier<Object> elementResult) {
      // lists have 0 to 19 elements
      int numElements = random.nextInt(20);

      List<Object> result = Lists.newArrayListWithExpectedSize(numElements);
      for (int i = 0; i < numElements; i += 1) {
        // return null 5% of the time when the value is optional
        if (list.isElementOptional() && random.nextInt(20) == 1) {
          result.add(null);
        } else {
          result.add(elementResult.get());
        }
      }

      return result;
    }

    @Override
    public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
      // maps have 0 to 19 entries
      int numEntries = random.nextInt(20);

      Map<Object, Object> result = Maps.newLinkedHashMap();
      Supplier<Object> keyFunc;
      if (map.keyType() == Types.StringType.get()) {
        // string keys are stored as CharSequence variants; normalize to String so the
        // duplicate check below compares by content rather than by wrapper identity
        keyFunc = () -> keyResult.get().toString();
      } else {
        keyFunc = keyResult;
      }

      Set<Object> keySet = Sets.newHashSet();
      for (int i = 0; i < numEntries; i += 1) {
        Object key = keyFunc.get();
        // ensure no collisions
        while (keySet.contains(key)) {
          key = keyFunc.get();
        }

        keySet.add(key);

        // return null 5% of the time when the value is optional
        if (map.isValueOptional() && random.nextInt(20) == 1) {
          result.put(key, null);
        } else {
          result.put(key, valueResult.get());
        }
      }

      return result;
    }

    @Override
    public Object primitive(Type.PrimitiveType primitive) {
      Object result = generatePrimitive(primitive, random);
      // wrap the raw bytes produced by generatePrimitive into the in-memory
      // representation expected for these types
      switch (primitive.typeId()) {
        case BINARY:
          return ByteBuffer.wrap((byte[]) result);
        case UUID:
          return UUID.nameUUIDFromBytes((byte[]) result);
        default:
          return result;
      }
    }
  }

  /**
   * Generates a random value for a primitive type.
   * <p>
   * For numeric types, a 20-way choice biases generation toward interesting boundary values
   * (min, max, zero, infinities, NaN) while still producing mostly uniform random values.
   * BINARY and UUID return raw byte arrays that {@code primitive(...)} converts.
   */
  private static Object generatePrimitive(Type.PrimitiveType primitive, Random random) {
    int choice = random.nextInt(20);

    switch (primitive.typeId()) {
      case BOOLEAN:
        return choice < 10;

      case INTEGER:
        switch (choice) {
          case 1:
            return Integer.MIN_VALUE;
          case 2:
            return Integer.MAX_VALUE;
          case 3:
            return 0;
          default:
            return random.nextInt();
        }

      case LONG:
        switch (choice) {
          case 1:
            return Long.MIN_VALUE;
          case 2:
            return Long.MAX_VALUE;
          case 3:
            return 0L;
          default:
            return random.nextLong();
        }

      case FLOAT:
        switch (choice) {
          case 1:
            return Float.MIN_VALUE;
          case 2:
            return -Float.MIN_VALUE;
          case 3:
            return Float.MAX_VALUE;
          case 4:
            return -Float.MAX_VALUE;
          case 5:
            return Float.NEGATIVE_INFINITY;
          case 6:
            return Float.POSITIVE_INFINITY;
          case 7:
            return 0.0F;
          case 8:
            return Float.NaN;
          default:
            return random.nextFloat();
        }

      case DOUBLE:
        switch (choice) {
          case 1:
            return Double.MIN_VALUE;
          case 2:
            return -Double.MIN_VALUE;
          case 3:
            return Double.MAX_VALUE;
          case 4:
            return -Double.MAX_VALUE;
          case 5:
            return Double.NEGATIVE_INFINITY;
          case 6:
            return Double.POSITIVE_INFINITY;
          case 7:
            return 0.0D;
          case 8:
            return Double.NaN;
          default:
            return random.nextDouble();
        }

      case DATE:
        // this will include negative values (dates before 1970-01-01)
        return EPOCH_DAY.plusDays(random.nextInt() % ABOUT_380_YEARS_IN_DAYS);

      case TIME:
        // mask to a non-negative long before taking the remainder so the time is valid
        return LocalTime.ofNanoOfDay(
            ((random.nextLong() & Integer.MAX_VALUE) % ONE_DAY_IN_MICROS) * 1000);

      case TIMESTAMP:
        Types.TimestampType ts = (Types.TimestampType) primitive;
        // nextLong() % n can be negative, so timestamps before the epoch are included
        if (ts.shouldAdjustToUTC()) {
          return EPOCH.plus(random.nextLong() % FIFTY_YEARS_IN_MICROS, MICROS);
        } else {
          return EPOCH.plus(random.nextLong() % FIFTY_YEARS_IN_MICROS, MICROS).toLocalDateTime();
        }

      case STRING:
        return randomString(random);

      case UUID:
        byte[] uuidBytes = new byte[16];
        random.nextBytes(uuidBytes);
        // this will hash the uuidBytes
        return uuidBytes;

      case FIXED:
        byte[] fixed = new byte[((Types.FixedType) primitive).length()];
        random.nextBytes(fixed);
        return fixed;

      case BINARY:
        byte[] binary = new byte[random.nextInt(50)];
        random.nextBytes(binary);
        return binary;

      case DECIMAL:
        Types.DecimalType type = (Types.DecimalType) primitive;
        BigInteger unscaled = randomUnscaled(type.precision(), random);
        return new BigDecimal(unscaled, type.scale());

      default:
        throw new IllegalArgumentException(
            "Cannot generate random value for unknown type: " + primitive);
    }
  }

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();
  // 50 years in microseconds, accounting for leap years (3 normal + 1 leap year per 4 years)
  private static final long FIFTY_YEARS_IN_MICROS =
      (50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4;
  private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365;
  private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L;

  // alphabet of single-byte UTF-8 characters used for random strings
  private static final String CHARS =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?";

  /** Returns a random string of 0 to 49 characters drawn from {@link #CHARS}. */
  private static String randomString(Random random) {
    int length = random.nextInt(50);
    byte[] buffer = new byte[length];

    for (int i = 0; i < length; i += 1) {
      // all CHARS are ASCII, so each char fits in one UTF-8 byte
      buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length()));
    }

    return new String(buffer, Charsets.UTF_8);
  }

  private static final String DIGITS = "0123456789";

  /**
   * Returns a random unscaled decimal value with at most {@code precision - 1} digits.
   * May include leading zero digits, which BigInteger normalizes away.
   */
  private static BigInteger randomUnscaled(int precision, Random random) {
    int length = random.nextInt(precision);
    if (length == 0) {
      return BigInteger.ZERO;
    }

    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < length; i += 1) {
      sb.append(DIGITS.charAt(random.nextInt(DIGITS.length())));
    }

    return new BigInteger(sb.toString());
  }
}
6,558
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/DataTest.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.IOException;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Base class for file-format round-trip tests.
 * <p>
 * Subclasses implement {@link #writeAndValidate(Schema)} to write randomly generated records
 * for the given schema in their format and assert that reading them back produces equal
 * records. Each test method below exercises a different shape of schema nesting.
 */
public abstract class DataTest {

  /**
   * Writes data for {@code schema} in the format under test, reads it back, and asserts
   * equality. Implemented per format (e.g. Avro, Parquet).
   */
  protected abstract void writeAndValidate(Schema schema) throws IOException;

  // a struct covering every supported primitive type, reused by the nesting tests below
  private static final StructType SUPPORTED_PRIMITIVES = StructType.of(
      required(100, "id", LongType.get()),
      optional(101, "data", Types.StringType.get()),
      required(102, "b", Types.BooleanType.get()),
      optional(103, "i", Types.IntegerType.get()),
      required(104, "l", LongType.get()),
      optional(105, "f", Types.FloatType.get()),
      required(106, "d", Types.DoubleType.get()),
      optional(107, "date", Types.DateType.get()),
      required(108, "ts", Types.TimestampType.withZone()),
      required(110, "s", Types.StringType.get()),
      required(112, "fixed", Types.FixedType.ofLength(7)),
      optional(113, "bytes", Types.BinaryType.get()),
      required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
      required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
      required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // maximum precision
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  @Test
  public void testSimpleStruct() throws IOException {
    // a flat schema with one field of each supported primitive type
    writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
  }

  @Test
  public void testArray() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testArrayOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMap() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testNumericMapKey() throws IOException {
    // non-string keys exercise the key-value pair representation of maps
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            LongType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testComplexMapKey() throws IOException {
    // struct-typed map keys
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            StructType.of(
                required(4, "i", Types.IntegerType.get()),
                optional(5, "s", Types.StringType.get())),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testMapOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMixedTypes() throws IOException {
    // deep combinations of lists, maps, and structs in every nesting order
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "list_of_maps",
            ListType.ofOptional(2, MapType.ofOptional(3, 4,
                Types.StringType.get(), SUPPORTED_PRIMITIVES))),
        optional(5, "map_of_lists",
            MapType.ofOptional(6, 7,
                Types.StringType.get(), ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
        required(9, "list_of_lists",
            ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
        required(12, "map_of_maps",
            MapType.ofOptional(13, 14, Types.StringType.get(),
                MapType.ofOptional(15, 16,
                    Types.StringType.get(), SUPPORTED_PRIMITIVES))),
        required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
            Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
                Types.StringType.get(), SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
            Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
                Types.StringType.get(), SUPPORTED_PRIMITIVES))
        )))
    );

    writeAndValidate(schema);
  }
}
6,559
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestGenericReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data.avro; import com.google.common.collect.Iterables; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.data.Record; import com.netflix.iceberg.data.TestReadProjection; import com.netflix.iceberg.io.FileAppender; import java.io.File; import java.io.IOException; public class TestGenericReadProjection extends TestReadProjection { protected Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException { File file = temp.newFile(desc + ".avro"); file.delete(); try (FileAppender<Record> appender = Avro.write(Files.localOutput(file)) .schema(writeSchema) .createWriterFunc(DataWriter::create) .build()) { appender.add(record); } Iterable<Record> records = Avro.read(Files.localInput(file)) .project(readSchema) .createReaderFunc(DataReader::create) .build(); return Iterables.getOnlyElement(records); } }
6,560
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestSingleMessageEncoding.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data.avro; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Ordering; import com.google.common.collect.Sets; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.data.GenericRecord; import com.netflix.iceberg.data.Record; import com.netflix.iceberg.types.Types; import org.apache.avro.AvroRuntimeException; import org.apache.avro.message.BadHeaderException; import org.apache.avro.message.MessageDecoder; import org.apache.avro.message.MessageEncoder; import org.apache.avro.message.MissingSchemaException; import org.apache.avro.message.SchemaStore; import org.junit.Assert; import org.junit.Test; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.Set; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestSingleMessageEncoding { private static final Schema SCHEMA_V1 = new Schema( required(0, "id", Types.IntegerType.get()), optional(1, "msg", Types.StringType.get()) ); private static Record v1Record(int id, String msg) { Record rec = GenericRecord.create(SCHEMA_V1.asStruct()); rec.setField("id", id); rec.setField("msg", msg); return rec; } private static final List<Record> V1_RECORDS = Arrays.asList( v1Record(1, 
"m-1"), v1Record(2, "m-2"), v1Record(4, "m-4"), v1Record(6, "m-6") ); private static final Schema SCHEMA_V2 = new Schema( required(0, "id", Types.LongType.get()), optional(1, "message", Types.StringType.get()), optional(2, "data", Types.DoubleType.get()) ); private static Record v2Record(long id, String message, Double data) { Record rec = GenericRecord.create(SCHEMA_V2.asStruct()); rec.setField("id", id); rec.setField("message", message); rec.setField("data", data); return rec; } private static final List<Record> V2_RECORDS = Arrays.asList( v2Record(3L, "m-3", 12.3), v2Record(5L, "m-5", 23.4), v2Record(7L, "m-7", 34.5), v2Record(8L, "m-8", 35.6) ); @Test public void testByteBufferRoundTrip() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); Record copy = decoder.decode(encoder.encode(V2_RECORDS.get(0))); Assert.assertTrue("Copy should not be the same object", copy != V2_RECORDS.get(0)); Assert.assertEquals("Record should be identical after round-trip", V2_RECORDS.get(0), copy); } @Test public void testSchemaEvolution() throws Exception { List<ByteBuffer> buffers = Lists.newArrayList(); List<Record> records = Ordering.usingToString().sortedCopy( Iterables.concat(V1_RECORDS, V2_RECORDS)); MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1); MessageEncoder<Record> v2Encoder = new IcebergEncoder<>(SCHEMA_V2); for (Record record : records) { if (record.struct() == SCHEMA_V1.asStruct()) { buffers.add(v1Encoder.encode(record)); } else { buffers.add(v2Encoder.encode(record)); } } Set<Record> allAsV2 = Sets.newHashSet(V2_RECORDS); allAsV2.add(v2Record(1L, "m-1", null)); allAsV2.add(v2Record(2L, "m-2", null)); allAsV2.add(v2Record(4L, "m-4", null)); allAsV2.add(v2Record(6L, "m-6", null)); IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2); v2Decoder.addSchema(SCHEMA_V1); Set<Record> decodedUsingV2 = Sets.newHashSet(); for (ByteBuffer buffer : 
buffers) { decodedUsingV2.add(v2Decoder.decode(buffer)); } Assert.assertEquals(allAsV2, decodedUsingV2); } @Test(expected = MissingSchemaException.class) public void testCompatibleReadFailsWithoutSchema() throws Exception { MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1); MessageDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3)); v2Decoder.decode(v1Buffer); } @Test public void testCompatibleReadWithSchema() throws Exception { MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1); IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2); v2Decoder.addSchema(SCHEMA_V1); ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3)); Record record = v2Decoder.decode(v1Buffer); Assert.assertEquals(v2Record(6L, "m-6", null), record); } @Test public void testCompatibleReadWithSchemaFromLookup() throws Exception { MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1); SchemaStore.Cache schemaCache = new SchemaStore.Cache(); schemaCache.addSchema(AvroSchemaUtil.convert(SCHEMA_V1, "table")); IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2, schemaCache); ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(2)); Record record = v2Decoder.decode(v1Buffer); Assert.assertEquals(v2Record(4L, "m-4", null), record); } @Test public void testBufferReuse() throws Exception { // This test depends on the serialized version of record 1 being smaller or // the same size as record 0 so that the reused ByteArrayOutputStream won't // expand its internal buffer. 
MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V1, false); ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0)); ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1)); Assert.assertEquals(b0.array(), b1.array()); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V1); Assert.assertEquals("Buffer was reused, decode(b0) should be record 1", V1_RECORDS.get(1), decoder.decode(b0)); } @Test public void testBufferCopy() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V1); ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0)); ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1)); Assert.assertNotEquals(b0.array(), b1.array()); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V1); // bytes are not changed by reusing the encoder Assert.assertEquals("Buffer was copied, decode(b0) should be record 0", V1_RECORDS.get(0), decoder.decode(b0)); } @Test(expected = AvroRuntimeException.class) public void testByteBufferMissingPayload() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0)); buffer.limit(12); decoder.decode(buffer); } @Test(expected = BadHeaderException.class) public void testByteBufferMissingFullHeader() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0)); buffer.limit(8); decoder.decode(buffer); } @Test(expected = BadHeaderException.class) public void testByteBufferBadMarkerByte() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0)); buffer.array()[0] = 0x00; decoder.decode(buffer); } @Test(expected = BadHeaderException.class) public void 
testByteBufferBadVersionByte() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0)); buffer.array()[1] = 0x00; decoder.decode(buffer); } @Test(expected = MissingSchemaException.class) public void testByteBufferUnknownSchema() throws Exception { MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2); MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2); ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0)); buffer.array()[4] = 0x00; decoder.decode(buffer); } }
6,561
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestGenericData.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data.avro; import com.google.common.collect.Lists; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroIterable; import com.netflix.iceberg.data.DataTest; import com.netflix.iceberg.data.DataTestHelpers; import com.netflix.iceberg.data.RandomGenericData; import com.netflix.iceberg.data.Record; import com.netflix.iceberg.io.FileAppender; import org.junit.Assert; import java.io.File; import java.io.IOException; import java.util.List; public class TestGenericData extends DataTest { protected void writeAndValidate(Schema schema) throws IOException { List<Record> expected = RandomGenericData.generate(schema, 100, 0L); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile)) .schema(schema) .createWriterFunc(DataWriter::create) .named("test") .build()) { for (Record rec : expected) { writer.add(rec); } } List<Record> rows; try (AvroIterable<Record> reader = Avro.read(Files.localInput(testFile)) .project(schema) .createReaderFunc(DataReader::create) .build()) { rows = Lists.newArrayList(reader); } for (int i = 0; i < expected.size(); i += 1) { DataTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i)); } } }
6,562
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/parquet/TestGenericReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data.parquet; import com.google.common.collect.Iterables; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.data.Record; import com.netflix.iceberg.data.TestReadProjection; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import java.io.File; import java.io.IOException; public class TestGenericReadProjection extends TestReadProjection { protected Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException { File file = temp.newFile(desc + ".parquet"); file.delete(); try (FileAppender<Record> appender = Parquet.write(Files.localOutput(file)) .schema(writeSchema) .createWriterFunc(GenericParquetWriter::buildWriter) .build()) { appender.add(record); } Iterable<Record> records = Parquet.read(Files.localInput(file)) .project(readSchema) .createReaderFunc(fileSchema -> GenericParquetReaders.buildReader(readSchema, fileSchema)) .build(); return Iterables.getOnlyElement(records); } }
6,563
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/parquet/TestGenericData.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data.parquet; import com.google.common.collect.Lists; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.data.DataTest; import com.netflix.iceberg.data.DataTestHelpers; import com.netflix.iceberg.data.RandomGenericData; import com.netflix.iceberg.data.Record; import com.netflix.iceberg.io.CloseableIterable; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import org.junit.Assert; import java.io.File; import java.io.IOException; import java.util.List; public class TestGenericData extends DataTest { protected void writeAndValidate(Schema schema) throws IOException { List<Record> expected = RandomGenericData.generate(schema, 100, 0L); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> appender = Parquet.write(Files.localOutput(testFile)) .schema(schema) .createWriterFunc(GenericParquetWriter::buildWriter) .build()) { appender.addAll(expected); } List<Record> rows; try (CloseableIterable<Record> reader = Parquet.read(Files.localInput(testFile)) .project(schema) .createReaderFunc(fileSchema -> GenericParquetReaders.buildReader(schema, fileSchema)) .build()) { rows = Lists.newArrayList(reader); } for (int i = 0; i < expected.size(); i += 1) { DataTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i)); } } }
6,564
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/GenericRecord.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data; import com.google.common.base.Objects; import com.google.common.base.Preconditions; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.Maps; import com.netflix.iceberg.Schema; import com.netflix.iceberg.StructLike; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.StructType; import java.util.Arrays; import java.util.List; import java.util.Map; public class GenericRecord implements Record, StructLike { private static final LoadingCache<StructType, Map<String, Integer>> NAME_MAP_CACHE = CacheBuilder.newBuilder() .weakKeys() .build(new CacheLoader<StructType, Map<String, Integer>>() { @Override public Map<String, Integer> load(StructType struct) { Map<String, Integer> idToPos = Maps.newHashMap(); List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { idToPos.put(fields.get(i).name(), i); } return idToPos; } }); public static GenericRecord create(Schema schema) { return new GenericRecord(schema.asStruct()); } public static GenericRecord create(StructType struct) { return new GenericRecord(struct); } private final StructType struct; private final int size; private final Object[] values; private final Map<String, Integer> nameToPos; private GenericRecord(StructType struct) { this.struct 
= struct; this.size = struct.fields().size(); this.values = new Object[size]; this.nameToPos = NAME_MAP_CACHE.getUnchecked(struct); } private GenericRecord(GenericRecord toCopy) { this.struct = toCopy.struct; this.size = toCopy.size; this.values = Arrays.copyOf(toCopy.values, toCopy.values.length); this.nameToPos = toCopy.nameToPos; } private GenericRecord(GenericRecord toCopy, Map<String, Object> overwrite) { this.struct = toCopy.struct; this.size = toCopy.size; this.values = Arrays.copyOf(toCopy.values, toCopy.values.length); this.nameToPos = toCopy.nameToPos; for (Map.Entry<String, Object> entry : overwrite.entrySet()) { setField(entry.getKey(), entry.getValue()); } } @Override public StructType struct() { return struct; } @Override public Object getField(String name) { Integer pos = nameToPos.get(name); if (pos != null) { return values[pos]; } return null; } @Override public void setField(String name, Object value) { Integer pos = nameToPos.get(name); Preconditions.checkArgument(pos != null, "Cannot set unknown field named: " + name); values[pos] = value; } @Override public int size() { return size; } @Override public Object get(int pos) { return values[pos]; } @Override public <T> T get(int pos, Class<T> javaClass) { Object value = get(pos); if (javaClass.isInstance(value)) { return javaClass.cast(value); } else { throw new IllegalStateException("Not an instance of " + javaClass.getName() + ": " + value); } } @Override public <T> void set(int pos, T value) { values[pos] = value; } @Override public GenericRecord copy() { return new GenericRecord(this); } @Override public GenericRecord copy(Map<String, Object> overwriteValues) { return new GenericRecord(this, overwriteValues); } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("Record("); for (int i = 0; i < values.length; i += 1) { if (i != 0) { sb.append(", "); } sb.append(values[i]); } sb.append(")"); return sb.toString(); } @Override public boolean equals(Object other) { 
if (this == other) { return true; } if (other == null || getClass() != other.getClass()) { return false; } GenericRecord that = (GenericRecord) other; return Arrays.deepEquals(this.values, that.values); } @Override public int hashCode() { return Objects.hashCode(values); } }
6,565
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/TableScanIterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.iceberg.CombinedScanTask;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.HasTableOperations;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.TableScan;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.data.avro.DataReader;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.TypeUtil;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;

import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static com.netflix.iceberg.data.parquet.GenericParquetReaders.buildReader;
import static java.util.Collections.emptyIterator;

/**
 * Iterable over the generic {@link Record}s produced by a {@link TableScan}.
 * <p>
 * Each call to {@link #iterator()} opens the scan's data files lazily, one at a time, and applies
 * any residual row filter from the scan tasks. Iterators are registered with this
 * {@link CloseableGroup} so their open file readers are released when this iterable is closed.
 */
class TableScanIterable extends CloseableGroup implements CloseableIterable<Record> {
  private final TableOperations ops;
  private final Schema projection;
  private final boolean reuseContainers;
  private final CloseableIterable<CombinedScanTask> tasks;

  /**
   * Creates an iterable for a scan.
   *
   * @param scan a configured table scan; its table must implement {@link HasTableOperations}
   *             because file IO is obtained from the table's {@link TableOperations}
   * @param reuseContainers whether file readers may reuse record containers between rows
   */
  TableScanIterable(TableScan scan, boolean reuseContainers) {
    Preconditions.checkArgument(scan.table() instanceof HasTableOperations,
        "Cannot scan table that doesn't expose its TableOperations");
    this.ops = ((HasTableOperations) scan.table()).operations();
    this.projection = scan.schema();
    this.reuseContainers = reuseContainers;
    // start planning tasks in the background
    this.tasks = scan.planTasks();
  }

  @Override
  public Iterator<Record> iterator() {
    // track the iterator so its current file reader is closed with this iterable
    ScanIterator iter = new ScanIterator(tasks);
    addCloseable(iter);
    return iter;
  }

  /**
   * Opens a single file scan task as an iterable of records, honoring the task's split range.
   */
  private CloseableIterable<Record> open(FileScanTask task) {
    InputFile input = ops.io().newInputFile(task.file().path().toString());

    // TODO: join to partition data from the manifest file
    switch (task.file().format()) {
      case AVRO:
        Avro.ReadBuilder avro = Avro.read(input)
            .project(projection)
            .createReaderFunc(DataReader::create)
            .split(task.start(), task.length());
        if (reuseContainers) {
          avro.reuseContainers();
        }
        return avro.build();

      case PARQUET:
        Parquet.ReadBuilder parquet = Parquet.read(input)
            .project(projection)
            .createReaderFunc(fileSchema -> buildReader(projection, fileSchema))
            .split(task.start(), task.length());
        if (reuseContainers) {
          parquet.reuseContainers();
        }
        return parquet.build();

      default:
        // ORC and any future formats are not supported by this generic reader
        throw new UnsupportedOperationException(String.format("Cannot read %s file: %s",
            task.file().format().name(), task.file().path()));
    }
  }

  @Override
  public void close() throws IOException {
    tasks.close(); // close manifests from scan planning
    super.close(); // close data files
  }

  /**
   * Iterator that walks all {@link FileScanTask}s from the planned combined tasks, opening one
   * data file at a time and closing the previous file's reader before opening the next.
   */
  private class ScanIterator implements Iterator<Record>, Closeable {
    private final Iterator<FileScanTask> tasks;
    // reader for the file currently being iterated; null until the first file is opened
    private Closeable currentCloseable = null;
    private Iterator<Record> currentIterator = emptyIterator();

    private ScanIterator(Iterable<CombinedScanTask> tasks) {
      // flatten combined tasks into a single list of file tasks up front
      this.tasks = Lists.newArrayList(concat(transform(tasks, CombinedScanTask::files))).iterator();
    }

    @Override
    public boolean hasNext() {
      // loop because a newly opened file may be empty; keep advancing until a record is found
      // or all tasks are exhausted
      while (true) {
        if (currentIterator.hasNext()) {
          return true;

        } else if (tasks.hasNext()) {
          // close the previous file's reader before opening the next one
          if (currentCloseable != null) {
            try {
              currentCloseable.close();
            } catch (IOException e) {
              throw new RuntimeIOException(e, "Failed to close task");
            }
          }

          FileScanTask task = tasks.next();
          CloseableIterable<Record> reader = open(task);
          this.currentCloseable = reader;

          // apply the task's residual row filter unless it is trivially true
          // (alwaysTrue() returns a singleton, so reference comparison is sufficient)
          if (task.residual() != null && task.residual() != Expressions.alwaysTrue()) {
            Evaluator filter = new Evaluator(projection.asStruct(), task.residual());
            this.currentIterator = filter(reader, filter::eval).iterator();
          } else {
            this.currentIterator = reader.iterator();
          }

        } else {
          return false;
        }
      }
    }

    @Override
    public Record next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return currentIterator.next();
    }

    @Override
    public void close() throws IOException {
      // only the current file's reader can be open at this point
      if (currentCloseable != null) {
        currentCloseable.close();
      }
    }
  }
}
6,566
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/Record.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.types.Types.StructType;
import java.util.Map;

/**
 * A generic data record for an Iceberg {@link StructType}, with access to field values both by
 * position (via {@link StructLike}) and by field name.
 */
public interface Record extends StructLike {
  /** Returns the struct type that describes this record's fields. */
  StructType struct();

  /** Returns the value of the named field. */
  Object getField(String name);

  /** Sets the value of the named field. */
  void setField(String name, Object value);

  /** Returns the value at the given field position. */
  Object get(int pos);

  /** Returns a copy of this record. */
  Record copy();

  /**
   * Returns a copy of this record with the given field values replaced.
   *
   * @param overwriteValues a map from field name to replacement value
   */
  Record copy(Map<String, Object> overwriteValues);
}
6,567
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/IcebergGenerics.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import java.util.List;

/**
 * Entry point for reading Iceberg tables as generic {@link Record}s.
 */
public class IcebergGenerics {
  private IcebergGenerics() {
  }

  /**
   * Returns a builder to configure a read of the given table that produces generic records.
   *
   * @param table an Iceberg table
   * @return a builder to configure the scan
   */
  public static ScanBuilder read(Table table) {
    return new ScanBuilder(table);
  }

  /**
   * Builder for a generic-record scan: accumulates a row filter, a column projection, and the
   * container-reuse flag, then produces the iterable.
   */
  public static class ScanBuilder {
    private final Table table;
    // filters are ANDed together; starts as the identity filter
    private Expression rowFilter = Expressions.alwaysTrue();
    // "*" selects all columns by default
    private List<String> selectedColumns = ImmutableList.of("*");
    private boolean reuseContainers = false;

    public ScanBuilder(Table table) {
      this.table = table;
    }

    /** Restricts the scan to the given columns. */
    public ScanBuilder select(String... columns) {
      this.selectedColumns = ImmutableList.copyOf(columns);
      return this;
    }

    /** ANDs the given filter into the scan's row filter. */
    public ScanBuilder where(Expression rowFilter) {
      this.rowFilter = Expressions.and(this.rowFilter, rowFilter);
      return this;
    }

    /** Allows file readers to reuse record containers between rows. */
    public ScanBuilder reuseContainers() {
      this.reuseContainers = true;
      return this;
    }

    /** Builds the configured scan as an iterable of generic records. */
    public Iterable<Record> build() {
      TableScan scan = table.newScan().filter(rowFilter).select(selectedColumns);
      return new TableScanIterable(scan, reuseContainers);
    }
  }
}
6,568
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/IcebergDecoder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapMaker;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.ProjectionDatumReader;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.message.BadHeaderException;
import org.apache.avro.message.MessageDecoder;
import org.apache.avro.message.MissingSchemaException;
import org.apache.avro.message.SchemaStore;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Map;

/**
 * Decodes single-object-encoded buffers into datum instances described by an Iceberg read schema.
 * <p>
 * Messages start with a 10-byte header: 2 magic bytes followed by an 8-byte little-endian schema
 * fingerprint that selects the write schema.
 */
public class IcebergDecoder<D> extends MessageDecoder.BaseDecoder<D> {

  // per-thread reusable buffer for the 10-byte header (2 magic bytes + 8-byte fingerprint)
  private static final ThreadLocal<byte[]> HEADER_BUFFER =
      ThreadLocal.withInitial(() -> new byte[10]);

  // little-endian view of HEADER_BUFFER used to read the fingerprint at offset 2
  private static final ThreadLocal<ByteBuffer> FP_BUFFER = ThreadLocal.withInitial(() -> {
    byte[] header = HEADER_BUFFER.get();
    return ByteBuffer.wrap(header).order(ByteOrder.LITTLE_ENDIAN);
  });

  private final com.netflix.iceberg.Schema readSchema;
  private final SchemaStore resolver;
  // fingerprint -> decoder cache; MapMaker().makeMap() yields a concurrent map
  private final Map<Long, RawDecoder<D>> decoders = new MapMaker().makeMap();

  /**
   * Creates a new decoder that constructs datum instances described by an
   * {@link com.netflix.iceberg.Schema Iceberg schema}.
   * <p>
   * The {@code readSchema} is used as the expected schema (read schema). Datum instances created
   * by this class are described by the expected schema.
   * <p>
   * The schema used to decode incoming buffers is determined by the schema fingerprint encoded in
   * the message header. This class can decode messages that were encoded using the
   * {@code readSchema} and other schemas that are added using
   * {@link #addSchema(com.netflix.iceberg.Schema)}.
   *
   * @param readSchema the schema used to construct datum instances
   */
  public IcebergDecoder(com.netflix.iceberg.Schema readSchema) {
    this(readSchema, null);
  }

  /**
   * Creates a new decoder that constructs datum instances described by an
   * {@link com.netflix.iceberg.Schema Iceberg schema}.
   * <p>
   * The {@code readSchema} is used as the expected schema (read schema). Datum instances created
   * by this class are described by the expected schema.
   * <p>
   * The schema used to decode incoming buffers is determined by the schema fingerprint encoded in
   * the message header. This class can decode messages that were encoded using the
   * {@code readSchema} and other schemas that are added using
   * {@link #addSchema(com.netflix.iceberg.Schema)}.
   * <p>
   * Schemas may also be returned from an Avro {@link SchemaStore}. Avro Schemas from the store
   * must be compatible with Iceberg and should contain id properties and use only Iceberg types.
   *
   * @param readSchema the {@link Schema} used to construct datum instances
   * @param resolver a {@link SchemaStore} used to find schemas by fingerprint
   */
  public IcebergDecoder(com.netflix.iceberg.Schema readSchema, SchemaStore resolver) {
    this.readSchema = readSchema;
    this.resolver = resolver;
    // the read schema itself is always a valid write schema
    addSchema(this.readSchema);
  }

  /**
   * Adds an {@link com.netflix.iceberg.Schema Iceberg schema} that can be used to decode buffers.
   *
   * @param writeSchema a schema to use when decoding buffers
   */
  public void addSchema(com.netflix.iceberg.Schema writeSchema) {
    addSchema(AvroSchemaUtil.convert(writeSchema, "table"));
  }

  // registers a decoder for the Avro schema, keyed by its parsing fingerprint
  private void addSchema(org.apache.avro.Schema writeSchema) {
    long fp = SchemaNormalization.parsingFingerprint64(writeSchema);
    decoders.put(fp, new RawDecoder<>(readSchema, writeSchema));
  }

  // looks up a decoder by fingerprint, lazily loading the schema from the resolver if present
  private RawDecoder<D> getDecoder(long fp) {
    RawDecoder<D> decoder = decoders.get(fp);
    if (decoder != null) {
      return decoder;
    }

    if (resolver != null) {
      Schema writeSchema = resolver.findByFingerprint(fp);
      if (writeSchema != null) {
        addSchema(writeSchema);
        return decoders.get(fp);
      }
    }

    throw new MissingSchemaException(
        "Cannot resolve schema for fingerprint: " + fp);
  }

  @Override
  public D decode(InputStream stream, D reuse) throws IOException {
    byte[] header = HEADER_BUFFER.get();
    try {
      if (!readFully(stream, header)) {
        throw new BadHeaderException("Not enough header bytes");
      }
    } catch (IOException e) {
      throw new IOException("Failed to read header and fingerprint bytes", e);
    }

    // validate the 2 magic bytes before trusting the fingerprint
    if (IcebergEncoder.V1_HEADER[0] != header[0] || IcebergEncoder.V1_HEADER[1] != header[1]) {
      throw new BadHeaderException(String.format(
          "Unrecognized header bytes: 0x%02X 0x%02X", header[0], header[1]));
    }

    // fingerprint is the 8 bytes after the magic, little-endian
    RawDecoder<D> decoder = getDecoder(FP_BUFFER.get().getLong(2));

    return decoder.decode(stream, reuse);
  }

  /**
   * Decoder bound to a single (read schema, write schema) pair.
   */
  private static class RawDecoder<D> extends MessageDecoder.BaseDecoder<D> {
    // reused per-thread binary decoder; reinitialized with each stream by directBinaryDecoder
    private static final ThreadLocal<BinaryDecoder> DECODER = new ThreadLocal<>();

    private final DatumReader<D> reader;

    /**
     * Creates a new {@link MessageDecoder} that constructs datum instances described by the
     * {@link Schema readSchema}.
     * <p>
     * The {@code readSchema} is used for the expected schema and the {@code writeSchema} is the
     * schema used to decode buffers. The {@code writeSchema} must be the schema that was used to
     * encode all buffers decoded by this class.
     *
     * @param readSchema the schema used to construct datum instances
     * @param writeSchema the schema used to decode buffers
     */
    private RawDecoder(com.netflix.iceberg.Schema readSchema, org.apache.avro.Schema writeSchema) {
      this.reader = new ProjectionDatumReader<>(DataReader::create, readSchema, ImmutableMap.of());
      this.reader.setSchema(writeSchema);
    }

    @Override
    public D decode(InputStream stream, D reuse) {
      BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(stream, DECODER.get());
      DECODER.set(decoder);
      try {
        return reader.read(reuse, decoder);
      } catch (IOException e) {
        throw new AvroRuntimeException("Decoding datum failed", e);
      }
    }
  }

  /**
   * Reads a buffer from a stream, making multiple read calls if necessary.
   *
   * @param stream an InputStream to read from
   * @param bytes a buffer
   * @return true if the buffer is complete, false otherwise (stream ended)
   * @throws IOException if there is an error while reading
   */
  private boolean readFully(InputStream stream, byte[] bytes) throws IOException {
    int pos = 0;
    int bytesRead;
    while ((bytes.length - pos) > 0 &&
        (bytesRead = stream.read(bytes, pos, bytes.length - pos)) > 0) {
      pos += bytesRead;
    }
    return (pos == bytes.length);
  }
}
6,569
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/DataReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.MapMaker;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.LogicalMap;
import com.netflix.iceberg.avro.ValueReader;
import com.netflix.iceberg.avro.ValueReaders;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Avro {@link DatumReader} that materializes generic records (and java.time values for date/time
 * logical types) for an Avro read schema, resolving against the file's write schema.
 */
public class DataReader<T> implements DatumReader<T> {
  // per-thread cache of ResolvingDecoders, keyed by read schema then file schema;
  // ResolvingDecoder construction is avoided on repeated reads of the same schema pair
  private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
      ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());

  public static <D> DataReader<D> create(Schema readSchema) {
    return new DataReader<>(readSchema);
  }

  private final Schema readSchema;
  // tree of value readers built once from the read schema
  private final ValueReader<T> reader;
  private Schema fileSchema = null;

  @SuppressWarnings("unchecked")
  private DataReader(Schema readSchema) {
    this.readSchema = readSchema;
    this.reader = (ValueReader<T>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder());
  }

  @Override
  public void setSchema(Schema fileSchema) {
    // rewrite the file schema's names using the read schema's aliases so resolution matches
    this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
  }

  @Override
  public T read(T reuse, Decoder decoder) throws IOException {
    ResolvingDecoder resolver = resolve(decoder);
    T value = reader.read(resolver, reuse);
    // drain any trailing fields in the file schema that the read schema skipped
    resolver.drain();
    return value;
  }

  // returns a cached (or new) ResolvingDecoder configured to read from the given decoder
  private ResolvingDecoder resolve(Decoder decoder) throws IOException {
    Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
    Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
        .computeIfAbsent(readSchema, k -> new HashMap<>());

    ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
    if (resolver == null) {
      resolver = newResolver();
      fileSchemaToResolver.put(fileSchema, resolver);
    }

    resolver.configure(decoder);

    return resolver;
  }

  private ResolvingDecoder newResolver() {
    try {
      return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Schema visitor that builds a {@link ValueReader} tree mirroring the read schema.
   */
  private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
    private ReadBuilder() {
    }

    @Override
    public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
      return GenericReaders.struct(AvroSchemaUtil.convert(record).asStructType(), fields);
    }

    @Override
    public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
      return ValueReaders.union(options);
    }

    @Override
    public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
      // maps with non-string keys are represented as arrays of key/value structs
      if (array.getLogicalType() instanceof LogicalMap) {
        ValueReaders.StructReader<?> keyValueReader = (ValueReaders.StructReader) elementReader;
        ValueReader<?> keyReader = keyValueReader.reader(0);
        ValueReader<?> valueReader = keyValueReader.reader(1);
        return ValueReaders.arrayMap(keyReader, valueReader);
      }

      return ValueReaders.array(elementReader);
    }

    @Override
    public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
      return ValueReaders.map(ValueReaders.strings(), valueReader);
    }

    @Override
    public ValueReader<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return GenericReaders.dates();

          case "time-micros":
            return GenericReaders.times();

          case "timestamp-micros":
            // adjust-to-utc property distinguishes timestamptz from timestamp
            if (AvroSchemaUtil.isTimestamptz(primitive)) {
              return GenericReaders.timestamptz();
            }
            return GenericReaders.timestamps();

          case "decimal":
            // decimals may be backed by either fixed or bytes
            ValueReader<byte[]> inner;
            switch (primitive.getType()) {
              case FIXED:
                inner = ValueReaders.fixed(primitive.getFixedSize());
                break;
              case BYTES:
                inner = ValueReaders.bytes();
                break;
              default:
                throw new IllegalArgumentException(
                    "Invalid primitive type for decimal: " + primitive.getType());
            }

            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueReaders.decimal(inner, decimal.getScale());

          case "uuid":
            return ValueReaders.uuids();

          default:
            throw new IllegalArgumentException("Unknown logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueReaders.nulls();
        case BOOLEAN:
          return ValueReaders.booleans();
        case INT:
          return ValueReaders.ints();
        case LONG:
          return ValueReaders.longs();
        case FLOAT:
          return ValueReaders.floats();
        case DOUBLE:
          return ValueReaders.doubles();
        case STRING:
          // might want to use a binary-backed container like Utf8
          return ValueReaders.strings();
        case FIXED:
          return ValueReaders.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueReaders.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
6,570
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/GenericWriters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.netflix.iceberg.avro.ValueWriter;
import com.netflix.iceberg.avro.ValueWriters;
import com.netflix.iceberg.data.Record;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.List;

/**
 * Value writers that encode java.time objects and generic {@link Record}s using Iceberg's
 * epoch-based representations: days for dates, microseconds for times and timestamps.
 */
class GenericWriters {
  private GenericWriters() {
  }

  // 1970-01-01T00:00:00Z, the reference point for timestamp encodings
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);

  /** Returns a writer that encodes a {@link LocalDate} as days from the epoch. */
  static ValueWriter<LocalDate> dates() {
    return DateWriter.INSTANCE;
  }

  /** Returns a writer that encodes a {@link LocalTime} as microseconds from midnight. */
  static ValueWriter<LocalTime> times() {
    return TimeWriter.INSTANCE;
  }

  /** Returns a writer that encodes a {@link LocalDateTime} as microseconds from the epoch, in UTC. */
  static ValueWriter<LocalDateTime> timestamps() {
    return TimestampWriter.INSTANCE;
  }

  /** Returns a writer that encodes an {@link OffsetDateTime} as microseconds from the epoch. */
  static ValueWriter<OffsetDateTime> timestamptz() {
    return TimestamptzWriter.INSTANCE;
  }

  /** Returns a writer for a struct whose field writers are given in positional order. */
  static ValueWriter<Record> struct(List<ValueWriter<?>> writers) {
    return new GenericRecordWriter(writers);
  }

  private static class DateWriter implements ValueWriter<LocalDate> {
    private static final DateWriter INSTANCE = new DateWriter();

    private DateWriter() {
    }

    @Override
    public void write(LocalDate date, Encoder encoder) throws IOException {
      // toEpochDay() is the day count since 1970-01-01
      encoder.writeInt((int) date.toEpochDay());
    }
  }

  private static class TimeWriter implements ValueWriter<LocalTime> {
    private static final TimeWriter INSTANCE = new TimeWriter();

    private TimeWriter() {
    }

    @Override
    public void write(LocalTime time, Encoder encoder) throws IOException {
      // nanos-of-day scaled down to micros
      long micros = time.toNanoOfDay() / 1000;
      encoder.writeLong(micros);
    }
  }

  private static class TimestampWriter implements ValueWriter<LocalDateTime> {
    private static final TimestampWriter INSTANCE = new TimestampWriter();

    private TimestampWriter() {
    }

    @Override
    public void write(LocalDateTime timestamp, Encoder encoder) throws IOException {
      // interpret the local timestamp as UTC, then measure micros from the epoch
      OffsetDateTime utc = timestamp.atOffset(ZoneOffset.UTC);
      encoder.writeLong(ChronoUnit.MICROS.between(EPOCH, utc));
    }
  }

  private static class TimestamptzWriter implements ValueWriter<OffsetDateTime> {
    private static final TimestamptzWriter INSTANCE = new TimestamptzWriter();

    private TimestamptzWriter() {
    }

    @Override
    public void write(OffsetDateTime timestamptz, Encoder encoder) throws IOException {
      encoder.writeLong(ChronoUnit.MICROS.between(EPOCH, timestamptz));
    }
  }

  private static class GenericRecordWriter extends ValueWriters.StructWriter<Record> {
    private GenericRecordWriter(List<ValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(Record record, int pos) {
      return record.get(pos);
    }
  }
}
6,571
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/DataWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.LogicalMap;
import com.netflix.iceberg.avro.ValueWriter;
import com.netflix.iceberg.avro.ValueWriters;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.avro.AvroSchemaVisitor.visit;

/**
 * Avro {@link DatumWriter} for generic data records: builds a {@link ValueWriter} tree by
 * visiting the Avro schema, then delegates {@link #write(Object, Encoder)} to that tree.
 */
public class DataWriter<T> implements DatumWriter<T> {
  // root of the writer tree; replaced whenever setSchema is called
  private ValueWriter<T> writer = null;

  /** Returns a new writer for the given Avro schema. */
  public static <D> DataWriter<D> create(Schema schema) {
    return new DataWriter<>(schema);
  }

  private DataWriter(Schema schema) {
    setSchema(schema);
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema schema) {
    this.writer = (ValueWriter<T>) visit(schema, new WriteBuilder());
  }

  @Override
  public void write(T datum, Encoder out) throws IOException {
    writer.write(datum, out);
  }

  /**
   * Schema visitor that builds a {@link ValueWriter} tree mirroring the schema.
   */
  private static class WriteBuilder extends AvroSchemaVisitor<ValueWriter<?>> {
    private WriteBuilder() {
    }

    @Override
    public ValueWriter<?> record(Schema record, List<String> names, List<ValueWriter<?>> fields) {
      return GenericWriters.struct(fields);
    }

    @Override
    public ValueWriter<?> union(Schema union, List<ValueWriter<?>> options) {
      // only option unions (null + one type) are supported;
      // use %s placeholders so the schema is only stringified when a check fails
      Preconditions.checkArgument(options.contains(ValueWriters.nulls()),
          "Cannot create writer for non-option union: %s", union);
      Preconditions.checkArgument(options.size() == 2,
          "Cannot create writer for non-option union: %s", union);
      if (union.getTypes().get(0).getType() == Schema.Type.NULL) {
        return ValueWriters.option(0, options.get(1));
      } else {
        return ValueWriters.option(1, options.get(0));
      }
    }

    @Override
    public ValueWriter<?> array(Schema array, ValueWriter<?> elementWriter) {
      // maps with non-string keys are represented as arrays of key/value structs
      if (array.getLogicalType() instanceof LogicalMap) {
        ValueWriters.StructWriter<?> keyValueWriter = (ValueWriters.StructWriter<?>) elementWriter;
        return ValueWriters.arrayMap(keyValueWriter.writer(0), keyValueWriter.writer(1));
      }

      return ValueWriters.array(elementWriter);
    }

    @Override
    public ValueWriter<?> map(Schema map, ValueWriter<?> valueWriter) {
      return ValueWriters.map(ValueWriters.strings(), valueWriter);
    }

    @Override
    public ValueWriter<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return GenericWriters.dates();

          case "time-micros":
            return GenericWriters.times();

          case "timestamp-micros":
            // adjust-to-utc property distinguishes timestamptz from timestamp
            if (AvroSchemaUtil.isTimestamptz(primitive)) {
              return GenericWriters.timestamptz();
            }
            return GenericWriters.timestamps();

          case "decimal":
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueWriters.decimal(decimal.getPrecision(), decimal.getScale());

          case "uuid":
            return ValueWriters.uuids();

          default:
            throw new IllegalArgumentException("Unsupported logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueWriters.nulls();
        case BOOLEAN:
          return ValueWriters.booleans();
        case INT:
          return ValueWriters.ints();
        case LONG:
          return ValueWriters.longs();
        case FLOAT:
          return ValueWriters.floats();
        case DOUBLE:
          return ValueWriters.doubles();
        case STRING:
          return ValueWriters.strings();
        case FIXED:
          return ValueWriters.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueWriters.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
6,572
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/GenericReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.netflix.iceberg.avro.ValueReader;
import com.netflix.iceberg.avro.ValueReaders;
import com.netflix.iceberg.data.GenericRecord;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.avro.io.Decoder;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.List;

/**
 * Value readers that materialize java.time objects and generic {@link Record}s from Iceberg's
 * epoch-based representations: days for dates, microseconds for times and timestamps.
 */
class GenericReaders {
  private GenericReaders() {
  }

  // 1970-01-01T00:00:00Z, the reference point for timestamp decodings
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);

  /** Returns a reader that decodes days from the epoch into a {@link LocalDate}. */
  static ValueReader<LocalDate> dates() {
    return DateReader.INSTANCE;
  }

  /** Returns a reader that decodes microseconds from midnight into a {@link LocalTime}. */
  static ValueReader<LocalTime> times() {
    return TimeReader.INSTANCE;
  }

  /** Returns a reader that decodes microseconds from the epoch into a {@link LocalDateTime}. */
  static ValueReader<LocalDateTime> timestamps() {
    return TimestampReader.INSTANCE;
  }

  /** Returns a reader that decodes microseconds from the epoch into an {@link OffsetDateTime}. */
  static ValueReader<OffsetDateTime> timestamptz() {
    return TimestamptzReader.INSTANCE;
  }

  /** Returns a reader for a struct whose field readers are given in positional order. */
  static ValueReader<Record> struct(StructType struct, List<ValueReader<?>> readers) {
    return new GenericRecordReader(readers, struct);
  }

  private static class DateReader implements ValueReader<LocalDate> {
    private static final DateReader INSTANCE = new DateReader();

    private DateReader() {
    }

    @Override
    public LocalDate read(Decoder decoder, Object reuse) throws IOException {
      // the encoded int is a day count since 1970-01-01
      return LocalDate.ofEpochDay(decoder.readInt());
    }
  }

  private static class TimeReader implements ValueReader<LocalTime> {
    private static final TimeReader INSTANCE = new TimeReader();

    private TimeReader() {
    }

    @Override
    public LocalTime read(Decoder decoder, Object reuse) throws IOException {
      // micros-of-day scaled back up to nanos
      long micros = decoder.readLong();
      return LocalTime.ofNanoOfDay(micros * 1000);
    }
  }

  private static class TimestampReader implements ValueReader<LocalDateTime> {
    private static final TimestampReader INSTANCE = new TimestampReader();

    private TimestampReader() {
    }

    @Override
    public LocalDateTime read(Decoder decoder, Object reuse) throws IOException {
      return EPOCH.plus(decoder.readLong(), ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  private static class TimestamptzReader implements ValueReader<OffsetDateTime> {
    private static final TimestamptzReader INSTANCE = new TimestamptzReader();

    private TimestamptzReader() {
    }

    @Override
    public OffsetDateTime read(Decoder decoder, Object reuse) throws IOException {
      return EPOCH.plus(decoder.readLong(), ChronoUnit.MICROS);
    }
  }

  private static class GenericRecordReader extends ValueReaders.StructReader<Record> {
    private final StructType structType;

    private GenericRecordReader(List<ValueReader<?>> readers, StructType structType) {
      super(readers);
      this.structType = structType;
    }

    @Override
    protected Record reuseOrCreate(Object reuse) {
      // reuse the container only when it is already a Record
      if (reuse instanceof Record) {
        return (Record) reuse;
      }
      return GenericRecord.create(structType);
    }

    @Override
    protected Object get(Record record, int pos) {
      return record.get(pos);
    }

    @Override
    protected void set(Record record, int pos, Object value) {
      record.set(pos, value);
    }
  }
}
6,573
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/IcebergEncoder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.primitives.Bytes;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.message.MessageEncoder;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;

/**
 * A {@link MessageEncoder} that writes datum instances described by an Iceberg {@link Schema}
 * using Avro binary encoding, prefixed by a header: the single-object marker bytes followed by
 * the CRC-64-AVRO fingerprint of the write schema.
 */
public class IcebergEncoder<D> implements MessageEncoder<D> {

  // Marker bytes that prefix every encoded payload (Avro single-object encoding, version 1).
  static final byte[] V1_HEADER = new byte[] {(byte) 0xC3, (byte) 0x01};

  // Per-thread scratch buffer, reused across encode calls to avoid reallocating on every datum.
  private static final ThreadLocal<BufferOutputStream> TEMP =
      ThreadLocal.withInitial(BufferOutputStream::new);

  // Per-thread Avro encoder; passed back to the factory as the "reuse" argument.
  private static final ThreadLocal<BinaryEncoder> ENCODER = new ThreadLocal<>();

  private final byte[] headerBytes;
  private final boolean copyOutputBytes;
  private final DatumWriter<D> writer;

  /**
   * Creates a new {@link MessageEncoder} that will deconstruct datum instances described by the
   * {@link Schema schema}.
   * <p>
   * Buffers returned by {@code encode} are copied and will not be modified by future calls to
   * {@code encode}.
   *
   * @param schema the {@link Schema} for datum instances
   */
  public IcebergEncoder(Schema schema) {
    this(schema, true);
  }

  /**
   * Creates a new {@link MessageEncoder} that will deconstruct datum instances described by the
   * {@link Schema schema}.
   * <p>
   * If {@code shouldCopy} is true, buffers returned by {@code encode} are copies and are safe to
   * hold indefinitely. If {@code shouldCopy} is false, returned buffers wrap a thread-local
   * buffer that may be overwritten by this thread's next call to {@code encode}; callers must
   * consume or copy the buffer before then.
   *
   * @param schema the {@link Schema} for datum instances
   * @param shouldCopy whether to copy buffers before returning encoded results
   */
  public IcebergEncoder(Schema schema, boolean shouldCopy) {
    this.copyOutputBytes = shouldCopy;
    org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(schema, "table");
    this.writer = DataWriter.create(avroSchema);
    this.headerBytes = getWriteHeader(avroSchema);
  }

  @Override
  public ByteBuffer encode(D datum) throws IOException {
    BufferOutputStream out = TEMP.get();
    out.reset();
    out.write(headerBytes);
    encode(datum, out);
    // Copy only when the caller asked for an independent buffer; otherwise hand back a view
    // over the thread-local scratch array.
    return copyOutputBytes ? out.toBufferWithCopy() : out.toBufferWithoutCopy();
  }

  @Override
  public void encode(D datum, OutputStream stream) throws IOException {
    // Reuse this thread's encoder if one exists; remember whatever the factory returned.
    BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(stream, ENCODER.get());
    ENCODER.set(encoder);
    writer.write(datum, encoder);
    encoder.flush();
  }

  /** ByteArrayOutputStream that can expose its internal array without copying. */
  private static class BufferOutputStream extends ByteArrayOutputStream {
    BufferOutputStream() {
    }

    // Zero-copy view over the internal array; valid only until the next reset/write.
    ByteBuffer toBufferWithoutCopy() {
      return ByteBuffer.wrap(buf, 0, count);
    }

    // Independent copy of the written bytes.
    ByteBuffer toBufferWithCopy() {
      return ByteBuffer.wrap(toByteArray());
    }
  }

  // Builds the per-schema header: V1 marker bytes + CRC-64-AVRO parsing fingerprint.
  private static byte[] getWriteHeader(org.apache.avro.Schema schema) {
    try {
      byte[] fingerprint = SchemaNormalization.parsingFingerprint("CRC-64-AVRO", schema);
      return Bytes.concat(V1_HEADER, fingerprint);
    } catch (NoSuchAlgorithmException e) {
      // CRC-64-AVRO is built into Avro, so this should be unreachable in practice.
      throw new AvroRuntimeException(e);
    }
  }
}
6,574
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/parquet/GenericParquetWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.Lists;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.parquet.ParquetTypeVisitor;
import com.netflix.iceberg.parquet.ParquetValueWriter;
import com.netflix.iceberg.parquet.ParquetValueWriters.PrimitiveWriter;
import com.netflix.iceberg.parquet.ParquetValueWriters.StructWriter;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List;

import static com.netflix.iceberg.parquet.ParquetValueWriters.byteBuffers;
import static com.netflix.iceberg.parquet.ParquetValueWriters.collections;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsFixed;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsInteger;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsLong;
import static com.netflix.iceberg.parquet.ParquetValueWriters.maps;
import static com.netflix.iceberg.parquet.ParquetValueWriters.option;
import static com.netflix.iceberg.parquet.ParquetValueWriters.strings;
import static com.netflix.iceberg.parquet.ParquetValueWriters.unboxed;

/**
 * Builds {@link ParquetValueWriter} trees for writing generic Iceberg {@link Record}s to a
 * Parquet {@link MessageType} schema.
 */
public class GenericParquetWriter {
  private GenericParquetWriter() {
  }

  /**
   * Builds a writer for the given Parquet message type by visiting the schema.
   *
   * @param type the Parquet file schema to write
   * @return a writer that produces values matching {@code type}
   */
  @SuppressWarnings("unchecked")
  public static <T> ParquetValueWriter<T> buildWriter(MessageType type) {
    return (ParquetValueWriter<T>) ParquetTypeVisitor.visit(type, new WriteBuilder(type));
  }

  /** Visitor that assembles a writer tree mirroring the Parquet schema. */
  private static class WriteBuilder extends ParquetTypeVisitor<ParquetValueWriter<?>> {
    private final MessageType type;

    WriteBuilder(MessageType type) {
      this.type = type;
    }

    @Override
    public ParquetValueWriter<?> message(MessageType message,
                                         List<ParquetValueWriter<?>> fieldWriters) {
      // A message is just the root struct.
      return struct(message.asGroupType(), fieldWriters);
    }

    @Override
    public ParquetValueWriter<?> struct(GroupType struct,
                                        List<ParquetValueWriter<?>> fieldWriters) {
      List<Type> fields = struct.getFields();
      List<ParquetValueWriter<?>> writers =
          Lists.newArrayListWithExpectedSize(fieldWriters.size());
      for (int pos = 0; pos < fields.size(); pos += 1) {
        Type fieldType = struct.getType(pos);
        // Wrap each field writer with its definition level so nulls are encoded correctly.
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()));
        writers.add(option(fieldType, fieldD, fieldWriters.get(pos)));
      }
      return new RecordWriter(writers);
    }

    @Override
    public ParquetValueWriter<?> list(GroupType array, ParquetValueWriter<?> elementWriter) {
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      // Levels of the repeated group determine how empty lists and nulls are encoded.
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);

      org.apache.parquet.schema.Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName()));

      return collections(repeatedD, repeatedR, option(elementType, elementD, elementWriter));
    }

    @Override
    public ParquetValueWriter<?> map(GroupType map,
                                     ParquetValueWriter<?> keyWriter,
                                     ParquetValueWriter<?> valueWriter) {
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);

      org.apache.parquet.schema.Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName()));
      org.apache.parquet.schema.Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName()));

      return maps(repeatedD, repeatedR,
          option(keyType, keyD, keyWriter),
          option(valueType, valueD, valueWriter));
    }

    @Override
    public ParquetValueWriter<?> primitive(PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());

      // Logical (original) type annotations take precedence over the physical type.
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            return strings(desc);
          case INT_8:
          case INT_16:
          case INT_32:
          case INT_64:
            return unboxed(desc);
          case DATE:
            return new DateWriter(desc);
          case TIME_MICROS:
            return new TimeWriter(desc);
          case TIMESTAMP_MICROS:
            return new TimestamptzWriter(desc);
          case DECIMAL:
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case INT32:
                return decimalAsInteger(desc, decimal.getPrecision(), decimal.getScale());
              case INT64:
                return decimalAsLong(desc, decimal.getPrecision(), decimal.getScale());
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return decimalAsFixed(desc, decimal.getPrecision(), decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return byteBuffers(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      // No logical annotation: dispatch on the physical Parquet type.
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          return new FixedWriter(desc);
        case BINARY:
          return byteBuffers(desc);
        case BOOLEAN:
        case INT32:
        case INT64:
        case FLOAT:
        case DOUBLE:
          return unboxed(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    // Path to the column currently being visited, outermost field first.
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      int pos = 0;
      Iterator<String> levels = fieldNames.descendingIterator();
      while (levels.hasNext()) {
        path[pos] = levels.next();
        pos += 1;
      }
      return path;
    }

    // Path to a named child of the column currently being visited.
    private String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      int pos = 0;
      Iterator<String> levels = fieldNames.descendingIterator();
      while (levels.hasNext()) {
        path[pos] = levels.next();
        pos += 1;
      }
      return path;
    }
  }

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /** Writes a LocalDate as days from the Unix epoch. */
  private static class DateWriter extends PrimitiveWriter<LocalDate> {
    private DateWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalDate value) {
      column.writeInteger(repetitionLevel, (int) ChronoUnit.DAYS.between(EPOCH_DAY, value));
    }
  }

  /** Writes a LocalTime as microseconds from midnight. */
  private static class TimeWriter extends PrimitiveWriter<LocalTime> {
    private TimeWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalTime value) {
      column.writeLong(repetitionLevel, value.toNanoOfDay() / 1000);
    }
  }

  /**
   * Writes a LocalDateTime as microseconds from the epoch, interpreting it as UTC.
   * NOTE(review): not referenced by primitive() above — TIMESTAMP_MICROS always uses
   * TimestamptzWriter. Presumably kept for non-UTC-adjusted timestamps; confirm before removing.
   */
  private static class TimestampWriter extends PrimitiveWriter<LocalDateTime> {
    private TimestampWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalDateTime value) {
      column.writeLong(repetitionLevel,
          ChronoUnit.MICROS.between(EPOCH, value.atOffset(ZoneOffset.UTC)));
    }
  }

  /** Writes an OffsetDateTime as microseconds from the epoch. */
  private static class TimestamptzWriter extends PrimitiveWriter<OffsetDateTime> {
    private TimestamptzWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, OffsetDateTime value) {
      column.writeLong(repetitionLevel, ChronoUnit.MICROS.between(EPOCH, value));
    }
  }

  /** Writes a fixed-length byte array. */
  private static class FixedWriter extends PrimitiveWriter<byte[]> {
    private FixedWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, byte[] value) {
      column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(value));
    }
  }

  /** Struct writer that reads field values positionally from a generic Record. */
  private static class RecordWriter extends StructWriter<Record> {
    private RecordWriter(List<ParquetValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(Record struct, int index) {
      return struct.get(index);
    }
  }
}
6,575
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/parquet/GenericParquetReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.data.GenericRecord;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.parquet.ParquetValueReader;
import com.netflix.iceberg.parquet.ParquetValueReaders;
import com.netflix.iceberg.parquet.ParquetValueReaders.BinaryAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.BytesReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.ListReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.MapReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.PrimitiveReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StringReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import com.netflix.iceberg.parquet.TypeWithSchemaVisitor;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import com.netflix.iceberg.types.Types.TimestampType;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetValueReaders.option;

/**
 * Builds {@link ParquetValueReader} trees that materialize Parquet rows as generic
 * {@link GenericRecord}s, projecting the file schema onto an expected Iceberg {@link Schema}.
 */
public class GenericParquetReaders {
  private GenericParquetReaders() {
  }

  /**
   * Builds a reader that projects {@code fileSchema} onto {@code expectedSchema}.
   * <p>
   * When the file schema carries Iceberg field IDs, fields are matched by ID; otherwise a
   * fallback builder matches nested fields by position.
   */
  @SuppressWarnings("unchecked")
  public static ParquetValueReader<GenericRecord> buildReader(Schema expectedSchema,
                                                              MessageType fileSchema) {
    if (hasIds(fileSchema)) {
      return (ParquetValueReader<GenericRecord>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new ReadBuilder(fileSchema));
    } else {
      return (ParquetValueReader<GenericRecord>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new FallbackReadBuilder(fileSchema));
    }
  }

  /** Builder used when the file schema has no field IDs: nested fields match by position. */
  private static class FallbackReadBuilder extends ReadBuilder {
    FallbackReadBuilder(MessageType type) {
      super(type);
    }

    @Override
    public ParquetValueReader<?> message(StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      // the top level matches by ID, but the remaining IDs are missing
      return super.struct(expected, message, fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // the expected struct is ignored: without IDs, nested fields can only match by position
      List<ParquetValueReader<?>> newFields =
          Lists.newArrayListWithExpectedSize(fieldReaders.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size());
      List<Type> fields = struct.getFields();
      for (int pos = 0; pos < fields.size(); pos += 1) {
        Type fieldType = fields.get(pos);
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        newFields.add(option(fieldType, fieldD, fieldReaders.get(pos)));
        types.add(fieldType);
      }
      return new RecordReader(types, newFields, expected);
    }
  }

  /** Builder used when the file schema carries Iceberg field IDs: fields match by ID. */
  private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
    final MessageType type;

    ReadBuilder(MessageType type) {
      this.type = type;
    }

    @Override
    public ParquetValueReader<?> message(StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      return struct(expected, message.asGroupType(), fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // index the file's field readers and types by field ID
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
      for (int pos = 0; pos < fields.size(); pos += 1) {
        Type fieldType = fields.get(pos);
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        int id = fieldType.getId().intValue();
        readersById.put(id, option(fieldType, fieldD, fieldReaders.get(pos)));
        typesById.put(id, fieldType);
      }

      // emit readers in the expected struct's order, producing nulls for missing columns
      List<Types.NestedField> expectedFields =
          expected != null ? expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields =
          Lists.newArrayListWithExpectedSize(expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
      for (Types.NestedField field : expectedFields) {
        int id = field.fieldId();
        ParquetValueReader<?> reader = readersById.get(id);
        if (reader != null) {
          reorderedFields.add(reader);
          types.add(typesById.get(id));
        } else {
          reorderedFields.add(ParquetValueReaders.nulls());
          types.add(null);
        }
      }

      return new RecordReader(types, reorderedFields, expected);
    }

    @Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
                                      ParquetValueReader<?> elementReader) {
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;

      return new ListReader<>(repeatedD, repeatedR,
          option(elementType, elementD, elementReader));
    }

    @Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
                                     ParquetValueReader<?> keyReader,
                                     ParquetValueReader<?> valueReader) {
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
      Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;

      return new MapReader<>(repeatedD, repeatedR,
          option(keyType, keyD, keyReader),
          option(valueType, valueD, valueReader));
    }

    @Override
    public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected,
                                           PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());

      // Logical (original) type annotations take precedence over the physical type.
      // NOTE(review): this branch dereferences `expected` without a null check, unlike the
      // physical-type branch below — presumably logical-typed columns always have an expected
      // type here; confirm against the visitor.
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            return new StringReader(desc);
          case INT_8:
          case INT_16:
          case INT_32:
            if (expected.typeId() == TypeID.LONG) {
              return new IntAsLongReader(desc);
            } else {
              return new UnboxedReader<>(desc);
            }
          case INT_64:
            return new UnboxedReader<>(desc);
          case DATE:
            return new DateReader(desc);
          case TIMESTAMP_MICROS:
            TimestampType tsMicrosType = (TimestampType) expected;
            if (tsMicrosType.shouldAdjustToUTC()) {
              return new TimestamptzReader(desc);
            } else {
              return new TimestampReader(desc);
            }
          case TIMESTAMP_MILLIS:
            TimestampType tsMillisType = (TimestampType) expected;
            if (tsMillisType.shouldAdjustToUTC()) {
              return new TimestamptzMillisReader(desc);
            } else {
              return new TimestampMillisReader(desc);
            }
          case DECIMAL:
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return new BinaryAsDecimalReader(desc, decimal.getScale());
              case INT64:
                return new LongAsDecimalReader(desc, decimal.getScale());
              case INT32:
                return new IntegerAsDecimalReader(desc, decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return new BytesReader(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      // No logical annotation: dispatch on the physical Parquet type, promoting where the
      // expected Iceberg type is wider.
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          return new FixedReader(desc);
        case BINARY:
          return new BytesReader(desc);
        case INT32:
          if (expected != null && expected.typeId() == TypeID.LONG) {
            return new IntAsLongReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case FLOAT:
          if (expected != null && expected.typeId() == TypeID.DOUBLE) {
            return new ParquetValueReaders.FloatAsDoubleReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case BOOLEAN:
        case INT64:
        case DOUBLE:
          return new UnboxedReader<>(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    // Path to the column currently being visited, outermost field first.
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      int pos = 0;
      Iterator<String> levels = fieldNames.descendingIterator();
      while (levels.hasNext()) {
        path[pos] = levels.next();
        pos += 1;
      }
      return path;
    }

    // Path to a named child of the column currently being visited.
    protected String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      int pos = 0;
      Iterator<String> levels = fieldNames.descendingIterator();
      while (levels.hasNext()) {
        path[pos] = levels.next();
        pos += 1;
      }
      return path;
    }
  }

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /** Reads a date column stored as days from the Unix epoch. */
  private static class DateReader extends PrimitiveReader<LocalDate> {
    private DateReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDate read(LocalDate reuse) {
      return EPOCH_DAY.plusDays(column.nextInteger());
    }
  }

  /** Reads a timestamp (no zone) column stored as microseconds from the epoch. */
  private static class TimestampReader extends PrimitiveReader<LocalDateTime> {
    private TimestampReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDateTime read(LocalDateTime reuse) {
      return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  /** Reads a timestamp (no zone) column stored as milliseconds from the epoch. */
  private static class TimestampMillisReader extends PrimitiveReader<LocalDateTime> {
    private TimestampMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDateTime read(LocalDateTime reuse) {
      return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  /** Reads a timestamptz column stored as microseconds from the epoch. */
  private static class TimestamptzReader extends PrimitiveReader<OffsetDateTime> {
    private TimestamptzReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public OffsetDateTime read(OffsetDateTime reuse) {
      return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS);
    }
  }

  /** Reads a timestamptz column stored as milliseconds from the epoch. */
  private static class TimestamptzMillisReader extends PrimitiveReader<OffsetDateTime> {
    private TimestamptzMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public OffsetDateTime read(OffsetDateTime reuse) {
      return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS);
    }
  }

  /** Reads a fixed-length binary column into a byte array, filling {@code reuse} if given. */
  private static class FixedReader extends PrimitiveReader<byte[]> {
    private FixedReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public byte[] read(byte[] reuse) {
      if (reuse != null) {
        column.nextBinary().toByteBuffer().duplicate().get(reuse);
        return reuse;
      } else {
        return column.nextBinary().getBytes();
      }
    }
  }

  /** Struct reader that materializes rows as GenericRecord instances. */
  static class RecordReader extends StructReader<Record, Record> {
    private final StructType struct;

    RecordReader(List<Type> types, List<ParquetValueReader<?>> readers, StructType struct) {
      super(types, readers);
      this.struct = struct;
    }

    @Override
    protected Record newStructData(Record reuse) {
      // reuse an existing record when possible to reduce allocation
      return reuse != null ? reuse : GenericRecord.create(struct);
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Object getField(Record intermediate, int pos) {
      return intermediate.get(pos);
    }

    @Override
    protected Record buildStruct(Record struct) {
      return struct;
    }

    @Override
    protected void set(Record struct, int pos, Object value) {
      struct.set(pos, value);
    }
  }
}
6,576
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestIcebergSource.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.netflix.iceberg.Table; import org.apache.spark.sql.sources.v2.DataSourceOptions; public class TestIcebergSource extends IcebergSource { @Override public String shortName() { return "iceberg-test"; } @Override protected Table findTable(DataSourceOptions options) { return TestTables.load(options.get("iceberg.table.name").get()); } }
6,577
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestSparkReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Fixed;
import org.apache.avro.generic.GenericData.Record;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.UUID;

import static com.netflix.iceberg.Files.localOutput;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
import static org.apache.avro.Schema.Type.NULL;
import static org.apache.avro.Schema.Type.UNION;

/**
 * Runs the {@code TestReadProjection} projection cases through the Spark read path,
 * once per file format (parquet and avro).
 *
 * <p>Each case writes a single record with {@code writeSchema}, then reads it back with
 * a (possibly evolved) {@code readSchema} via the Spark DataSource and converts the
 * resulting Row back into an Avro Record so the superclass can compare records directly.
 */
@RunWith(Parameterized.class)
public class TestSparkReadProjection extends TestReadProjection {

  // Shared local SparkSession for every parameterized run; created/destroyed per class.
  private static SparkSession spark = null;

  @Parameterized.Parameters
  public static Object[][] parameters() {
    return new Object[][] {
        new Object[] { "parquet" },
        new Object[] { "avro" }
    };
  }

  public TestSparkReadProjection(String format) {
    super(format);
  }

  @BeforeClass
  public static void startSpark() {
    TestSparkReadProjection.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // Null the static reference before stopping so no test can observe a stopped session.
    SparkSession spark = TestSparkReadProjection.spark;
    TestSparkReadProjection.spark = null;
    spark.stop();
  }

  /**
   * Writes {@code record} with {@code writeSchema}, evolves the table metadata to a
   * read schema with matching column ids, and reads the row back through Spark.
   *
   * @param desc unique test name, used as the folder and TestTables table name
   * @param writeSchema schema used to write the data file
   * @param readSchema projection schema to read back (ids are remapped to the table's)
   * @param record the record to round-trip
   * @return the row read by Spark, converted back to an Avro Record in readSchema
   */
  @Override
  protected Record writeAndRead(String desc, Schema writeSchema, Schema readSchema,
                                Record record) throws IOException {
    File parent = temp.newFolder(desc);
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    Assert.assertTrue("mkdirs should succeed", dataFolder.mkdirs());

    FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));
    File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString()));

    Table table = TestTables.create(location, desc, writeSchema, PartitionSpec.unpartitioned());
    try {
      // Important: use the table's schema for the rest of the test
      // When tables are created, the column ids are reassigned.
      Schema tableSchema = table.schema();

      // NOTE(review): no default branch — formats other than AVRO/PARQUET would silently
      // write nothing; only those two are in parameters() today.
      switch (fileFormat) {
        case AVRO:
          try (FileAppender<Record> writer = Avro.write(localOutput(testFile))
              .schema(tableSchema)
              .build()) {
            writer.add(record);
          }
          break;
        case PARQUET:
          try (FileAppender<Record> writer = Parquet.write(localOutput(testFile))
              .schema(tableSchema)
              .build()) {
            writer.add(record);
          }
          break;
      }

      // NOTE(review): record count is declared as 100 although only one record is written;
      // presumably the count is not validated on this read path — confirm before relying on it.
      DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
          .withRecordCount(100)
          .withFileSizeInBytes(testFile.length())
          .withPath(testFile.toString())
          .build();

      table.newAppend().appendFile(file).commit();

      // rewrite the read schema for the table's reassigned ids
      Map<Integer, Integer> idMapping = Maps.newHashMap();
      for (int id : allIds(writeSchema)) {
        // translate each id to the original schema's column name, then to the new schema's id
        String originalName = writeSchema.findColumnName(id);
        idMapping.put(id, tableSchema.findField(originalName).fieldId());
      }
      Schema expectedSchema = reassignIds(readSchema, idMapping);

      // Set the schema to the expected schema directly to simulate the table schema evolving
      TestTables.replaceMetadata(desc,
          TestTables.readMetadata(desc).updateSchema(expectedSchema, 100));

      Dataset<Row> df = spark.read()
          .format("com.netflix.iceberg.spark.source.TestIcebergSource")
          .option("iceberg.table.name", desc)
          .load();

      // convert to Avro using the read schema so that the record schemas match
      return convert(AvroSchemaUtil.convert(readSchema, "table"), df.collectAsList().get(0));

    } finally {
      TestTables.clearTables();
    }
  }

  /**
   * Recursively converts a Spark value into its Avro generic-data equivalent,
   * guided by the target Avro schema (Rows become Records, byte[] becomes
   * ByteBuffer/Fixed, containers recurse element-wise).
   */
  @SuppressWarnings("unchecked")
  private Object convert(org.apache.avro.Schema schema, Object object) {
    switch (schema.getType()) {
      case RECORD:
        return convert(schema, (Row) object);
      case ARRAY:
        List<Object> convertedList = Lists.newArrayList();
        List<?> list = (List<?>) object;
        for (Object element : list) {
          convertedList.add(convert(schema.getElementType(), element));
        }
        return convertedList;
      case MAP:
        Map<String, Object> convertedMap = Maps.newLinkedHashMap();
        Map<String, ?> map = (Map<String, ?>) object;
        for (Map.Entry<String, ?> entry : map.entrySet()) {
          convertedMap.put(entry.getKey(), convert(schema.getValueType(), entry.getValue()));
        }
        return convertedMap;
      case UNION:
        // Only two-branch (null, T) unions are handled: pick the non-null branch.
        if (object == null) {
          return null;
        }
        List<org.apache.avro.Schema> types = schema.getTypes();
        if (types.get(0).getType() != NULL) {
          return convert(types.get(0), object);
        } else {
          return convert(types.get(1), object);
        }
      case FIXED:
        Fixed convertedFixed = new Fixed(schema);
        convertedFixed.bytes((byte[]) object);
        return convertedFixed;
      case BYTES:
        return ByteBuffer.wrap((byte[]) object);
      case BOOLEAN:
      case INT:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case STRING:
        // Spark primitives map 1:1 onto Avro primitives; pass through unchanged.
        return object;
      case NULL:
        return null;
      default:
        throw new UnsupportedOperationException("Not a supported type: " + schema);
    }
  }

  /**
   * Converts a Spark Row into an Avro Record by positional field access,
   * unwrapping optional (null-union) field schemas first.
   */
  private Record convert(org.apache.avro.Schema schema, Row row) {
    if (schema.getType() == UNION) {
      // unwrap an optional record schema to the non-null branch
      if (schema.getTypes().get(0).getType() != NULL) {
        schema = schema.getTypes().get(0);
      } else {
        schema = schema.getTypes().get(1);
      }
    }

    Record record = new Record(schema);
    List<org.apache.avro.Schema.Field> fields = schema.getFields();
    for (int i = 0; i < fields.size(); i += 1) {
      org.apache.avro.Schema.Field field = fields.get(i);
      org.apache.avro.Schema fieldSchema = field.schema();
      if (fieldSchema.getType() == UNION) {
        // unwrap optional field schemas the same way to dispatch on the value type
        if (fieldSchema.getTypes().get(0).getType() != NULL) {
          fieldSchema = fieldSchema.getTypes().get(0);
        } else {
          fieldSchema = fieldSchema.getTypes().get(1);
        }
      }
      switch (fieldSchema.getType()) {
        case RECORD:
          record.put(i, convert(field.schema(), row.getStruct(i)));
          break;
        case ARRAY:
          record.put(i, convert(field.schema(), row.getList(i)));
          break;
        case MAP:
          record.put(i, convert(field.schema(), row.getJavaMap(i)));
          break;
        default:
          record.put(i, convert(field.schema(), row.get(i)));
      }
    }
    return record;
  }

  /**
   * Collects every assigned field/element/key/value id in the schema,
   * used to build the old-id to table-id mapping in writeAndRead.
   */
  private List<Integer> allIds(Schema schema) {
    List<Integer> ids = Lists.newArrayList();
    TypeUtil.visit(schema, new TypeUtil.SchemaVisitor<Void>() {
      @Override
      public Void field(Types.NestedField field, Void fieldResult) {
        ids.add(field.fieldId());
        return null;
      }

      @Override
      public Void list(Types.ListType list, Void elementResult) {
        ids.add(list.elementId());
        return null;
      }

      @Override
      public Void map(Types.MapType map, Void keyResult, Void valueResult) {
        ids.add(map.keyId());
        ids.add(map.valueId());
        return null;
      }
    });
    return ids;
  }

  /**
   * Rebuilds {@code schema} with each id replaced via {@code idMapping}; ids that are
   * not in the mapping (columns added only in the read schema) are offset by 1000 so
   * they cannot collide with the table's reassigned ids.
   */
  private Schema reassignIds(Schema schema, Map<Integer, Integer> idMapping) {
    return new Schema(TypeUtil.visit(schema, new TypeUtil.SchemaVisitor<Type>() {
      private int map(int id) {
        if (idMapping.containsKey(id)) {
          return idMapping.get(id);
        }
        return 1000 + id; // make sure the new IDs don't conflict with reassignment
      }

      @Override
      public Type schema(Schema schema, Type structResult) {
        return structResult;
      }

      @Override
      public Type struct(Types.StructType struct, List<Type> fieldResults) {
        // rebuild each field with its mapped id, preserving optionality and name
        List<Types.NestedField> newFields =
            Lists.newArrayListWithExpectedSize(fieldResults.size());
        List<Types.NestedField> fields = struct.fields();
        for (int i = 0; i < fields.size(); i += 1) {
          Types.NestedField field = fields.get(i);
          if (field.isOptional()) {
            newFields.add(optional(map(field.fieldId()), field.name(), fieldResults.get(i)));
          } else {
            newFields.add(required(map(field.fieldId()), field.name(), fieldResults.get(i)));
          }
        }
        return Types.StructType.of(newFields);
      }

      @Override
      public Type field(Types.NestedField field, Type fieldResult) {
        return fieldResult;
      }

      @Override
      public Type list(Types.ListType list, Type elementResult) {
        if (list.isElementOptional()) {
          return Types.ListType.ofOptional(map(list.elementId()), elementResult);
        } else {
          return Types.ListType.ofRequired(map(list.elementId()), elementResult);
        }
      }

      @Override
      public Type map(Types.MapType map, Type keyResult, Type valueResult) {
        if (map.isValueOptional()) {
          return Types.MapType.ofOptional(
              map(map.keyId()), map(map.valueId()), keyResult, valueResult);
        } else {
          return Types.MapType.ofRequired(
              map(map.keyId()), map(map.valueId()), keyResult, valueResult);
        }
      }

      @Override
      public Type primitive(Type.PrimitiveType primitive) {
        return primitive;
      }
    }).asNestedType().asStructType().fields());
  }
}
6,578
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestOrcScan.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.orc.ORC;
import com.netflix.iceberg.orc.OrcFileAppender;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.SparkOrcWriter;
import com.netflix.iceberg.spark.data.TestHelpers;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.serde2.io.TimestampWritable;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.storage.serde2.io.DateWritable;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.expressions.SpecializedGetters;
import org.apache.spark.sql.catalyst.util.ArrayData;
import org.apache.spark.sql.catalyst.util.DateTimeUtils;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;

import static com.netflix.iceberg.Files.localOutput;

/**
 * Round-trips deterministic random data through an Iceberg-backed ORC file and
 * reads it back with the Spark "iceberg" source, comparing row by row.
 *
 * <p>Schemas under test are supplied by the {@code AvroDataTest} superclass,
 * which calls {@link #writeAndValidate(Schema)} once per schema case.
 */
public class TestOrcScan extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // Shared local SparkSession for all schema cases in this class.
  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestOrcScan.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // Null the static reference before stopping so no test can observe a stopped session.
    SparkSession spark = TestOrcScan.spark;
    TestOrcScan.spark = null;
    spark.stop();
  }

  /**
   * Writes ROW_COUNT rows of seeded random data to a single ORC data file in a new
   * HadoopTables table, then reads them back via Spark and asserts equality against
   * a fresh generator with the same seed (generation is deterministic per seed).
   */
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    System.out.println("Starting ORC test with " + schema);
    final int ROW_COUNT = 100;
    final long SEED = 1;
    File parent = temp.newFolder("orc");
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    // NOTE(review): mkdirs() result is ignored here, unlike sibling tests that assert it.
    dataFolder.mkdirs();
    File orcFile = new File(dataFolder,
        FileFormat.ORC.addExtension(UUID.randomUUID().toString()));
    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString());

    // Important: use the table's schema for the rest of the test
    // When tables are created, the column ids are reassigned.
    Schema tableSchema = table.schema();
    Metrics metrics;
    SparkOrcWriter writer = new SparkOrcWriter(ORC.write(localOutput(orcFile))
        .schema(tableSchema)
        .build());
    try {
      writer.addAll(RandomData.generateSpark(tableSchema, ROW_COUNT, SEED));
    } finally {
      writer.close();
      // close writes the last batch, so metrics are not correct until after close is called
      metrics = writer.metrics();
    }

    DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
        .withFileSizeInBytes(orcFile.length())
        .withPath(orcFile.toString())
        .withMetrics(metrics)
        .build();
    table.newAppend().appendFile(file).commit();

    Dataset<Row> df = spark.read()
        .format("iceberg")
        .load(location.toString());
    List<Row> rows = df.collectAsList();
    Assert.assertEquals("Wrong number of rows", ROW_COUNT, rows.size());

    // Regenerate the identical sequence with the same seed to compare row by row.
    Iterator<InternalRow> expected = RandomData.generateSpark(tableSchema, ROW_COUNT, SEED);
    for (int i = 0; i < ROW_COUNT; ++i) {
      TestHelpers.assertEquals("row " + i, schema.asStruct(), expected.next(), rows.get(i));
    }
  }
}
6,579
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/SimpleRecord.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import java.util.Objects;

/**
 * Simple mutable two-field bean ({@code id}, {@code data}) used by the Spark source
 * tests as a row type; equality, hash code, and string form are value-based on both
 * fields.
 *
 * <p>Uses {@link java.util.Objects} instead of the former Guava
 * {@code com.google.common.base.Objects}: the null-safe equality and varargs hash
 * semantics are identical (both hash via {@code Arrays.hashCode}), so behavior is
 * unchanged while the third-party dependency is dropped.
 */
public class SimpleRecord {
  private Integer id;
  private String data;

  /** No-arg constructor, required for bean-style instantiation. */
  public SimpleRecord() {
  }

  SimpleRecord(Integer id, String data) {
    this.id = id;
    this.data = data;
  }

  public Integer getId() {
    return id;
  }

  public void setId(Integer id) {
    this.id = id;
  }

  public String getData() {
    return data;
  }

  public void setData(String data) {
    this.data = data;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }
    SimpleRecord record = (SimpleRecord) o;
    return Objects.equals(id, record.id) && Objects.equals(data, record.data);
  }

  @Override
  public int hashCode() {
    return Objects.hash(id, data);
  }

  @Override
  public String toString() {
    // Produces: {"id"=<id>,"data"="<data>"} — byte-identical to the previous
    // StringBuilder version (null fields print as the literal "null").
    return "{\"id\"=" + id + ",\"data\"=\"" + data + "\"}";
  }
}
6,580
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestDataFrameWrites.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableProperties;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroIterable;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.SparkAvroReader;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.InternalRow;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.List;

import static com.netflix.iceberg.spark.SparkSchemaUtil.convert;
import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsSafe;
import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe;

/**
 * Exercises the Spark DataFrame write path for each file format (parquet, orc, avro):
 * generates random records, writes them through {@code df.write().format("iceberg")},
 * reads them back, and verifies both the data and where the data files were placed.
 */
@RunWith(Parameterized.class)
public class TestDataFrameWrites extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  // file format under test, set per parameterized run
  private final String format;

  @Parameterized.Parameters
  public static Object[][] parameters() {
    return new Object[][] {
        new Object[] { "parquet" },
        new Object[] { "orc" },
        new Object[] { "avro" }
    };
  }

  public TestDataFrameWrites(String format) {
    this.format = format;
  }

  // Shared Spark handles for all runs; sc is needed to parallelize InternalRows.
  private static SparkSession spark = null;
  private static JavaSparkContext sc = null;

  @BeforeClass
  public static void startSpark() {
    TestDataFrameWrites.spark = SparkSession.builder().master("local[2]").getOrCreate();
    TestDataFrameWrites.sc = new JavaSparkContext(spark.sparkContext());
  }

  @AfterClass
  public static void stopSpark() {
    // Null the static references before stopping so no test can observe a stopped session.
    SparkSession spark = TestDataFrameWrites.spark;
    TestDataFrameWrites.spark = null;
    TestDataFrameWrites.sc = null;
    spark.stop();
  }

  /** Default case: data files are expected under the table's own "data" directory. */
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    File location = createTableFolder();
    Table table = createTable(schema, location);
    writeAndValidateWithLocations(table, location, new File(location, "data"));
  }

  /**
   * Verifies that WRITE_NEW_DATA_LOCATION redirects new data files to the configured
   * custom directory instead of the default table data directory.
   */
  @Test
  public void testWriteWithCustomDataLocation() throws IOException {
    File location = createTableFolder();
    File tablePropertyDataLocation = temp.newFolder("test-table-property-data-dir");
    Table table = createTable(new Schema(SUPPORTED_PRIMITIVES.fields()), location);
    table.updateProperties().set(
        TableProperties.WRITE_NEW_DATA_LOCATION,
        tablePropertyDataLocation.getAbsolutePath()).commit();
    writeAndValidateWithLocations(table, location, tablePropertyDataLocation);
  }

  /** Creates a fresh, empty table directory under the temp folder. */
  private File createTableFolder() throws IOException {
    File parent = temp.newFolder("parquet");
    File location = new File(parent, "test");
    Assert.assertTrue("Mkdir should succeed", location.mkdirs());
    return location;
  }

  private Table createTable(Schema schema, File location) {
    HadoopTables tables = new HadoopTables(CONF);
    return tables.create(schema, PartitionSpec.unpartitioned(), location.toString());
  }

  /**
   * Writes 100 random records through the DataFrame writer, reads them back, asserts
   * value equality, and asserts every committed data file lives under expectedDataDir.
   */
  private void writeAndValidateWithLocations(Table table, File location, File expectedDataDir)
      throws IOException {
    Schema tableSchema = table.schema(); // use the table schema because ids are reassigned

    table.updateProperties().set(TableProperties.DEFAULT_FILE_FORMAT, format).commit();

    List<Record> expected = RandomData.generateList(tableSchema, 100, 0L);
    Dataset<Row> df = createDataset(expected, tableSchema);
    DataFrameWriter<?> writer = df.write().format("iceberg").mode("append");

    writer.save(location.toString());

    table.refresh();

    Dataset<Row> result = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<Row> actual = result.collectAsList();

    Assert.assertEquals("Result size should match expected", expected.size(), actual.size());
    for (int i = 0; i < expected.size(); i += 1) {
      assertEqualsSafe(tableSchema.asStruct(), expected.get(i), actual.get(i));
    }

    // every newly added file must be placed under the expected data directory
    table.currentSnapshot().addedFiles().forEach(dataFile ->
        Assert.assertTrue(
            String.format(
                "File should have the parent directory %s, but has: %s.",
                expectedDataDir.getAbsolutePath(), dataFile.path()),
            URI.create(dataFile.path().toString()).getPath()
                .startsWith(expectedDataDir.getAbsolutePath())));
  }

  /**
   * Builds a DataFrame containing {@code records}: writes them to a temp Avro file,
   * reads them back as InternalRows, and parallelizes those rows.
   */
  private Dataset<Row> createDataset(List<Record> records, Schema schema) throws IOException {
    // this uses the SparkAvroReader to create a DataFrame from the list of records
    // it assumes that SparkAvroReader is correct
    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile))
        .schema(schema)
        .named("test")
        .build()) {
      for (Record rec : records) {
        writer.add(rec);
      }
    }

    List<InternalRow> rows;
    try (AvroIterable<InternalRow> reader = Avro.read(Files.localInput(testFile))
        .createReaderFunc(SparkAvroReader::new)
        .project(schema)
        .build()) {
      rows = Lists.newArrayList(reader);
    }

    // make sure the dataframe matches the records before moving on
    for (int i = 0; i < records.size(); i += 1) {
      assertEqualsUnsafe(schema.asStruct(), records.get(i), rows.get(i));
    }

    JavaRDD<InternalRow> rdd = sc.parallelize(rows);
    return spark.internalCreateDataFrame(JavaRDD.toRDD(rdd), convert(schema), false);
  }
}
6,581
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestFilteredScan.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.hadoop.HadoopTables; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.spark.SparkExpressions; import com.netflix.iceberg.spark.data.TestHelpers; import com.netflix.iceberg.transforms.Transform; import com.netflix.iceberg.transforms.Transforms; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.hadoop.conf.Configuration; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.api.java.UDF1; import org.apache.spark.sql.catalyst.expressions.Expression; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import org.apache.spark.sql.sources.v2.DataSourceOptions; import org.apache.spark.sql.sources.v2.reader.DataSourceReader; import 
org.apache.spark.sql.sources.v2.reader.DataReaderFactory; import org.apache.spark.sql.sources.v2.reader.SupportsPushDownCatalystFilters; import org.apache.spark.sql.sources.v2.reader.SupportsScanUnsafeRow; import org.apache.spark.sql.types.DateType$; import org.apache.spark.sql.types.IntegerType$; import org.apache.spark.sql.types.StringType$; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.sql.Timestamp; import java.util.List; import java.util.Locale; import java.util.UUID; import static com.netflix.iceberg.Files.localOutput; import static org.apache.spark.sql.catalyst.util.DateTimeUtils.fromJavaTimestamp; import static org.apache.spark.sql.functions.callUDF; import static org.apache.spark.sql.functions.col; import static org.apache.spark.sql.functions.column; import static org.apache.spark.sql.functions.lit; import static org.apache.spark.sql.functions.to_date; @RunWith(Parameterized.class) public class TestFilteredScan { private static final Configuration CONF = new Configuration(); private static final HadoopTables TABLES = new HadoopTables(CONF); private static final Schema SCHEMA = new Schema( Types.NestedField.required(1, "id", Types.LongType.get()), Types.NestedField.optional(2, "ts", Types.TimestampType.withZone()), Types.NestedField.optional(3, "data", Types.StringType.get()) ); private static final PartitionSpec BUCKET_BY_ID = PartitionSpec.builderFor(SCHEMA) .bucket("id", 4) .build(); private static final PartitionSpec PARTITION_BY_DAY = PartitionSpec.builderFor(SCHEMA) .day("ts") .build(); private static final PartitionSpec PARTITION_BY_HOUR = PartitionSpec.builderFor(SCHEMA) .hour("ts") .build(); private static final PartitionSpec PARTITION_BY_FIRST_LETTER = 
PartitionSpec.builderFor(SCHEMA) .truncate("data", 1) .build(); private static SparkSession spark = null; @BeforeClass public static void startSpark() { TestFilteredScan.spark = SparkSession.builder().master("local[2]").getOrCreate(); // define UDFs used by partition tests Transform<Long, Integer> bucket4 = Transforms.bucket(Types.LongType.get(), 4); spark.udf().register("bucket4", (UDF1<Long, Integer>) bucket4::apply, IntegerType$.MODULE$); Transform<Long, Integer> day = Transforms.day(Types.TimestampType.withZone()); spark.udf().register("ts_day", (UDF1<Timestamp, Integer>) timestamp -> day.apply(fromJavaTimestamp(timestamp)), IntegerType$.MODULE$); Transform<Long, Integer> hour = Transforms.hour(Types.TimestampType.withZone()); spark.udf().register("ts_hour", (UDF1<Timestamp, Integer>) timestamp -> hour.apply(fromJavaTimestamp(timestamp)), IntegerType$.MODULE$); Transform<CharSequence, CharSequence> trunc1 = Transforms.truncate(Types.StringType.get(), 1); spark.udf().register("trunc1", (UDF1<CharSequence, CharSequence>) str -> trunc1.apply(str.toString()), StringType$.MODULE$); } @AfterClass public static void stopSpark() { SparkSession spark = TestFilteredScan.spark; TestFilteredScan.spark = null; spark.stop(); } @Rule public TemporaryFolder temp = new TemporaryFolder(); private final String format; @Parameterized.Parameters public static Object[][] parameters() { return new Object[][] { new Object[] { "parquet" }, new Object[] { "avro" } }; } public TestFilteredScan(String format) { this.format = format; } private File parent = null; private File unpartitioned = null; private List<Record> records = null; @Before public void writeUnpartitionedTable() throws IOException { this.parent = temp.newFolder("TestFilteredScan"); this.unpartitioned = new File(parent, "unpartitioned"); File dataFolder = new File(unpartitioned, "data"); Assert.assertTrue("Mkdir should succeed", dataFolder.mkdirs()); Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), 
unpartitioned.toString()); Schema tableSchema = table.schema(); // use the table schema because ids are reassigned FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH)); File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString())); // create records using the table's schema org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(tableSchema, "test"); this.records = testRecords(avroSchema); switch (fileFormat) { case AVRO: try (FileAppender<Record> writer = Avro.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.addAll(records); } break; case PARQUET: try (FileAppender<Record> writer = Parquet.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.addAll(records); } break; } DataFile file = DataFiles.builder(PartitionSpec.unpartitioned()) .withRecordCount(records.size()) .withFileSizeInBytes(testFile.length()) .withPath(testFile.toString()) .build(); table.newAppend().appendFile(file).commit(); } @Test public void testUnpartitionedIDFilters() { DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", unpartitioned.toString()) ); IcebergSource source = new IcebergSource(); for (int i = 0; i < 10; i += 1) { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("id", i)); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should only create one task for a small file", 1, tasks.size()); // validate row filtering assertEqualsSafe(SCHEMA.asStruct(), expected(i), read(unpartitioned.toString(), "id = " + i)); } } @Test public void testUnpartitionedTimestampFilter() { DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", unpartitioned.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = 
planTasks(reader); Assert.assertEquals("Should only create one task for a small file", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5,6,7,8,9), read(unpartitioned.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } @Test public void testBucketPartitionedIDFilters() { File location = buildPartitionedTable("bucketed_by_id", BUCKET_BY_ID, "bucket4", "id"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 4 read tasks", 4, planTasks(unfiltered).size()); for (int i = 0; i < 10; i += 1) { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("id", i)); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); // validate predicate push-down Assert.assertEquals("Should create one task for a single bucket", 1, tasks.size()); // validate row filtering assertEqualsSafe(SCHEMA.asStruct(), expected(i), read(location.toString(), "id = " + i)); } } @Test public void testDayPartitionedTimestampFilters() { File location = buildPartitionedTable("partitioned_by_day", PARTITION_BY_DAY, "ts_day", "ts"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); int day = Literal.of("2017-12-21").<Integer>to(Types.DateType.get()).value(); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 2 read tasks", 2, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 
7, 8, 9), read(location.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, col("ts").cast(DateType$.MODULE$).$eq$eq$eq(lit(day)).expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 7, 8, 9), read(location.toString(), "cast(ts as date) = date '2017-12-21'")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, to_date(col("ts")).$eq$eq$eq(lit(day)).expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 7, 8, 9), read(location.toString(), "to_date(ts) = date '2017-12-21'")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.and( Expressions.greaterThan("ts", "2017-12-22T06:00:00+00:00"), Expressions.lessThan("ts", "2017-12-22T08:00:00+00:00"))); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-22", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(1, 2), read(location.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "ts < cast('2017-12-22 08:00:00+00:00' as timestamp)")); } } @Test public void testHourPartitionedTimestampFilters() { File location = buildPartitionedTable("partitioned_by_hour", PARTITION_BY_HOUR, "ts_hour", "ts"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 9 read tasks", 9, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, 
Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 4 tasks for 2017-12-21: 15, 17, 21, 22", 4, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(8, 9, 7, 6, 5), read(location.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.and( Expressions.greaterThan("ts", "2017-12-22T06:00:00+00:00"), Expressions.lessThan("ts", "2017-12-22T08:00:00+00:00"))); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 2 tasks for 2017-12-22: 6, 7", 2, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(2, 1), read(location.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "ts < cast('2017-12-22 08:00:00+00:00' as timestamp)")); } } @Test public void testTrunctateDataPartitionedFilters() { File location = buildPartitionedTable("trunc", PARTITION_BY_FIRST_LETTER, "trunc1", "data"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should have created 9 read tasks", 9, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("data", "goldfish")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 1 task for 'goldfish' (g)", 1, tasks.size()); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, col("data").$eq$eq$eq("goldfish").expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 1 task for 'goldfish' (g)", 1, tasks.size()); } assertEqualsSafe(SCHEMA.asStruct(), expected(9), read(location.toString(), "data = 
'goldfish'")); } @Test public void testFilterByNonProjectedColumn() { { Schema actualProjection = SCHEMA.select("id", "data"); List<Record> expected = Lists.newArrayList(); for (Record rec : expected(5, 6 ,7, 8, 9)) { expected.add(projectFlat(actualProjection, rec)); } assertEqualsSafe(actualProjection.asStruct(), expected, read( unpartitioned.toString(), "cast('2017-12-22 00:00:00+00:00' as timestamp) > ts", "id", "data")); } { // only project id: ts will be projected because of the filter, but data will not be included Schema actualProjection = SCHEMA.select("id"); List<Record> expected = Lists.newArrayList(); for (Record rec : expected(1, 2)) { expected.add(projectFlat(actualProjection, rec)); } assertEqualsSafe(actualProjection.asStruct(), expected, read( unpartitioned.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "cast('2017-12-22 08:00:00+00:00' as timestamp) > ts", "id")); } } private static Record projectFlat(Schema projection, Record record) { org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(projection, "test"); Record result = new Record(avroSchema); List<Types.NestedField> fields = projection.asStruct().fields(); for (int i = 0; i < fields.size(); i += 1) { Types.NestedField field = fields.get(i); result.put(i, record.get(field.name())); } return result; } public static void assertEqualsSafe(Types.StructType struct, List<Record> expected, List<Row> actual) { // TODO: match records by ID int numRecords = Math.min(expected.size(), actual.size()); for (int i = 0; i < numRecords; i += 1) { TestHelpers.assertEqualsSafe(struct, expected.get(i), actual.get(i)); } Assert.assertEquals("Number of results should match expected", expected.size(), actual.size()); } private List<Record> expected(int... 
ordinals) { List<Record> expected = Lists.newArrayListWithExpectedSize(ordinals.length); for (int ord : ordinals) { expected.add(records.get(ord)); } return expected; } private void pushFilters(DataSourceReader reader, com.netflix.iceberg.expressions.Expression... filters) { Expression[] expressions = new Expression[filters.length]; for (int i = 0; i < filters.length; i += 1) { expressions[i] = SparkExpressions.convert(filters[i], SCHEMA); } pushFilters(reader, expressions); } private void pushFilters(DataSourceReader reader, Expression... expressions) { Assert.assertTrue(reader instanceof SupportsPushDownCatalystFilters); SupportsPushDownCatalystFilters filterable = (SupportsPushDownCatalystFilters) reader; filterable.pushCatalystFilters(expressions); } private List<DataReaderFactory<UnsafeRow>> planTasks(DataSourceReader reader) { Assert.assertTrue(reader instanceof SupportsScanUnsafeRow); SupportsScanUnsafeRow unsafeReader = (SupportsScanUnsafeRow) reader; return unsafeReader.createUnsafeRowReaderFactories(); } private File buildPartitionedTable(String desc, PartitionSpec spec, String udf, String partitionColumn) { File location = new File(parent, desc); Table byId = TABLES.create(SCHEMA, spec, location.toString()); // do not combine splits because the tests expect a split per partition byId.updateProperties().set("read.split.target-size", "1").commit(); // copy the unpartitioned table into the partitioned table to produce the partitioned data Dataset<Row> allRows = spark.read() .format("iceberg") .load(unpartitioned.toString()); allRows .coalesce(1) // ensure only 1 file per partition is written .withColumn("part", callUDF(udf, column(partitionColumn))) .sortWithinPartitions("part") .drop("part") .write() .format("iceberg") .mode("append") .save(byId.location()); return location; } private List<Record> testRecords(org.apache.avro.Schema avroSchema) { return Lists.newArrayList( record(avroSchema, 0L, timestamp("2017-12-22T09:20:44.294658+00:00"), "junction"), 
record(avroSchema, 1L, timestamp("2017-12-22T07:15:34.582910+00:00"), "alligator"), record(avroSchema, 2L, timestamp("2017-12-22T06:02:09.243857+00:00"), "forrest"), record(avroSchema, 3L, timestamp("2017-12-22T03:10:11.134509+00:00"), "clapping"), record(avroSchema, 4L, timestamp("2017-12-22T00:34:00.184671+00:00"), "brush"), record(avroSchema, 5L, timestamp("2017-12-21T22:20:08.935889+00:00"), "trap"), record(avroSchema, 6L, timestamp("2017-12-21T21:55:30.589712+00:00"), "element"), record(avroSchema, 7L, timestamp("2017-12-21T17:31:14.532797+00:00"), "limited"), record(avroSchema, 8L, timestamp("2017-12-21T15:21:51.237521+00:00"), "global"), record(avroSchema, 9L, timestamp("2017-12-21T15:02:15.230570+00:00"), "goldfish") ); } private static List<Row> read(String table, String expr) { return read(table, expr, "*"); } private static List<Row> read(String table, String expr, String select0, String... selectN) { Dataset<Row> dataset = spark.read().format("iceberg").load(table).filter(expr) .select(select0, selectN); return dataset.collectAsList(); } private static long timestamp(String timestamp) { return Literal.of(timestamp).<Long>to(Types.TimestampType.withZone()).value(); } private static Record record(org.apache.avro.Schema schema, Object... values) { Record rec = new Record(schema); for (int i = 0; i < values.length; i += 1) { rec.put(i, values[i]); } return rec; } }
6,582
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestParquetWrite.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Round-trip test: rows appended through the Iceberg Spark source (Parquet
 * default format) must be read back identical to what was written.
 */
public class TestParquetWrite {
  private static final Configuration CONF = new Configuration();

  // two optional columns; "data" doubles as the identity partition column
  private static final Schema SCHEMA = new Schema(
      optional(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get())
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestParquetWrite.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // null out the static field first so nothing can observe a stopped session
    SparkSession sparkToStop = TestParquetWrite.spark;
    TestParquetWrite.spark = null;
    sparkToStop.stop();
  }

  @Test
  public void testBasicWrite() throws IOException {
    File parent = temp.newFolder("parquet");
    File location = new File(parent, "test");

    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("data").build();
    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(SCHEMA, spec, location.toString());

    List<SimpleRecord> expected = Lists.newArrayList(
        new SimpleRecord(1, "a"),
        new SimpleRecord(2, "b"),
        new SimpleRecord(3, "c")
    );

    Dataset<Row> sourceDf = spark.createDataFrame(expected, SimpleRecord.class);

    // TODO: incoming columns must be ordered according to the table's schema
    sourceDf.select("id", "data").write()
        .format("iceberg")
        .mode("append")
        .save(location.toString());

    // pick up the snapshot produced by the append before reading back
    table.refresh();

    Dataset<Row> loaded = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<SimpleRecord> actual =
        loaded.orderBy("id").as(Encoders.bean(SimpleRecord.class)).collectAsList();

    Assert.assertEquals("Number of rows should match", expected.size(), actual.size());
    Assert.assertEquals("Result rows should match", expected, actual);
  }
}
6,583
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestOrcWrite.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Round-trip test for the ORC file format: rows appended through the Iceberg
 * Spark source must read back identical to what was written.
 */
public class TestOrcWrite {
  private static final Configuration CONF = new Configuration();

  // two optional columns; "data" doubles as the identity partition column
  private static final Schema SCHEMA = new Schema(
      optional(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get())
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestOrcWrite.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // null out the static field first so nothing can observe a stopped session
    SparkSession sparkToStop = TestOrcWrite.spark;
    TestOrcWrite.spark = null;
    sparkToStop.stop();
  }

  @Test
  public void testBasicWrite() throws IOException {
    File parent = temp.newFolder("orc");
    File location = new File(parent, "test");
    location.mkdirs();

    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("data").build();
    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(SCHEMA, spec, location.toString());

    // switch the table's default write format to uncompressed ORC
    table.updateProperties()
        .defaultFormat(FileFormat.ORC)
        .set(OrcConf.COMPRESS.getAttribute(), CompressionKind.NONE.name())
        .commit();

    List<SimpleRecord> expected = Lists.newArrayList(
        new SimpleRecord(1, "a"),
        new SimpleRecord(2, "b"),
        new SimpleRecord(3, "c")
    );

    Dataset<Row> sourceDf = spark.createDataFrame(expected, SimpleRecord.class);

    // TODO: incoming columns must be ordered according to the table's schema
    sourceDf.select("id", "data").write()
        .format("iceberg")
        .mode("append")
        .save(location.toString());

    // pick up the snapshot produced by the append before reading back
    table.refresh();

    Dataset<Row> loaded = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<SimpleRecord> actual = loaded.orderBy("id").as(
        Encoders.bean(SimpleRecord.class)).collectAsList();

    Assert.assertEquals("Number of rows should match", expected.size(), actual.size());
    Assert.assertEquals("Result rows should match", expected, actual);
  }
}
6,584
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestParquetScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.TestHelpers;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.UUID;

import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.Files.localOutput;
import static com.netflix.iceberg.parquet.ParquetMetrics.fromInputFile;

/**
 * Scan test driven by {@link AvroDataTest}: writes random records for each
 * generated schema directly as a Parquet data file, registers the file with
 * the table, and verifies the Spark read path returns the same rows.
 */
public class TestParquetScan extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestParquetScan.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // null out the static field first so nothing can observe a stopped session
    SparkSession sparkToStop = TestParquetScan.spark;
    TestParquetScan.spark = null;
    sparkToStop.stop();
  }

  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    // parquet-avro only supports string map keys; skip schemas that use others
    Assume.assumeTrue("Cannot handle non-string map keys in parquet-avro",
        null == TypeUtil.find(schema,
            type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));

    File parent = temp.newFolder("parquet");
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    dataFolder.mkdirs();

    File testFile = new File(dataFolder,
        FileFormat.PARQUET.addExtension(UUID.randomUUID().toString()));

    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString());

    // Important: use the table's schema for the rest of the test
    // When tables are created, the column ids are reassigned.
    Schema tableSchema = table.schema();

    List<GenericData.Record> expected = RandomData.generateList(tableSchema, 100, 1L);

    try (FileAppender<GenericData.Record> writer = Parquet.write(localOutput(testFile))
        .schema(tableSchema)
        .build()) {
      writer.addAll(expected);
    }

    DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
        .withFileSizeInBytes(testFile.length())
        .withPath(testFile.toString())
        .withMetrics(fromInputFile(localInput(testFile)))
        .build();

    table.newAppend().appendFile(file).commit();

    Dataset<Row> df = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<Row> rows = df.collectAsList();
    Assert.assertEquals("Should contain 100 rows", 100, rows.size());

    for (int i = 0; i < expected.size(); i += 1) {
      TestHelpers.assertEqualsSafe(tableSchema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
6,585
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.types.Comparators; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.IOException; import java.util.List; import java.util.Map; import static org.apache.avro.Schema.Type.UNION; public abstract class TestReadProjection { final String format; TestReadProjection(String format) { this.format = format; } protected abstract Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException; @Rule public TemporaryFolder temp = new TemporaryFolder(); @Test public void testFullProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Record projected = writeAndRead("full_projection", schema, schema, 
record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("data")); Assert.assertEquals("Should contain the correct data value", 0, cmp); } @Test public void testReorderedFullProjection() throws Exception { // Assume.assumeTrue( // "Spark's Parquet read support does not support reordered columns", // !format.equalsIgnoreCase("parquet")); Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("reordered_full_projection", schema, reordered, record); Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString()); Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1)); } @Test public void testReorderedProjection() throws Exception { // Assume.assumeTrue( // "Spark's Parquet read support does not support reordered columns", // !format.equalsIgnoreCase("parquet")); Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(2, "missing_1", Types.StringType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.optional(3, "missing_2", Types.LongType.get()) ); Record projected = writeAndRead("reordered_projection", schema, reordered, record); Assert.assertNull("Should contain the correct 0 
value", projected.get(0)); Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString()); Assert.assertNull("Should contain the correct 2 value", projected.get(2)); } @Test public void testEmptyProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(schema, "table")); record.put("id", 34L); record.put("data", "test"); Record projected = writeAndRead("empty_projection", schema, schema.select(), record); Assert.assertNotNull("Should read a non-null record", projected); try { projected.get(0); Assert.fail("Should not retrieve value with ordinal 0"); } catch (ArrayIndexOutOfBoundsException e) { // this is expected because there are no values } } @Test public void testBasicProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("data", "test"); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record); Assert.assertNull("Should not project data", projected.get("data")); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Schema dataOnly = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()) ); projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record); Assert.assertNull("Should not project id", projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("data")); Assert.assertEquals("Should contain the correct data value", 0, cmp); } @Test public void testRename() throws Exception { 
Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("data", "test"); Schema readSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "renamed", Types.StringType.get()) ); Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.get("renamed")); Assert.assertEquals("Should contain the correct data/renamed value", 0, cmp); } @Test public void testNestedStructProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record location = new Record(fromOption(record.getSchema().getField("location").schema())); location.put("lat", 52.995143f); location.put("long", -1.539054f); record.put("location", location); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Record projectedLocation = (Record) projected.get("location"); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project location", projectedLocation); Schema latOnly = new Schema( Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()) )) ); projected = 
writeAndRead("latitude_only", writeSchema, latOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertNull("Should not project longitude", projectedLocation.get("long")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f); Schema longOnly = new Schema( Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); projected = writeAndRead("longitude_only", writeSchema, longOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertNull("Should not project latitutde", projectedLocation.get("lat")); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f); Schema locationOnly = writeSchema.select("location"); projected = writeAndRead("location_only", writeSchema, locationOnly, record); projectedLocation = (Record) projected.get("location"); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project location", projected.get("location")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.get("lat"), 0.000001f); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.get("long"), 0.000001f); } @Test public void testMapProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "properties", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get())) ); Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B"); Record record = new 
Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("properties", properties); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project properties map", projected.get("properties")); Schema keyOnly = writeSchema.select("properties.key"); projected = writeAndRead("key_only", writeSchema, keyOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); Schema valueOnly = writeSchema.select("properties.value"); projected = writeAndRead("value_only", writeSchema, valueOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); Schema mapOnly = writeSchema.select("properties"); projected = writeAndRead("map_only", writeSchema, mapOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.get("properties"))); } private Map<String, ?> toStringMap(Map<?, ?> map) { Map<String, Object> stringMap = Maps.newHashMap(); for (Map.Entry<?, ?> entry : map.entrySet()) { if (entry.getValue() instanceof CharSequence) { stringMap.put(entry.getKey().toString(), entry.getValue().toString()); } else { stringMap.put(entry.getKey().toString(), entry.getValue()); } } return stringMap; } @Test public void testMapOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( 
Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) ) )) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record l1 = new Record(fromOption( fromOption(record.getSchema().getField("locations").schema()).getValueType())); l1.put("lat", 53.992811f); l1.put("long", -1.542616f); Record l2 = new Record(l1.getSchema()); l2.put("lat", 52.995143f); l2.put("long", -1.539054f); record.put("locations", ImmutableMap.of("L1", l1, "L2", l2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project locations map", projected.get("locations")); projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project locations map", record.get("locations"), toStringMap((Map) projected.get("locations"))); projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record); Assert.assertNull("Should not project id", projected.get("id")); Map<String, ?> locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); Record projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain lat", 53.992811f, (float) projectedL1.get("lat"), 0.000001); Assert.assertNull("L1 should not contain long", projectedL1.get("long")); Record projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 
should contain lat", 52.995143f, (float) projectedL2.get("lat"), 0.000001); Assert.assertNull("L2 should not contain long", projectedL2.get("long")); projected = writeAndRead("long_only", writeSchema, writeSchema.select("locations.long"), record); Assert.assertNull("Should not project id", projected.get("id")); locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertNull("L1 should not contain lat", projectedL1.get("lat")); Assert.assertEquals("L1 should contain long", -1.542616f, (float) projectedL1.get("long"), 0.000001); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertNull("L2 should not contain lat", projectedL2.get("lat")); Assert.assertEquals("L2 should contain long", -1.539054f, (float) projectedL2.get("long"), 0.000001); Schema latitiudeRenamed = new Schema( Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( Types.NestedField.required(1, "latitude", Types.FloatType.get()) ) )) ); projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record); Assert.assertNull("Should not project id", projected.get("id")); locations = toStringMap((Map) projected.get("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain latitude", 53.992811f, (float) projectedL1.get("latitude"), 0.000001); Assert.assertNull("L1 should not contain lat", projectedL1.get("lat")); 
Assert.assertNull("L1 should not contain long", projectedL1.get("long")); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 should contain latitude", 52.995143f, (float) projectedL2.get("latitude"), 0.000001); Assert.assertNull("L2 should not contain lat", projectedL2.get("lat")); Assert.assertNull("L2 should not contain long", projectedL2.get("long")); } @Test public void testListProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(10, "values", Types.ListType.ofOptional(11, Types.LongType.get())) ); List<Long> values = ImmutableList.of(56L, 57L, 58L); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); record.put("values", values); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project values list", projected.get("values")); Schema elementOnly = writeSchema.select("values.element"); projected = writeAndRead("element_only", writeSchema, elementOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire list", values, projected.get("values")); Schema listOnly = writeSchema.select("values"); projected = writeAndRead("list_only", writeSchema, listOnly, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project entire list", values, projected.get("values")); } @Test @SuppressWarnings("unchecked") public void testListOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(22, "points", 
Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()) )) ) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record p1 = new Record(fromOption( fromOption(record.getSchema().getField("points").schema()).getElementType())); p1.put("x", 1); p1.put("y", 2); Record p2 = new Record(p1.getSchema()); p2.put("x", 3); p2.put("y", null); record.put("points", ImmutableList.of(p1, p2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project points list", projected.get("points")); projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project points list", record.get("points"), projected.get("points")); projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); List<Record> points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); Record projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Record projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record); Assert.assertNull("Should not project id", projected.get("id")); 
Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.get("x")); Assert.assertNull("Should project null y", projectedP2.get("y")); Schema yRenamed = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.optional(18, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("y_renamed", writeSchema, yRenamed, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); Assert.assertNull("Should project null z", projectedP2.get("z")); Schema zAdded = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()), Types.NestedField.optional(20, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("z_added", writeSchema, zAdded, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", 
projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y")); Assert.assertNull("Should contain null z", projectedP1.get("z")); projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x")); Assert.assertNull("Should project null y", projectedP2.get("y")); Assert.assertNull("Should contain null z", projectedP2.get("z")); } private static org.apache.avro.Schema fromOption(org.apache.avro.Schema schema) { Preconditions.checkArgument(schema.getType() == UNION, "Expected union schema but was passed: {}", schema); Preconditions.checkArgument(schema.getTypes().size() == 2, "Expected optional schema, but was passed: {}", schema); if (schema.getTypes().get(0).getType() == org.apache.avro.Schema.Type.NULL) { return schema.getTypes().get(1); } else { return schema.getTypes().get(0); } } }
6,586
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestTables.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Maps;
import com.netflix.iceberg.BaseTable;
import com.netflix.iceberg.FileIO;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Snapshot;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import java.io.File;
import java.io.IOException;
import java.util.Map;

// TODO: Use the copy of this from core.
/**
 * In-memory table implementation used by tests. Table metadata is held in a static map
 * keyed by table name, so tables survive across operations within a single JVM but are
 * never persisted. Commits can be made to fail on demand to exercise retry paths.
 */
class TestTables {
  // shared metadata store for all test tables; all access synchronizes on this map
  private static final Map<String, TableMetadata> METADATA = Maps.newHashMap();

  private TestTables() {
  }

  /**
   * Creates a new in-memory table.
   *
   * @param temp base location for the table (used only as a path string)
   * @param name table name used as the key in the metadata store
   * @param schema table schema
   * @param spec partition spec
   * @return a new {@link TestTable}
   * @throws AlreadyExistsException if metadata for {@code name} already exists
   */
  static TestTable create(File temp, String name, Schema schema, PartitionSpec spec) {
    TestTableOperations ops = new TestTableOperations(name);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
    }
    ops.commit(null, TableMetadata.newTableMetadata(ops, schema, spec, temp.toString()));
    return new TestTable(ops, name);
  }

  /**
   * Loads an existing in-memory table by name. The table may have no current metadata if
   * it was never created.
   */
  static TestTable load(String name) {
    TestTableOperations ops = new TestTableOperations(name);
    return new TestTable(ops, name);
  }

  /** Table wrapper that exposes its operations for test assertions. */
  static class TestTable extends BaseTable {
    private final TestTableOperations ops;

    private TestTable(TestTableOperations ops, String name) {
      super(ops, name);
      this.ops = ops;
    }

    TestTableOperations ops() {
      return ops;
    }
  }

  /** Removes all table metadata; call between tests to isolate them. */
  static void clearTables() {
    synchronized (METADATA) {
      METADATA.clear();
    }
  }

  /** Returns the current metadata for a table, or null if the table does not exist. */
  static TableMetadata readMetadata(String tableName) {
    synchronized (METADATA) {
      return METADATA.get(tableName);
    }
  }

  /** Replaces a table's metadata directly, bypassing commit checks. */
  static void replaceMetadata(String tableName, TableMetadata metadata) {
    synchronized (METADATA) {
      METADATA.put(tableName, metadata);
    }
  }

  /**
   * {@link TableOperations} backed by the static metadata map. Supports injecting a fixed
   * number of commit failures via {@link #failCommits(int)}.
   */
  static class TestTableOperations implements TableOperations {
    private final String tableName;
    private TableMetadata current = null;
    private long lastSnapshotId = 0;
    private int failCommits = 0;

    TestTableOperations(String tableName) {
      this.tableName = tableName;
      refresh();
      // seed the snapshot id counter from existing snapshots so new ids never collide
      long maxSnapshotId = 0;
      if (current != null) {
        for (Snapshot snap : current.snapshots()) {
          maxSnapshotId = Math.max(maxSnapshotId, snap.snapshotId());
        }
      }
      this.lastSnapshotId = maxSnapshotId;
    }

    /** Makes the next {@code numFailures} commits throw {@link CommitFailedException}. */
    void failCommits(int numFailures) {
      this.failCommits = numFailures;
    }

    @Override
    public TableMetadata current() {
      return current;
    }

    @Override
    public TableMetadata refresh() {
      synchronized (METADATA) {
        this.current = METADATA.get(tableName);
      }
      return current;
    }

    @Override
    public void commit(TableMetadata base, TableMetadata metadata) {
      // cheap pre-check outside the lock: callers must commit against the latest metadata
      if (base != current) {
        throw new CommitFailedException("Cannot commit changes based on stale metadata");
      }
      synchronized (METADATA) {
        refresh();
        // re-check under the lock in case another committer won the race
        if (base != current) {
          throw new CommitFailedException(
              "Commit failed: table was updated at %d", base.lastUpdatedMillis());
        }
        if (failCommits > 0) {
          this.failCommits -= 1;
          throw new CommitFailedException("Injected failure");
        }
        METADATA.put(tableName, metadata);
        this.current = metadata;
      }
    }

    @Override
    public FileIO io() {
      return new LocalFileIO();
    }

    @Override
    public String metadataFileLocation(String fileName) {
      File metadataDir = new File(current.location(), "metadata");
      return new File(metadataDir, fileName).getAbsolutePath();
    }

    @Override
    public long newSnapshotId() {
      this.lastSnapshotId += 1;
      return lastSnapshotId;
    }
  }

  /** {@link FileIO} that reads and writes the local filesystem. */
  static class LocalFileIO implements FileIO {
    @Override
    public InputFile newInputFile(String path) {
      return Files.localInput(path);
    }

    @Override
    public OutputFile newOutputFile(String path) {
      return Files.localOutput(new File(path));
    }

    @Override
    public void deleteFile(String path) {
      if (!new File(path).delete()) {
        throw new RuntimeIOException("Failed to delete file: " + path);
      }
    }
  }
}
6,587
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestAvroScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.TestHelpers;
import org.apache.avro.generic.GenericData.Record;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.UUID;

import static com.netflix.iceberg.Files.localOutput;

/**
 * Round-trip test: writes randomly generated Avro records into an Iceberg table and
 * verifies that reading the table through the Spark "iceberg" source returns the same
 * rows. Schemas to exercise are supplied by {@link AvroDataTest}.
 */
public class TestAvroScan extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestAvroScan.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // clear the static reference first so a failure in stop() cannot leave a
    // half-stopped session visible to other tests
    SparkSession currentSpark = TestAvroScan.spark;
    TestAvroScan.spark = null;
    currentSpark.stop();
  }

  /**
   * Writes 100 random records for {@code schema} to an Avro data file, registers the file
   * with a new Iceberg table, then scans the table with Spark and compares every row
   * against the written records.
   *
   * @param schema the write schema to exercise
   * @throws IOException if writing the Avro data file fails
   */
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    File parent = temp.newFolder("avro");
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    // fail fast if the data folder cannot be created instead of failing later on write
    Assert.assertTrue("Should create data folder", dataFolder.mkdirs());

    File avroFile = new File(dataFolder,
        FileFormat.AVRO.addExtension(UUID.randomUUID().toString()));

    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString());

    // Important: use the table's schema for the rest of the test
    // When tables are created, the column ids are reassigned.
    Schema tableSchema = table.schema();

    List<Record> expected = RandomData.generateList(tableSchema, 100, 1L);

    try (FileAppender<Record> writer = Avro.write(localOutput(avroFile))
        .schema(tableSchema)
        .build()) {
      writer.addAll(expected);
    }

    DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
        .withRecordCount(100)
        .withFileSizeInBytes(avroFile.length())
        .withPath(avroFile.toString())
        .build();

    table.newAppend().appendFile(file).commit();

    Dataset<Row> df = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<Row> rows = df.collectAsList();
    Assert.assertEquals("Should contain 100 rows", 100, rows.size());

    for (int i = 0; i < expected.size(); i += 1) {
      TestHelpers.assertEqualsSafe(tableSchema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkParquetReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.spark.sql.catalyst.InternalRow;
import org.junit.Assert;
import org.junit.Assume;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe;

/**
 * Round-trip test for the Spark Parquet value reader: writes random Avro records to a
 * Parquet file and verifies that {@link SparkParquetReaders} reads them back as equal
 * {@link InternalRow}s. Schemas to exercise are supplied by {@link AvroDataTest}.
 */
public class TestSparkParquetReader extends AvroDataTest {
  /**
   * Writes 100 random records for {@code schema} to a Parquet file and compares each row
   * read back through the Spark reader against the written record.
   *
   * @param schema the write schema to exercise
   * @throws IOException if writing or reading the Parquet file fails
   */
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    // parquet-avro cannot serialize maps with non-string keys; skip those schemas
    Assume.assumeTrue("Parquet Avro cannot write non-string map keys",
        null == TypeUtil.find(schema,
            type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));

    List<GenericData.Record> expected = RandomData.generateList(schema, 100, 0L);

    // the appender requires a path to a file that does not yet exist
    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<GenericData.Record> writer = Parquet.write(Files.localOutput(testFile))
        .schema(schema)
        .named("test")
        .build()) {
      writer.addAll(expected);
    }

    try (CloseableIterable<InternalRow> reader = Parquet.read(Files.localInput(testFile))
        .project(schema)
        .createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
        .build()) {
      Iterator<InternalRow> rows = reader.iterator();
      for (int i = 0; i < expected.size(); i += 1) {
        Assert.assertTrue("Should have expected number of rows", rows.hasNext());
        assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
      }
      Assert.assertFalse("Should not have extra rows", rows.hasNext());
    }
  }
}
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/CodegenExamples.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.UnsafeArrayData; import org.apache.spark.sql.catalyst.expressions.UnsafeMapData; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.unsafe.Platform; import org.apache.spark.unsafe.types.UTF8String; public class CodegenExamples { class Example1 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; public Example1(Object[] references) { this.references = references; result = new UnsafeRow(2); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); long value = isNull ? 
-1L : (i.getLong(0)); if (isNull) { rowWriter.setNullAt(0); } else { rowWriter.write(0, value); } boolean isNull1 = i.isNullAt(1); UTF8String value1 = isNull1 ? null : (i.getUTF8String(1)); if (isNull1) { rowWriter.setNullAt(1); } else { rowWriter.write(1, value1); } result.setTotalSize(holder.totalSize()); return result; } } class Example2 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example2(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); InternalRow value = isNull ? null : (i.getStruct(0, 1)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = value.isNullAt(0); float value1 = isNull1 ? 
-1.0f : value.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example3 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example3(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); InternalRow value = isNull ? null : (i.getStruct(0, 2)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = value.isNullAt(0); float value1 = isNull1 ? -1.0f : value.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = value.isNullAt(1); float value2 = isNull2 ? 
-1.0f : value.getFloat(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example4 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; public Example4(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes2 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes2); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final UTF8String element1 = values.getUTF8String(index1); arrayWriter1.write(index1, element1); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example5 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example5(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); ArrayData value = isNull ? null : (i.getArray(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. 
final int tmpCursor = holder.cursor; if (value instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = value.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (value.isNullAt(index)) { arrayWriter.setNull(index); } else { final InternalRow element = value.getStruct(index, 2); final int tmpCursor1 = holder.cursor; if (element instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) element).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) element).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = element.isNullAt(0); int value1 = isNull1 ? -1 : element.getInt(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = element.isNullAt(1); int value2 = isNull2 ? 
-1 : element.getInt(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } arrayWriter.setOffsetAndSize(index, tmpCursor1, holder.cursor - tmpCursor1); } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example6 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example6(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. 
final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes3 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes3); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes3; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final InternalRow element1 = values.getStruct(index1, 2); final int tmpCursor3 = holder.cursor; if (element1 instanceof UnsafeRow) { final int sizeInBytes2 = ((UnsafeRow) element1).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes2); ((UnsafeRow) element1).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { rowWriter1.reset(); boolean isNull1 = element1.isNullAt(0); float value1 = isNull1 ? -1.0f : element1.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = element1.isNullAt(1); float value2 = isNull2 ? 
-1.0f : element1.getFloat(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } arrayWriter1.setOffsetAndSize(index1, tmpCursor3, holder.cursor - tmpCursor3); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example7 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; public Example7(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes2 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes2); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final UTF8String element1 = values.getUTF8String(index1); arrayWriter1.write(index1, element1); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } }
6,590
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestParquetAvroReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.io.CloseableIterable; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.parquet.ParquetAvroValueReaders; import com.netflix.iceberg.parquet.ParquetReader; import com.netflix.iceberg.parquet.ParquetSchemaUtil; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.parquet.ParquetReadOptions; import org.apache.parquet.schema.MessageType; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.File; import java.io.IOException; import java.util.Iterator; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestParquetAvroReader { @Rule public TemporaryFolder temp = new TemporaryFolder(); private static final Schema COMPLEX_SCHEMA = new Schema( required(1, "roots", Types.LongType.get()), optional(3, "lime", Types.ListType.ofRequired(4, Types.DoubleType.get())), required(5, "strict", Types.StructType.of( required(9, "tangerine", Types.StringType.get()), optional(6, "hopeful", Types.StructType.of( required(7, 
"steel", Types.FloatType.get()), required(8, "lantern", Types.DateType.get()) )), optional(10, "vehement", Types.LongType.get()) )), optional(11, "metamorphosis", Types.MapType.ofRequired(12, 13, Types.StringType.get(), Types.TimestampType.withoutZone())), required(14, "winter", Types.ListType.ofOptional(15, Types.StructType.of( optional(16, "beet", Types.DoubleType.get()), required(17, "stamp", Types.TimeType.get()), optional(18, "wheeze", Types.StringType.get()) ))), optional(19, "renovate", Types.MapType.ofRequired(20, 21, Types.StringType.get(), Types.StructType.of( optional(22, "jumpy", Types.DoubleType.get()), required(23, "koala", Types.TimeType.get()) ))), optional(2, "slide", Types.StringType.get()) ); @Ignore public void testStructSchema() throws IOException { Schema structSchema = new Schema( required(1, "circumvent", Types.LongType.get()), optional(2, "antarctica", Types.StringType.get()), optional(3, "fluent", Types.DoubleType.get()), required(4, "quell", Types.StructType.of( required(5, "operator", Types.BooleanType.get()), optional(6, "fanta", Types.IntegerType.get()), optional(7, "cable", Types.FloatType.get()) )), required(8, "chimney", Types.TimestampType.withZone()), required(9, "wool", Types.DateType.get()) ); File testFile = writeTestData(structSchema, 5_000_000, 1059); MessageType readSchema = ParquetSchemaUtil.convert(structSchema, "test"); long sum = 0; long sumSq = 0; int warmups = 2; int n = 10; for (int i = 0; i < warmups + n; i += 1) { // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), structSchema, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(structSchema, readSchema), Expressions.alwaysTrue(), true)) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this 
away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); long duration = end - start; System.err.println("XOR val: " + val); System.err.println(String.format("Reassembled %d records in %d ms", count, duration)); if (i >= warmups) { sum += duration; sumSq += (duration * duration); } } } double mean = ((double) sum) / n; double stddev = Math.sqrt((((double) sumSq) / n) - (mean * mean)); System.err.println(String.format( "Ran %d trials: mean time: %.3f ms, stddev: %.3f ms", n, mean, stddev)); } @Ignore public void testWithOldReadPath() throws IOException { File testFile = writeTestData(COMPLEX_SCHEMA, 500_000, 1985); MessageType readSchema = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test"); for (int i = 0; i < 5; i += 1) { // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (CloseableIterable<Record> reader = Parquet.read(Files.localInput(testFile)) .project(COMPLEX_SCHEMA) .build()) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); System.err.println("XOR val: " + val); System.err.println("Old read path: read " + count + " records in " + (end - start) + " ms"); } // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, readSchema), Expressions.alwaysTrue(), true)) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); System.err.println("XOR val: " + val); 
System.err.println("New read path: read " + count + " records in " + (end - start) + " ms"); } } } @Test public void testCorrectness() throws IOException { Iterable<Record> records = RandomData.generate(COMPLEX_SCHEMA, 250_000, 34139); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Parquet.write(Files.localOutput(testFile)) .schema(COMPLEX_SCHEMA) .build()) { writer.addAll(records); } MessageType readSchema = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test"); // verify that the new read path is correct try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, readSchema), Expressions.alwaysTrue(), true)) { int i = 0; Iterator<Record> iter = records.iterator(); for (Record actual : reader) { Record expected = iter.next(); Assert.assertEquals("Record " + i + " should match expected", expected, actual); i += 1; } } } private File writeTestData(Schema schema, int n, int seed) throws IOException { File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Parquet.write(Files.localOutput(testFile)) .schema(schema) .build()) { writer.addAll(RandomData.generate(schema, n, seed)); } return testFile; } }
6,591
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestHelpers.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.Lists; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericData.Record; import org.apache.orc.storage.serde2.io.DateWritable; import org.apache.spark.sql.Row; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.GenericRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.DateTimeUtils; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import org.junit.Assert; import scala.collection.Seq; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.sql.Timestamp; import java.time.Instant; import java.time.LocalDate; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.Collection; import java.util.Date; import java.util.List; import java.util.Map; import java.util.UUID; import static com.netflix.iceberg.spark.SparkSchemaUtil.convert; import static scala.collection.JavaConverters.mapAsJavaMapConverter; import static scala.collection.JavaConverters.seqAsJavaListConverter; public class TestHelpers { 
public static void assertEqualsSafe(Types.StructType struct, Record rec, Row row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.get(i); assertEqualsSafe(fieldType, expectedValue, actualValue); } } private static void assertEqualsSafe(Types.ListType list, Collection<?> expected, List actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i); assertEqualsSafe(elementType, expectedValue, actualValue); } } private static void assertEqualsSafe(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); for (Object expectedKey : expected.keySet()) { Object matchingKey = null; for (Object actualKey : actual.keySet()) { try { assertEqualsSafe(keyType, expectedKey, actualKey); matchingKey = actualKey; } catch (AssertionError e) { // failed } } Assert.assertNotNull("Should have a matching key", matchingKey); assertEqualsSafe(valueType, expected.get(expectedKey), actual.get(matchingKey)); } } private static final OffsetDateTime EPOCH = Instant.ofEpochMilli(0L).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); @SuppressWarnings("unchecked") private static void assertEqualsSafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case DATE: Assert.assertTrue("Should be an int", expected instanceof Integer); Assert.assertTrue("Should be a Date", actual instanceof Date); int daysFrom1970_01_01 = (Integer) expected; LocalDate 
date = ChronoUnit.DAYS.addTo(EPOCH_DAY, daysFrom1970_01_01); Assert.assertEquals("ISO-8601 date should be equal", date.toString(), actual.toString()); break; case TIMESTAMP: Assert.assertTrue("Should be a long", expected instanceof Long); Assert.assertTrue("Should be a Timestamp", actual instanceof Timestamp); Timestamp ts = (Timestamp) actual; // milliseconds from nanos has already been added by getTime long tsMicros = (ts.getTime() * 1000) + ((ts.getNanos() / 1000) % 1000); Assert.assertEquals("Timestamp micros should be equal", expected, tsMicros); break; case STRING: Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("Strings should be equal", expected, actual); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a BigDecimal", actual instanceof BigDecimal); Assert.assertEquals("BigDecimals should be equal", expected, actual); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be a Row", actual instanceof Row); assertEqualsSafe(type.asNestedType().asStructType(), (Record) expected, (Row) actual); break; case 
LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be a Seq", actual instanceof Seq); List<?> asList = seqAsJavaListConverter((Seq<?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asListType(), (Collection) expected, asList); break; case MAP: Assert.assertTrue("Should expect a Collection", expected instanceof Map); Assert.assertTrue("Should be a Map", actual instanceof scala.collection.Map); Map<String, ?> asMap = mapAsJavaMapConverter( (scala.collection.Map<String, ?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asMapType(), (Map<String, ?>) expected, asMap); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } public static void assertEqualsUnsafe(Types.StructType struct, Record rec, InternalRow row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.get(i, convert(fieldType)); assertEqualsUnsafe(fieldType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.ListType list, Collection<?> expected, ArrayData actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i, convert(elementType)); assertEqualsUnsafe(elementType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.MapType map, Map<?, ?> expected, MapData actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); List<Map.Entry<?, ?>> expectedElements = Lists.newArrayList(expected.entrySet()); ArrayData actualKeys = actual.keyArray(); ArrayData actualValues = actual.valueArray(); for (int i = 0; i < expectedElements.size(); i += 1) { Map.Entry<?, ?> expectedPair = expectedElements.get(i); 
Object actualKey = actualKeys.get(i, convert(keyType)); Object actualValue = actualValues.get(i, convert(keyType)); assertEqualsUnsafe(keyType, expectedPair.getKey(), actualKey); assertEqualsUnsafe(valueType, expectedPair.getValue(), actualValue); } } private static void assertEqualsUnsafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case DATE: case TIMESTAMP: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case STRING: Assert.assertTrue("Should be a UTF8String", actual instanceof UTF8String); Assert.assertEquals("Strings should be equal", expected, actual.toString()); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a UTF8String", actual instanceof UTF8String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual.toString()); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a Decimal", actual instanceof Decimal); Assert.assertEquals("BigDecimals should be equal", expected, ((Decimal) actual).toJavaBigDecimal()); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be an InternalRow", actual instanceof 
InternalRow); assertEqualsUnsafe(type.asNestedType().asStructType(), (Record) expected, (InternalRow) actual); break; case LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be an ArrayData", actual instanceof ArrayData); assertEqualsUnsafe(type.asNestedType().asListType(), (Collection) expected, (ArrayData) actual); break; case MAP: Assert.assertTrue("Should expect a Map", expected instanceof Map); Assert.assertTrue("Should be an ArrayBasedMapData", actual instanceof MapData); assertEqualsUnsafe(type.asNestedType().asMapType(), (Map) expected, (MapData) actual); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } /** * Check that the given InternalRow is equivalent to the Row. * @param prefix context for error messages * @param type the type of the row * @param expected the expected value of the row * @param actual the actual value of the row */ public static void assertEquals(String prefix, Types.StructType type, InternalRow expected, Row actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { List<Types.NestedField> fields = type.fields(); for (int c = 0; c < fields.size(); ++c) { String fieldName = fields.get(c).name(); Type childType = fields.get(c).type(); switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + "." + fieldName + " - " + childType, getValue(expected, c, childType), getPrimitiveValue(actual, c, childType)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + "." + fieldName, (byte[]) getValue(expected, c, childType), (byte[]) actual.get(c)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + "." 
+ fieldName, st, expected.getStruct(c, st.fields().size()), actual.getStruct(c)); break; } case LIST: assertEqualsLists(prefix + "." + fieldName, childType.asListType(), expected.getArray(c), toList((Seq<?>) actual.get(c))); break; case MAP: assertEqualsMaps(prefix + "." + fieldName, childType.asMapType(), expected.getMap(c), toJavaMap((scala.collection.Map<?, ?>) actual.getMap(c))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsLists(String prefix, Types.ListType type, ArrayData expected, List actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); Type childType = type.elementType(); for (int e = 0; e < expected.numElements(); ++e) { switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".elem " + e + " - " + childType, getValue(expected, e, childType), actual.get(e)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".elem " + e, (byte[]) getValue(expected, e, childType), (byte[]) actual.get(e)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + ".elem " + e, st, expected.getStruct(e, st.fields().size()), (Row) actual.get(e)); break; } case LIST: assertEqualsLists(prefix + ".elem " + e, childType.asListType(), expected.getArray(e), toList((Seq<?>) actual.get(e))); break; case MAP: assertEqualsMaps(prefix + ".elem " + e, childType.asMapType(), expected.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(e))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsMaps(String prefix, Types.MapType type, MapData expected, Map<?, ?> actual) { if (expected == null || actual == null) { 
Assert.assertEquals(prefix, expected, actual); } else { Type keyType = type.keyType(); Type valueType = type.valueType(); ArrayData expectedKeyArray = expected.keyArray(); ArrayData expectedValueArray = expected.valueArray(); Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); for (int e = 0; e < expected.numElements(); ++e) { Object expectedKey = getValue(expectedKeyArray, e, keyType); Object actualValue = actual.get(expectedKey); if (actualValue == null) { Assert.assertEquals(prefix + ".key=" + expectedKey + " has null", true, expected.valueArray().isNullAt(e)); } else { switch (valueType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".key=" + expectedKey + " - " + valueType, getValue(expectedValueArray, e, valueType), actual.get(expectedKey)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".key=" + expectedKey, (byte[]) getValue(expectedValueArray, e, valueType), (byte[]) actual.get(expectedKey)); break; case STRUCT: { Types.StructType st = (Types.StructType) valueType; assertEquals(prefix + ".key=" + expectedKey, st, expectedValueArray.getStruct(e, st.fields().size()), (Row) actual.get(expectedKey)); break; } case LIST: assertEqualsLists(prefix + ".key=" + expectedKey, valueType.asListType(), expectedValueArray.getArray(e), toList((Seq<?>) actual.get(expectedKey))); break; case MAP: assertEqualsMaps(prefix + ".key=" + expectedKey, valueType.asMapType(), expectedValueArray.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(expectedKey))); break; default: throw new IllegalArgumentException("Unhandled type " + valueType); } } } } } private static Object getValue(SpecializedGetters container, int ord, Type type) { if (container.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return container.getBoolean(ord); case INTEGER: return container.getInt(ord); case LONG: return 
container.getLong(ord); case FLOAT: return container.getFloat(ord); case DOUBLE: return container.getDouble(ord); case STRING: return container.getUTF8String(ord).toString(); case BINARY: case FIXED: case UUID: return container.getBinary(ord); case DATE: return new DateWritable(container.getInt(ord)).get(); case TIMESTAMP: return DateTimeUtils.toJavaTimestamp(container.getLong(ord)); case DECIMAL: { Types.DecimalType dt = (Types.DecimalType) type; return container.getDecimal(ord, dt.precision(), dt.scale()).toJavaBigDecimal(); } case STRUCT: Types.StructType struct = type.asStructType(); InternalRow internalRow = container.getStruct(ord, struct.fields().size()); Object[] data = new Object[struct.fields().size()]; for (int i = 0; i < data.length; i += 1) { if (internalRow.isNullAt(i)) { data[i] = null; } else { data[i] = getValue(internalRow, i, struct.fields().get(i).type()); } } return new GenericRow(data); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static Object getPrimitiveValue(Row row, int ord, Type type) { if (row.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return row.getBoolean(ord); case INTEGER: return row.getInt(ord); case LONG: return row.getLong(ord); case FLOAT: return row.getFloat(ord); case DOUBLE: return row.getDouble(ord); case STRING: return row.getString(ord); case BINARY: case FIXED: case UUID: return row.get(ord); case DATE: return row.getDate(ord); case TIMESTAMP: return row.getTimestamp(ord); case DECIMAL: return row.getDecimal(ord); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static <K, V> Map<K, V> toJavaMap(scala.collection.Map<K, V> map) { return map == null ? null : mapAsJavaMapConverter(map).asJava(); } private static List toList(Seq<?> val) { return val == null ? 
null : seqAsJavaListConverter(val).asJava(); } private static void assertEqualBytes(String context, byte[] expected, byte[] actual) { if (expected == null || actual == null) { Assert.assertEquals(context, expected, actual); } else { Assert.assertArrayEquals(context, expected, actual); } } }
6,592
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestParquetAvroWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.parquet.ParquetAvroValueReaders;
import com.netflix.iceberg.parquet.ParquetAvroWriter;
import com.netflix.iceberg.parquet.ParquetReader;
import com.netflix.iceberg.parquet.ParquetSchemaUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.apache.parquet.ParquetReadOptions;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.Iterator;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Round-trip test for the Parquet-Avro write path: records written with
 * {@link ParquetAvroWriter} must read back identically through
 * {@link ParquetAvroValueReaders}.
 */
public class TestParquetAvroWriter {
  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // A deliberately gnarly schema: nested structs, lists of structs, maps of
  // structs, and out-of-order field ids, to exercise the writer broadly.
  private static final Schema COMPLEX_SCHEMA = new Schema(
      required(1, "roots", Types.LongType.get()),
      optional(3, "lime", Types.ListType.ofRequired(4, Types.DoubleType.get())),
      required(5, "strict", Types.StructType.of(
          required(9, "tangerine", Types.StringType.get()),
          optional(6, "hopeful", Types.StructType.of(
              required(7, "steel", Types.FloatType.get()),
              required(8, "lantern", Types.DateType.get())
          )),
          optional(10, "vehement", Types.LongType.get())
      )),
      optional(11, "metamorphosis", Types.MapType.ofRequired(12, 13,
          Types.StringType.get(), Types.TimestampType.withoutZone())),
      required(14, "winter", Types.ListType.ofOptional(15, Types.StructType.of(
          optional(16, "beet", Types.DoubleType.get()),
          required(17, "stamp", Types.TimeType.get()),
          optional(18, "wheeze", Types.StringType.get())
      ))),
      optional(19, "renovate", Types.MapType.ofRequired(20, 21,
          Types.StringType.get(), Types.StructType.of(
              optional(22, "jumpy", Types.DoubleType.get()),
              required(23, "koala", Types.TimeType.get())
          ))),
      optional(2, "slide", Types.StringType.get())
  );

  @Test
  public void testCorrectness() throws IOException {
    // Fixed seed keeps the generated data (and therefore the test) deterministic.
    Iterable<Record> inputRecords = RandomData.generate(COMPLEX_SCHEMA, 250_000, 34139);

    // TemporaryFolder creates the file; delete it so the writer can create it fresh.
    File outputFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", outputFile.delete());

    try (FileAppender<Record> appender = Parquet.write(Files.localOutput(outputFile))
        .schema(COMPLEX_SCHEMA)
        .createWriterFunc(ParquetAvroWriter::buildWriter)
        .build()) {
      appender.addAll(inputRecords);
    }

    MessageType messageType = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test");

    // verify that the new read path is correct
    try (ParquetReader<Record> reader = new ParquetReader<>(
        Files.localInput(outputFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(),
        fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, messageType),
        Expressions.alwaysTrue(), false)) {
      int recordNum = 0;
      Iterator<Record> expectedIter = inputRecords.iterator();
      for (Record readRecord : reader) {
        Record expectedRecord = expectedIter.next();
        Assert.assertEquals("Record " + recordNum + " should match expected",
            expectedRecord, readRecord);
        recordNum += 1;
      }
    }
  }
}
6,593
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/RandomData.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
import org.apache.spark.sql.catalyst.util.ArrayBasedMapData;
import org.apache.spark.sql.catalyst.util.GenericArrayData;
import org.apache.spark.sql.types.Decimal;
import org.apache.spark.unsafe.types.UTF8String;

import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.function.Supplier;

/**
 * Generates random test data for an Iceberg {@link Schema}, either as Avro
 * {@link Record generic records} or as Spark {@link InternalRow rows}.
 * <p>
 * Generation is deterministic for a given (schema, seed) pair: all randomness
 * flows through a single seeded {@link Random}, so the exact sequence of calls
 * to that Random determines the output.
 */
public class RandomData {
  /**
   * Generates a list of random Avro records for the given schema.
   *
   * @param schema the Iceberg schema to generate data for
   * @param numRecords number of records to generate
   * @param seed seed for the random generator; same seed produces same data
   * @return a list of {@code numRecords} random records
   */
  public static List<Record> generateList(Schema schema, int numRecords, long seed) {
    RandomDataGenerator generator = new RandomDataGenerator(schema, seed);
    List<Record> records = Lists.newArrayListWithExpectedSize(numRecords);
    for (int i = 0; i < numRecords; i += 1) {
      records.add((Record) TypeUtil.visit(schema, generator));
    }

    return records;
  }

  /**
   * Returns an iterator over {@code rows} random Spark {@link InternalRow}s
   * for the given schema.
   *
   * @param schema the Iceberg schema to generate data for
   * @param rows number of rows the iterator will produce
   * @param seed seed for the random generator
   * @return a single-use iterator producing exactly {@code rows} rows
   */
  public static Iterator<InternalRow> generateSpark(Schema schema, int rows, long seed) {
    return new Iterator<InternalRow>() {
      private int rowsLeft = rows;
      private final SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed);

      @Override
      public boolean hasNext() {
        return rowsLeft > 0;
      }

      @Override
      public InternalRow next() {
        // honor the Iterator contract instead of producing rows forever;
        // this matches the guard in the iterator returned by generate()
        if (rowsLeft <= 0) {
          throw new NoSuchElementException();
        }
        rowsLeft -= 1;
        return (InternalRow) TypeUtil.visit(schema, generator);
      }
    };
  }

  /**
   * Returns an {@link Iterable} of random Avro records; each call to
   * {@link Iterable#iterator()} restarts generation from the same seed, so
   * repeated iteration yields identical data.
   *
   * @param schema the Iceberg schema to generate data for
   * @param numRecords number of records per iteration
   * @param seed seed for the random generator
   * @return a re-iterable source of random records
   */
  public static Iterable<Record> generate(Schema schema, int numRecords, long seed) {
    return () -> new Iterator<Record>() {
      private RandomDataGenerator generator = new RandomDataGenerator(schema, seed);
      private int count = 0;

      @Override
      public boolean hasNext() {
        return count < numRecords;
      }

      @Override
      public Record next() {
        if (count >= numRecords) {
          throw new NoSuchElementException();
        }
        count += 1;
        return (Record) TypeUtil.visit(schema, generator);
      }
    };
  }

  /**
   * Schema visitor that produces Avro {@link Record}s. Values come from
   * {@link #generatePrimitive(Type.PrimitiveType, Random)} and are then
   * converted to the representation Avro expects (e.g. String, Fixed,
   * ByteBuffer, BigDecimal).
   */
  private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
    private final Map<Type, org.apache.avro.Schema> typeToSchema;
    private final Random random;

    private RandomDataGenerator(Schema schema, long seed) {
      this.typeToSchema = AvroSchemaUtil.convertTypes(schema.asStruct(), "test");
      this.random = new Random(seed);
    }

    @Override
    public Record schema(Schema schema, Supplier<Object> structResult) {
      return (Record) structResult.get();
    }

    @Override
    public Record struct(Types.StructType struct, Iterable<Object> fieldResults) {
      Record rec = new Record(typeToSchema.get(struct));

      List<Object> values = Lists.newArrayList(fieldResults);
      for (int i = 0; i < values.size(); i += 1) {
        rec.put(i, values.get(i));
      }

      return rec;
    }

    @Override
    public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
      // return null 5% of the time when the value is optional
      if (field.isOptional() && random.nextInt(20) == 1) {
        return null;
      }
      return fieldResult.get();
    }

    @Override
    public Object list(Types.ListType list, Supplier<Object> elementResult) {
      int numElements = random.nextInt(20);

      List<Object> result = Lists.newArrayListWithExpectedSize(numElements);
      for (int i = 0; i < numElements; i += 1) {
        // return null 5% of the time when the value is optional
        if (list.isElementOptional() && random.nextInt(20) == 1) {
          result.add(null);
        } else {
          result.add(elementResult.get());
        }
      }

      return result;
    }

    @Override
    public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
      int numEntries = random.nextInt(20);

      Map<Object, Object> result = Maps.newLinkedHashMap();
      Set<Object> keySet = Sets.newHashSet();
      for (int i = 0; i < numEntries; i += 1) {
        Object key = keyResult.get();
        // ensure no collisions
        while (keySet.contains(key)) {
          key = keyResult.get();
        }
        keySet.add(key);

        // return null 5% of the time when the value is optional
        if (map.isValueOptional() && random.nextInt(20) == 1) {
          result.put(key, null);
        } else {
          result.put(key, valueResult.get());
        }
      }

      return result;
    }

    @Override
    public Object primitive(Type.PrimitiveType primitive) {
      Object result = generatePrimitive(primitive, random);
      // For the primitives that Avro needs a different type than Spark, fix
      // them here.
      switch (primitive.typeId()) {
        case STRING:
          return ((UTF8String) result).toString();
        case FIXED:
          return new GenericData.Fixed(typeToSchema.get(primitive), (byte[]) result);
        case BINARY:
          return ByteBuffer.wrap((byte[]) result);
        case UUID:
          return UUID.nameUUIDFromBytes((byte[]) result);
        case DECIMAL:
          return ((Decimal) result).toJavaBigDecimal();
        default:
          return result;
      }
    }
  }

  /**
   * Schema visitor that produces Spark {@link InternalRow}s, using Spark's
   * internal representations (UTF8String, Decimal, GenericArrayData,
   * ArrayBasedMapData) directly.
   */
  private static class SparkRandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
    private final Random random;

    private SparkRandomDataGenerator(long seed) {
      this.random = new Random(seed);
    }

    @Override
    public InternalRow schema(Schema schema, Supplier<Object> structResult) {
      return (InternalRow) structResult.get();
    }

    @Override
    public InternalRow struct(Types.StructType struct, Iterable<Object> fieldResults) {
      List<Object> values = Lists.newArrayList(fieldResults);
      GenericInternalRow row = new GenericInternalRow(values.size());
      for (int i = 0; i < values.size(); i += 1) {
        row.update(i, values.get(i));
      }

      return row;
    }

    @Override
    public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
      // return null 5% of the time when the value is optional
      if (field.isOptional() && random.nextInt(20) == 1) {
        return null;
      }
      return fieldResult.get();
    }

    @Override
    public GenericArrayData list(Types.ListType list, Supplier<Object> elementResult) {
      int numElements = random.nextInt(20);
      // arr backs the GenericArrayData, so filling it below fills the result
      Object[] arr = new Object[numElements];
      GenericArrayData result = new GenericArrayData(arr);

      for (int i = 0; i < numElements; i += 1) {
        // return null 5% of the time when the value is optional
        if (list.isElementOptional() && random.nextInt(20) == 1) {
          arr[i] = null;
        } else {
          arr[i] = elementResult.get();
        }
      }

      return result;
    }

    @Override
    public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
      int numEntries = random.nextInt(20);

      // the arrays back the map data, so filling them below fills the result
      Object[] keysArr = new Object[numEntries];
      Object[] valuesArr = new Object[numEntries];
      GenericArrayData keys = new GenericArrayData(keysArr);
      GenericArrayData values = new GenericArrayData(valuesArr);
      ArrayBasedMapData result = new ArrayBasedMapData(keys, values);

      Set<Object> keySet = Sets.newHashSet();
      for (int i = 0; i < numEntries; i += 1) {
        Object key = keyResult.get();
        // ensure no collisions
        while (keySet.contains(key)) {
          key = keyResult.get();
        }
        keySet.add(key);
        keysArr[i] = key;

        // return null 5% of the time when the value is optional
        if (map.isValueOptional() && random.nextInt(20) == 1) {
          valuesArr[i] = null;
        } else {
          valuesArr[i] = valueResult.get();
        }
      }

      return result;
    }

    @Override
    public Object primitive(Type.PrimitiveType primitive) {
      return generatePrimitive(primitive, random);
    }
  }

  /**
   * Produces a random value for a primitive type in Spark's internal
   * representation. Boundary values (min/max, 0, NaN, infinities) are chosen
   * with elevated probability to exercise edge cases.
   */
  private static Object generatePrimitive(Type.PrimitiveType primitive, Random random) {
    int choice = random.nextInt(20);
    switch (primitive.typeId()) {
      case BOOLEAN:
        return choice < 10;

      case INTEGER:
        switch (choice) {
          case 1:
            return Integer.MIN_VALUE;
          case 2:
            return Integer.MAX_VALUE;
          case 3:
            return 0;
          default:
            return random.nextInt();
        }

      case LONG:
        switch (choice) {
          case 1:
            return Long.MIN_VALUE;
          case 2:
            return Long.MAX_VALUE;
          case 3:
            return 0L;
          default:
            return random.nextLong();
        }

      case FLOAT:
        switch (choice) {
          case 1:
            return Float.MIN_VALUE;
          case 2:
            return -Float.MIN_VALUE;
          case 3:
            return Float.MAX_VALUE;
          case 4:
            return -Float.MAX_VALUE;
          case 5:
            return Float.NEGATIVE_INFINITY;
          case 6:
            return Float.POSITIVE_INFINITY;
          case 7:
            return 0.0F;
          case 8:
            return Float.NaN;
          default:
            return random.nextFloat();
        }

      case DOUBLE:
        switch (choice) {
          case 1:
            return Double.MIN_VALUE;
          case 2:
            return -Double.MIN_VALUE;
          case 3:
            return Double.MAX_VALUE;
          case 4:
            return -Double.MAX_VALUE;
          case 5:
            return Double.NEGATIVE_INFINITY;
          case 6:
            return Double.POSITIVE_INFINITY;
          case 7:
            return 0.0D;
          case 8:
            return Double.NaN;
          default:
            return random.nextDouble();
        }

      case DATE:
        // this will include negative values (dates before 1970-01-01)
        return random.nextInt() % ABOUT_380_YEARS_IN_DAYS;

      case TIME:
        return (random.nextLong() & Integer.MAX_VALUE) % ONE_DAY_IN_MICROS;

      case TIMESTAMP:
        return random.nextLong() % FIFTY_YEARS_IN_MICROS;

      case STRING:
        return randomString(random);

      case UUID:
        byte[] uuidBytes = new byte[16];
        random.nextBytes(uuidBytes);
        // this will hash the uuidBytes
        return uuidBytes;

      case FIXED:
        byte[] fixed = new byte[((Types.FixedType) primitive).length()];
        random.nextBytes(fixed);
        return fixed;

      case BINARY:
        byte[] binary = new byte[random.nextInt(50)];
        random.nextBytes(binary);
        return binary;

      case DECIMAL:
        Types.DecimalType type = (Types.DecimalType) primitive;
        BigInteger unscaled = randomUnscaled(type.precision(), random);
        return Decimal.apply(new BigDecimal(unscaled, type.scale()));

      default:
        throw new IllegalArgumentException(
            "Cannot generate random value for unknown type: " + primitive);
    }
  }

  private static final long FIFTY_YEARS_IN_MICROS =
      (50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4;
  private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365;
  private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L;
  private static final String CHARS =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?";

  // random ASCII string of length 0..49, drawn from CHARS
  private static UTF8String randomString(Random random) {
    int length = random.nextInt(50);
    byte[] buffer = new byte[length];

    for (int i = 0; i < length; i += 1) {
      buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length()));
    }

    return UTF8String.fromBytes(buffer);
  }

  private static final String DIGITS = "0123456789";

  // random unscaled decimal value with 0..precision-1 digits
  private static BigInteger randomUnscaled(int precision, Random random) {
    int length = random.nextInt(precision);
    if (length == 0) {
      return BigInteger.ZERO;
    }

    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < length; i += 1) {
      sb.append(DIGITS.charAt(random.nextInt(DIGITS.length())));
    }

    return new BigInteger(sb.toString());
  }
}
6,594
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkAvroReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroIterable;
import com.netflix.iceberg.io.FileAppender;
import org.apache.avro.generic.GenericData.Record;
import org.apache.spark.sql.catalyst.InternalRow;
import org.junit.Assert;

import java.io.File;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe;

/**
 * Validates {@link SparkAvroReader} by writing random Avro records and reading
 * them back as Spark {@link InternalRow}s, comparing field by field.
 * Test cases come from the schemas defined in {@link AvroDataTest}.
 */
public class TestSparkAvroReader extends AvroDataTest {
  @Override  // implements the abstract round-trip hook from AvroDataTest
  protected void writeAndValidate(Schema schema) throws IOException {
    // fixed seed keeps the generated data deterministic
    List<Record> expected = RandomData.generateList(schema, 100, 0L);

    // TemporaryFolder creates the file; delete it so the writer can create it fresh
    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile))
        .schema(schema)
        .named("test")
        .build()) {
      for (Record rec : expected) {
        writer.add(rec);
      }
    }

    List<InternalRow> rows;
    try (AvroIterable<InternalRow> reader = Avro.read(Files.localInput(testFile))
        .createReaderFunc(SparkAvroReader::new)
        .project(schema)
        .build()) {
      rows = Lists.newArrayList(reader);
    }

    for (int i = 0; i < expected.size(); i += 1) {
      assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
6,595
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/AvroDataTest.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.IOException;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Base class for data round-trip tests. Each test builds a schema and hands it
 * to {@link #writeAndValidate(Schema)}, which a concrete subclass implements
 * for a specific write/read path (e.g. Avro to Spark rows).
 */
public abstract class AvroDataTest {

  /**
   * Writes random data for {@code schema} and validates that it reads back
   * equal. Implemented by subclasses for each format/reader combination.
   */
  protected abstract void writeAndValidate(Schema schema) throws IOException;

  // One field per supported primitive type, mixing required and optional.
  // UUID and fixed are commented out because Spark has no representation for
  // them (they surface as ambiguous types) — see the conversions in SparkSchemaUtil.
  protected static final StructType SUPPORTED_PRIMITIVES = StructType.of(
      required(100, "id", LongType.get()),
      optional(101, "data", Types.StringType.get()),
      required(102, "b", Types.BooleanType.get()),
      optional(103, "i", Types.IntegerType.get()),
      required(104, "l", LongType.get()),
      optional(105, "f", Types.FloatType.get()),
      required(106, "d", Types.DoubleType.get()),
      optional(107, "date", Types.DateType.get()),
      required(108, "ts", Types.TimestampType.withZone()),
      required(110, "s", Types.StringType.get()),
      //required(111, "uuid", Types.UUIDType.get()),
      //required(112, "fixed", Types.FixedType.ofLength(7)),
      optional(113, "bytes", Types.BinaryType.get()),
      required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
      required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
      required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // spark's maximum precision
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  @Test
  public void testSimpleStruct() throws IOException {
    // flat struct of all supported primitives
    writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
  }

  @Test
  public void testArray() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testArrayOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMap() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testNumericMapKey() throws IOException {
    // non-string map keys exercise the generic key/value encoding
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.LongType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testComplexMapKey() throws IOException {
    // struct-typed map keys
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StructType.of(
                required(4, "i", Types.IntegerType.get()),
                optional(5, "s", Types.StringType.get())),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testMapOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMixedTypes() throws IOException {
    // deep nesting: lists of maps, maps of lists, lists of lists, maps of
    // maps, and a struct mixing all of them
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "list_of_maps",
            ListType.ofOptional(2, MapType.ofOptional(3, 4,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))),
        optional(5, "map_of_lists",
            MapType.ofOptional(6, 7,
                Types.StringType.get(),
                ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
        required(9, "list_of_lists",
            ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
        required(12, "map_of_maps",
            MapType.ofOptional(13, 14,
                Types.StringType.get(),
                MapType.ofOptional(15, 16,
                    Types.StringType.get(),
                    SUPPORTED_PRIMITIVES))),
        required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
            Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
            Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))
        )))
    );

    writeAndValidate(schema);
  }
}
6,596
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkDateTimes.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.catalyst.util.DateTimeUtils;
import org.junit.Assert;
import org.junit.Test;

import java.util.TimeZone;

/**
 * Checks that Iceberg date/timestamp literals agree with Spark's
 * {@link DateTimeUtils} conversions across a wide range of dates.
 */
public class TestSparkDateTimes {
  @Test
  public void testSparkDate() {
    // checkSparkDate("1582-10-14"); // -141428
    // samples span pre-Gregorian cutover through far-future dates;
    // 1582-10-15 is the first day of the gregorian calendar
    String[] dates = {
        "1582-10-15",
        "1601-08-12",
        "1801-07-04",
        "1901-08-12",
        "1969-12-31",
        "1970-01-01",
        "2017-12-25",
        "2043-08-11",
        "2111-05-03",
        "2224-02-29",
        "3224-10-05"
    };
    for (String dateString : dates) {
      checkSparkDate(dateString);
    }
  }

  /** Asserts that Spark renders the literal's day ordinal as the same date string. */
  public void checkSparkDate(String dateString) {
    Literal<Integer> dateLit = Literal.of(dateString).to(Types.DateType.get());
    String sparkRendering = DateTimeUtils.toJavaDate(dateLit.value()).toString();
    System.err.println(dateString + ": " + dateLit.value());
    Assert.assertEquals("Should be the same date (" + dateLit.value() + ")",
        dateString, sparkRendering);
  }

  @Test
  public void testSparkTimestamp() {
    // DateTimeUtils renders in the JVM default zone; pin it to UTC and restore after
    TimeZone previousTz = TimeZone.getDefault();
    try {
      TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
      checkSparkTimestamp("1582-10-15T15:51:08.440219+00:00", "1582-10-15 15:51:08.440219");
      checkSparkTimestamp("1970-01-01T00:00:00.000000+00:00", "1970-01-01 00:00:00");
      checkSparkTimestamp("2043-08-11T12:30:01.000001+00:00", "2043-08-11 12:30:01.000001");
    } finally {
      TimeZone.setDefault(previousTz);
    }
  }

  /** Asserts that Spark renders the literal's micros value as {@code sparkRepr}. */
  public void checkSparkTimestamp(String timestampString, String sparkRepr) {
    Literal<Long> tsLit = Literal.of(timestampString).to(Types.TimestampType.withZone());
    String sparkRendering = DateTimeUtils.timestampToString(tsLit.value());
    System.err.println(timestampString + ": " + tsLit.value());
    Assert.assertEquals("Should be the same timestamp (" + tsLit.value() + ")",
        sparkRepr, sparkRendering);
  }
}
6,597
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkSchemaUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.AnalysisException;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalog.Column;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.StructType;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import static com.netflix.iceberg.spark.SparkTypeVisitor.visit;
import static com.netflix.iceberg.types.TypeUtil.visit;

/**
 * Helper methods for working with Spark/Hive metadata.
 */
public class SparkSchemaUtil {
  private SparkSchemaUtil() {
  }

  /**
   * Returns a {@link Schema} for the given table with fresh field ids.
   * <p>
   * This creates a Schema for an existing table by looking up the table's schema with Spark and
   * converting that schema. Spark/Hive partition columns are included in the schema.
   *
   * @param spark a Spark session
   * @param name a table name and (optional) database
   * @return a Schema for the table, if found
   */
  public static Schema schemaForTable(SparkSession spark, String name) {
    StructType sparkType = spark.table(name).schema();
    Type converted = visit(sparkType, new SparkTypeToType(sparkType));
    return new Schema(converted.asNestedType().asStructType().fields());
  }

  /**
   * Returns a {@link PartitionSpec} for the given table.
   * <p>
   * This creates a partition spec for an existing table by looking up the table's schema and
   * creating a spec with identity partitions for each partition column.
   *
   * @param spark a Spark session
   * @param name a table name and (optional) database
   * @return a PartitionSpec for the table, if found
   * @throws AnalysisException if thrown by the Spark catalog
   */
  public static PartitionSpec specForTable(SparkSession spark, String name)
      throws AnalysisException {
    List<String> parts = Lists.newArrayList(Splitter.on('.').limit(2).split(name));
    // an unqualified name goes to the "default" database
    String db = parts.size() == 1 ? "default" : parts.get(0);
    String table = parts.get(parts.size() == 1 ? 0 : 1);

    return identitySpec(
        schemaForTable(spark, name),
        spark.catalog().listColumns(db, table).collectAsList());
  }

  /**
   * Convert a {@link Schema} to a {@link DataType Spark type}.
   *
   * @param schema a Schema
   * @return the equivalent Spark type
   * @throws IllegalArgumentException if the type cannot be converted to Spark
   */
  public static StructType convert(Schema schema) {
    return (StructType) visit(schema, new TypeToSparkType());
  }

  /**
   * Convert a {@link Type} to a {@link DataType Spark type}.
   *
   * @param type a Type
   * @return the equivalent Spark type
   * @throws IllegalArgumentException if the type cannot be converted to Spark
   */
  public static DataType convert(Type type) {
    return visit(type, new TypeToSparkType());
  }

  /**
   * Convert a Spark {@link StructType struct} to a {@link Schema} with new field ids.
   * <p>
   * This conversion assigns fresh ids.
   * <p>
   * Some data types are represented as the same Spark type. These are converted to a default type.
   * <p>
   * To convert using a reference schema for field ids and ambiguous types, use
   * {@link #convert(Schema, StructType)}.
   *
   * @param sparkType a Spark StructType
   * @return the equivalent Schema
   * @throws IllegalArgumentException if the type cannot be converted
   */
  public static Schema convert(StructType sparkType) {
    Type converted = visit(sparkType, new SparkTypeToType(sparkType));
    return new Schema(converted.asNestedType().asStructType().fields());
  }

  /**
   * Convert a Spark {@link DataType struct} to a {@link Type} with new field ids.
   * <p>
   * This conversion assigns fresh ids.
   * <p>
   * Some data types are represented as the same Spark type. These are converted to a default type.
   * <p>
   * To convert using a reference schema for field ids and ambiguous types, use
   * {@link #convert(Schema, StructType)}.
   *
   * @param sparkType a Spark DataType
   * @return the equivalent Type
   * @throws IllegalArgumentException if the type cannot be converted
   */
  public static Type convert(DataType sparkType) {
    return visit(sparkType, new SparkTypeToType());
  }

  /**
   * Convert a Spark {@link StructType struct} to a {@link Schema} based on the given schema.
   * <p>
   * This conversion does not assign new ids; it uses ids from the base schema.
   * <p>
   * Data types, field order, and nullability will match the spark type. This conversion may return
   * a schema that is not compatible with base schema.
   *
   * @param baseSchema a Schema on which conversion is based
   * @param sparkType a Spark StructType
   * @return the equivalent Schema
   * @throws IllegalArgumentException if the type cannot be converted or there are missing ids
   */
  public static Schema convert(Schema baseSchema, StructType sparkType) {
    // convert to a type with fresh ids
    Types.StructType struct = visit(sparkType, new SparkTypeToType(sparkType)).asStructType();
    // reassign ids to match the base schema
    Schema schema = TypeUtil.reassignIds(new Schema(struct.fields()), baseSchema);
    // fix types that can't be represented in Spark (UUID and Fixed)
    return FixupTypes.fixup(schema, baseSchema);
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType) {
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, ImmutableSet.of()))
        .asNestedType()
        .asStructType()
        .fields());
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   * <p>
   * The filters list of {@link Expression} is used to ensure that columns referenced by filters
   * are projected.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @param filters a list of filters
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType, List<Expression> filters) {
    Set<Integer> filterRefs = Binder.boundReferences(schema.asStruct(), filters);
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, filterRefs))
        .asNestedType()
        .asStructType()
        .fields());
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   * <p>
   * The filter {@link Expression} is used to ensure that columns referenced by the filter
   * are projected.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @param filter a filter expression whose referenced columns must be projected
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType, Expression filter) {
    Set<Integer> filterRefs = Binder.boundReferences(schema.asStruct(),
        Collections.singletonList(filter));
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, filterRefs))
        .asNestedType()
        .asStructType()
        .fields());
  }

  // builds an identity spec from catalog columns flagged as partition columns
  private static PartitionSpec identitySpec(Schema schema, Collection<Column> columns) {
    List<String> names = Lists.newArrayList();
    for (Column column : columns) {
      if (column.isPartition()) {
        names.add(column.name());
      }
    }

    return identitySpec(schema, names);
  }

  // varargs convenience overload
  private static PartitionSpec identitySpec(Schema schema, String... partitionNames) {
    return identitySpec(schema, Lists.newArrayList(partitionNames));
  }

  // returns null when there are no partition columns (an unpartitioned table)
  private static PartitionSpec identitySpec(Schema schema, List<String> partitionNames) {
    if (partitionNames == null || partitionNames.isEmpty()) {
      return null;
    }

    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema);
    for (String partitionName : partitionNames) {
      builder.identity(partitionName);
    }

    return builder.build();
  }
}
6,598
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkExpressions.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Binder; import com.netflix.iceberg.expressions.BoundReference; import com.netflix.iceberg.expressions.Expression.Operation; import com.netflix.iceberg.expressions.ExpressionVisitors; import com.netflix.iceberg.types.Types.TimestampType; import com.netflix.iceberg.util.Pair; import org.apache.spark.sql.Column; import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute; import org.apache.spark.sql.catalyst.expressions.And; import org.apache.spark.sql.catalyst.expressions.And$; import org.apache.spark.sql.catalyst.expressions.Attribute; import org.apache.spark.sql.catalyst.expressions.AttributeReference; import org.apache.spark.sql.catalyst.expressions.BinaryExpression; import org.apache.spark.sql.catalyst.expressions.Cast; import org.apache.spark.sql.catalyst.expressions.EqualNullSafe; import org.apache.spark.sql.catalyst.expressions.EqualTo; import org.apache.spark.sql.catalyst.expressions.Expression; import org.apache.spark.sql.catalyst.expressions.GreaterThan; import org.apache.spark.sql.catalyst.expressions.GreaterThanOrEqual; import org.apache.spark.sql.catalyst.expressions.In; import org.apache.spark.sql.catalyst.expressions.InSet; import org.apache.spark.sql.catalyst.expressions.IsNotNull; 
import org.apache.spark.sql.catalyst.expressions.IsNull; import org.apache.spark.sql.catalyst.expressions.LessThan; import org.apache.spark.sql.catalyst.expressions.LessThanOrEqual; import org.apache.spark.sql.catalyst.expressions.Literal; import org.apache.spark.sql.catalyst.expressions.Not; import org.apache.spark.sql.catalyst.expressions.Not$; import org.apache.spark.sql.catalyst.expressions.Or; import org.apache.spark.sql.catalyst.expressions.Or$; import org.apache.spark.sql.catalyst.expressions.ParseToDate; import org.apache.spark.sql.catalyst.expressions.UnaryExpression; import org.apache.spark.sql.catalyst.expressions.Year; import org.apache.spark.sql.functions$; import org.apache.spark.sql.types.DateType$; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; import static com.netflix.iceberg.expressions.ExpressionVisitors.visit; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.expressions.Expressions.predicate; import static scala.collection.JavaConverters.seqAsJavaListConverter; import static scala.collection.JavaConverters.setAsJavaSetConverter; public class SparkExpressions { private SparkExpressions() { } private static final Map<Class<? extends Expression>, Operation> FILTERS = ImmutableMap .<Class<? 
extends Expression>, Operation>builder() .put(EqualTo.class, Operation.EQ) .put(EqualNullSafe.class, Operation.EQ) .put(GreaterThan.class, Operation.GT) .put(GreaterThanOrEqual.class, Operation.GT_EQ) .put(LessThan.class, Operation.LT) .put(LessThanOrEqual.class, Operation.LT_EQ) .put(In.class, Operation.IN) .put(InSet.class, Operation.IN) .put(IsNull.class, Operation.IS_NULL) .put(IsNotNull.class, Operation.NOT_NULL) .put(And.class, Operation.AND) .put(Or.class, Operation.OR) .put(Not.class, Operation.NOT) .build(); public static com.netflix.iceberg.expressions.Expression convert(Expression expr) { Class<? extends Expression> exprClass = expr.getClass(); Operation op = FILTERS.get(exprClass); if (op != null) { switch (op) { case IS_NULL: case NOT_NULL: UnaryExpression unary = (UnaryExpression) expr; if (unary.child() instanceof Attribute) { Attribute attr = (Attribute) unary.child(); return predicate(op, attr.name()); } return null; case LT: case LT_EQ: case GT: case GT_EQ: case EQ: case NOT_EQ: BinaryExpression binary = (BinaryExpression) expr; return convert(op, binary.left(), binary.right()); case NOT: com.netflix.iceberg.expressions.Expression child = convert(((Not) expr).child()); if (child != null) { return not(child); } return null; case AND: And andExpr = (And) expr; com.netflix.iceberg.expressions.Expression andLeft = convert(andExpr.left()); com.netflix.iceberg.expressions.Expression andRight = convert(andExpr.right()); if (andLeft != null && andRight != null) { return and(convert(andExpr.left()), convert(andExpr.right())); } return null; case OR: Or orExpr = (Or) expr; com.netflix.iceberg.expressions.Expression orLeft = convert(orExpr.left()); com.netflix.iceberg.expressions.Expression orRight = convert(orExpr.right()); if (orLeft != null && orRight != null) { return or(orLeft, orRight); } return null; case IN: if (expr instanceof In) { In inExpr = (In) expr; List<Object> literals = convertLiterals(seqAsJavaListConverter(inExpr.list()).asJava()); if 
(literals != null) { return convertIn(inExpr.value(), literals); } else { // if the list contained a non-literal, it can't be converted return null; } } else if (expr instanceof InSet) { InSet inExpr = (InSet) expr; // expressions are already converted to Java objects Set<Object> literals = setAsJavaSetConverter(inExpr.hset()).asJava(); return convertIn(inExpr.child(), literals); } default: } } return null; // can't convert } private enum Transform { IDENTITY, YEAR, // literal is an integer year, like 2018 DAY, // literal is an integer date } private static final Map<Class<? extends Expression>, Transform> TRANSFORMS = ImmutableMap .<Class<? extends Expression>, Transform>builder() .put(UnresolvedAttribute.class, Transform.IDENTITY) .put(AttributeReference.class, Transform.IDENTITY) .put(Year.class, Transform.YEAR) .put(ParseToDate.class, Transform.DAY) .put(Cast.class, Transform.DAY) .build(); private static com.netflix.iceberg.expressions.Expression convertIn(Expression expr, Collection<Object> values) { if (expr instanceof Attribute) { Attribute attr = (Attribute) expr; com.netflix.iceberg.expressions.Expression converted = alwaysFalse(); for (Object item : values) { converted = or(converted, equal(attr.name(), item)); } return converted; } return null; } private static List<Object> convertLiterals(List<Expression> values) { List<Object> converted = Lists.newArrayListWithExpectedSize(values.size()); for (Expression value : values) { if (value instanceof Literal) { Literal lit = (Literal) value; converted.add(valueFromSpark(lit)); } else { return null; } } return converted; } private static com.netflix.iceberg.expressions.Expression convert(Operation op, Expression left, Expression right) { Pair<Transform, String> attrPair = null; Operation leftOperation = null; Literal lit = null; if (right instanceof Literal) { lit = (Literal) right; attrPair = convertAttr(left); leftOperation = op; } else if (left instanceof Literal) { lit = (Literal) left; attrPair = 
convertAttr(right); leftOperation = op.flipLR(); } if (attrPair != null) { switch (attrPair.first()) { case IDENTITY: return predicate(leftOperation, attrPair.second(), valueFromSpark(lit)); case YEAR: return filter(leftOperation, attrPair.second(), (int) lit.value(), SparkExpressions::yearToTimestampMicros); case DAY: return filter(leftOperation, attrPair.second(), (int) lit.value(), SparkExpressions::dayToTimestampMicros); default: } } return null; } private static Object valueFromSpark(Literal lit) { if (lit.value() instanceof UTF8String) { return lit.value().toString(); } else if (lit.value() instanceof Decimal) { return ((Decimal) lit.value()).toJavaBigDecimal(); } return lit.value(); } private static Pair<Transform, String> convertAttr(Expression expr) { Transform type = TRANSFORMS.get(expr.getClass()); if (type == Transform.IDENTITY) { Attribute attr = (Attribute) expr; return Pair.of(type, attr.name()); } else if (expr instanceof Cast) { Cast cast = (Cast) expr; if (DateType$.MODULE$.sameType(cast.dataType()) && cast.child() instanceof Attribute) { Attribute attr = (Attribute) cast.child(); return Pair.of(Transform.DAY, attr.name()); } } else if (expr instanceof ParseToDate) { ParseToDate toDate = (ParseToDate) expr; if (toDate.left() instanceof Attribute) { Attribute attr = (Attribute) toDate.left(); return Pair.of(Transform.DAY, attr.name()); } } else if (expr instanceof UnaryExpression) { UnaryExpression func = (UnaryExpression) expr; if (func.child() instanceof Attribute) { Attribute attr = (Attribute) func.child(); return Pair.of(type, attr.name()); } } return null; } private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); private static long yearToTimestampMicros(int year) { return ChronoUnit.MICROS.between(EPOCH, LocalDateTime.of(year, 1, 1, 0, 0).atOffset(ZoneOffset.UTC)); } private static long dayToTimestampMicros(int daysFromEpoch) { return 
ChronoUnit.MICROS.between(EPOCH, EPOCH_DAY.plusDays(daysFromEpoch).atStartOfDay().atOffset(ZoneOffset.UTC)); } private static com.netflix.iceberg.expressions.Literal<Long> tsLiteral(long timestampMicros) { return com.netflix.iceberg.expressions.Literal .of(timestampMicros) .to(TimestampType.withoutZone()); } private static com.netflix.iceberg.expressions.Expression filter( Operation op, String name, int value, Function<Integer, Long> startTsMicros) { switch (op) { case LT: return predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value))); case LT_EQ: return predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value + 1))); case GT: return predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value + 1))); case GT_EQ: return predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value))); case EQ: return and( predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value))), predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value + 1))) ); case NOT_EQ: return or( predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value + 1))), predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value))) ); case IN: case NOT_IN: default: throw new IllegalArgumentException("Cannot convert operation to year filter: " + op); } } public static Expression convert(com.netflix.iceberg.expressions.Expression filter, Schema schema) { return visit(Binder.bind(schema.asStruct(), filter), new ExpressionToSpark(schema)); } private static class ExpressionToSpark extends ExpressionVisitors. 
BoundExpressionVisitor<Expression> { private final Schema schema; public ExpressionToSpark(Schema schema) { this.schema = schema; } @Override public Expression alwaysTrue() { return functions$.MODULE$.lit(true).expr(); } @Override public Expression alwaysFalse() { return functions$.MODULE$.lit(false).expr(); } @Override public Expression not(Expression child) { return Not$.MODULE$.apply(child); } @Override public Expression and(Expression left, Expression right) { return And$.MODULE$.apply(left, right); } @Override public Expression or(Expression left, Expression right) { return Or$.MODULE$.apply(left, right); } @Override public <T> Expression isNull(BoundReference<T> ref) { return column(ref).isNull().expr(); } @Override public <T> Expression notNull(BoundReference<T> ref) { return column(ref).isNotNull().expr(); } @Override public <T> Expression lt(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).lt(lit.value()).expr(); } @Override public <T> Expression ltEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).leq(lit.value()).expr(); } @Override public <T> Expression gt(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).gt(lit.value()).expr(); } @Override public <T> Expression gtEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).geq(lit.value()).expr(); } @Override public <T> Expression eq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).equalTo(lit.value()).expr(); } @Override public <T> Expression notEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).notEqual(lit.value()).expr(); } @Override public <T> Expression in(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { throw new UnsupportedOperationException("Not implemented: in"); } @Override public <T> Expression notIn(BoundReference<T> 
ref, com.netflix.iceberg.expressions.Literal<T> lit) { throw new UnsupportedOperationException("Not implemented: notIn"); } private Column column(BoundReference ref) { return functions$.MODULE$.column(schema.findColumnName(ref.fieldId())); } } }
6,599