index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/DelegatingInputStream.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.io;

import java.io.InputStream;

/**
 * An interface for streams that wrap another {@link InputStream}, exposing the wrapped stream so
 * callers can unwrap it (for example, to reach an implementation-specific API on the delegate).
 */
public interface DelegatingInputStream {

  /**
   * Returns the underlying {@link InputStream} this stream delegates to.
   *
   * @return the wrapped input stream
   */
  InputStream getDelegate();
}
2,100
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/DelegatingOutputStream.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.io;

import java.io.OutputStream;

/**
 * An interface for streams that wrap another {@link OutputStream}, exposing the wrapped stream so
 * callers can unwrap it (for example, to reach an implementation-specific API on the delegate).
 */
public interface DelegatingOutputStream {

  /**
   * Returns the underlying {@link OutputStream} this stream delegates to.
   *
   * @return the wrapped output stream
   */
  OutputStream getDelegate();
}
2,101
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/InputFile.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.io;

import com.netflix.iceberg.exceptions.RuntimeIOException;
import java.io.IOException;

/**
 * An interface used to read input files using {@link SeekableInputStream} instances.
 * <p>
 * This class is based on Parquet's InputFile.
 */
public interface InputFile {

  /**
   * Returns the total length of the file.
   *
   * @return the total length of the file, in bytes
   * @throws RuntimeIOException If the implementation throws an {@link IOException}
   */
  long getLength();

  /**
   * Opens a new {@link SeekableInputStream} for the underlying data file.
   *
   * @return a seekable stream for reading the file
   * @throws RuntimeIOException If the implementation throws an {@link IOException}
   */
  SeekableInputStream newStream();

  /**
   * The fully-qualified location of the input file as a String.
   *
   * @return the input file location
   */
  String location();
}
2,102
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/ValidationException.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.exceptions;

import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;

/**
 * Exception raised when validation checks fail.
 * <p>
 * For example, this is thrown when attempting to create a table with a {@link PartitionSpec} that
 * is not compatible with the table {@link Schema}
 */
public class ValidationException extends RuntimeException {

  /**
   * Creates a validation exception with a {@link String#format(String, Object...)} message.
   *
   * @param message a format string for the exception message
   * @param args arguments for the format string
   */
  public ValidationException(String message, Object... args) {
    super(String.format(message, args));
  }

  /**
   * Creates a validation exception with a formatted message and an underlying cause.
   *
   * @param cause the underlying cause of the failure
   * @param message a format string for the exception message
   * @param args arguments for the format string
   */
  public ValidationException(Throwable cause, String message, Object... args) {
    super(String.format(message, args), cause);
  }

  /**
   * Throws a {@link ValidationException} with the given formatted message if the test is false.
   *
   * @param test the condition that must hold
   * @param message a format string for the exception message
   * @param args arguments for the format string
   * @throws ValidationException if {@code test} is false
   */
  public static void check(boolean test, String message, Object... args) {
    if (!test) {
      throw new ValidationException(message, args);
    }
  }
}
2,103
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/AlreadyExistsException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when attempting to create a table that already exists. */ public class AlreadyExistsException extends RuntimeException { public AlreadyExistsException(String message, Object... args) { super(String.format(message, args)); } public AlreadyExistsException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
2,104
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/RuntimeIOException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; import java.io.IOException; /** * Exception used to wrap {@link IOException} as a {@link RuntimeException} and add context. */ public class RuntimeIOException extends RuntimeException { public RuntimeIOException(IOException e) { super(e); } public RuntimeIOException(IOException e, String message, Object... args) { super(String.format(message, args), e); } public RuntimeIOException(String message, Object...args) { super(String.format(message, args)); } }
2,105
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/NoSuchTableException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when attempting to load a table that does not exist. */ public class NoSuchTableException extends RuntimeException { public NoSuchTableException(String message, Object... args) { super(String.format(message, args)); } public NoSuchTableException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
2,106
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/CommitFailedException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when a commit fails because of out of date metadata. */ public class CommitFailedException extends RuntimeException { public CommitFailedException(String message, Object... args) { super(String.format(message, args)); } public CommitFailedException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
2,107
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Transform.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import java.io.Serializable;

/**
 * A transform function used for partitioning.
 * <p>
 * Implementations of this interface can be used to transform values, check whether they can be
 * applied to a given {@link Type}, and project {@link BoundPredicate predicates} to predicates on
 * partition values.
 *
 * @param <S> Java class of source values
 * @param <T> Java class of transformed values
 */
public interface Transform<S, T> extends Serializable {
  /**
   * Transforms a value to its corresponding partition value.
   *
   * @param value a source value
   * @return a transformed partition value
   */
  T apply(S value);

  /**
   * Checks whether this function can be applied to the give {@link Type}.
   *
   * @param type a type
   * @return true if this transform can be applied to the type, false otherwise
   */
  boolean canTransform(Type type);

  /**
   * Returns the {@link Type} produced by this transform given a source type.
   *
   * @param sourceType a type
   * @return the result type created by the apply method for the given type
   */
  Type getResultType(Type sourceType);

  /**
   * Transforms a {@link BoundPredicate predicate} to an inclusive predicate on the partition
   * values produced by {@link #apply(Object)}.
   * <p>
   * This inclusive transform guarantees that if pred(v) is true, then projected(apply(v)) is true.
   *
   * @param name the field name for partition values
   * @param predicate a predicate for source values
   * @return an inclusive predicate on partition values
   */
  UnboundPredicate<T> project(String name, BoundPredicate<S> predicate);

  /**
   * Transforms a {@link BoundPredicate predicate} to a strict predicate on the partition values
   * produced by {@link #apply(Object)}.
   * <p>
   * This strict transform guarantees that if strict(apply(v)) is true, then pred(v) is also true.
   *
   * @param name the field name for partition values
   * @param predicate a predicate for source values
   * @return a strict predicate on partition values
   */
  UnboundPredicate<T> projectStrict(String name, BoundPredicate<S> predicate);

  /**
   * Returns a human-readable String representation of a transformed value.
   * <p>
   * null values will return "null"
   *
   * @param value a transformed value
   * @return a human-readable String representation of the value
   */
  default String toHumanString(T value) {
    return String.valueOf(value);
  }
}
2,108
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/ProjectionUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.UnboundPredicate;
import java.math.BigDecimal;
import java.math.BigInteger;

import static com.netflix.iceberg.expressions.Expressions.predicate;

/**
 * Helpers that project source-value predicates to predicates on transformed partition values.
 * <p>
 * Each method converts the open bounds LT and GT to closed bounds (LT_EQ / GT_EQ) on the
 * adjacent source value before applying the transform, then builds a predicate on the named
 * partition field. Operations with no supported projection return null.
 */
class ProjectionUtil {

  // Projects a predicate on int source values through the given transform.
  // NOTE(review): boundary - 1 / boundary + 1 can overflow at Integer.MIN_VALUE / MAX_VALUE —
  // confirm callers never project LT/GT at the extreme int values.
  static <T> UnboundPredicate<T> truncateInteger(
      String name, BoundPredicate<Integer> pred, Transform<Integer, T> transform) {
    int boundary = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // adjust closed and then transform ltEq
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary - 1));
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary));
      case GT:
        // adjust closed and then transform gtEq
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary + 1));
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary));
      case EQ:
        return predicate(pred.op(), name, transform.apply(boundary));
      default:
        // no projection for this operation
        return null;
    }
  }

  // Projects a predicate on long source values through the given transform.
  // NOTE(review): same +/- 1 overflow caveat as truncateInteger, at Long.MIN_VALUE / MAX_VALUE.
  static <T> UnboundPredicate<T> truncateLong(
      String name, BoundPredicate<Long> pred, Transform<Long, T> transform) {
    long boundary = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // adjust closed and then transform ltEq
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary - 1L));
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary));
      case GT:
        // adjust closed and then transform gtEq
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary + 1L));
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary));
      case EQ:
        return predicate(pred.op(), name, transform.apply(boundary));
      default:
        // no projection for this operation
        return null;
    }
  }

  // Projects a predicate on decimal source values; the adjacent value is one unit in the last
  // place, computed on the unscaled value so the scale is preserved.
  static <T> UnboundPredicate<T> truncateDecimal(
      String name, BoundPredicate<BigDecimal> pred, Transform<BigDecimal, T> transform) {
    BigDecimal boundary = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // adjust closed and then transform ltEq
        BigDecimal minusOne = new BigDecimal(
            boundary.unscaledValue().subtract(BigInteger.ONE), boundary.scale());
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(minusOne));
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary));
      case GT:
        // adjust closed and then transform gtEq
        BigDecimal plusOne = new BigDecimal(
            boundary.unscaledValue().add(BigInteger.ONE), boundary.scale());
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(plusOne));
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary));
      case EQ:
        return predicate(pred.op(), name, transform.apply(boundary));
      default:
        // no projection for this operation
        return null;
    }
  }

  // Projects a predicate on variable-length source values (strings, binary). There is no
  // adjacent value to adjust to, so LT/GT collapse to their closed counterparts.
  static <S, T> UnboundPredicate<T> truncateArray(
      String name, BoundPredicate<S> pred, Transform<S, T> transform) {
    S boundary = pred.literal().value();
    switch (pred.op()) {
      case LT:
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(boundary));
      case GT:
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(boundary));
      case EQ:
        return predicate(Expression.Operation.EQ, name, transform.apply(boundary));
//      case IN: // TODO
//        return Expressions.predicate(Operation.IN, name, transform.apply(boundary));
      default:
        // no projection for this operation
        return null;
    }
  }
}
2,109
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Timestamps.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.netflix.iceberg.expressions.BoundPredicate; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.UnboundPredicate; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.time.Instant; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL; import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL; enum Timestamps implements Transform<Long, Integer> { YEAR(ChronoUnit.YEARS, "year"), MONTH(ChronoUnit.MONTHS, "month"), DAY(ChronoUnit.DAYS, "day"), HOUR(ChronoUnit.HOURS, "hour"); private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC); private final ChronoUnit granularity; private final String name; Timestamps(ChronoUnit granularity, String name) { this.granularity = granularity; this.name = name; } @Override public Integer apply(Long timestampMicros) { // discards fractional seconds, not needed for calculation OffsetDateTime timestamp = Instant .ofEpochSecond(timestampMicros / 1_000_000) .atOffset(ZoneOffset.UTC); return (int) granularity.between(EPOCH, timestamp); } @Override public boolean canTransform(Type type) { return type.typeId() == Type.TypeID.TIMESTAMP; } @Override public Type getResultType(Type 
sourceType) { return Types.IntegerType.get(); } @Override public UnboundPredicate<Integer> project(String name, BoundPredicate<Long> pred) { if (pred.op() == NOT_NULL || pred.op() == IS_NULL) { return Expressions.predicate(pred.op(), name); } return ProjectionUtil.truncateLong(name, pred, this); } @Override public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Long> predicate) { return null; } @Override public String toHumanString(Integer value) { if (value == null) { return "null"; } switch (granularity) { case YEARS: return TransformUtil.humanYear(value); case MONTHS: return TransformUtil.humanMonth(value); case DAYS: return TransformUtil.humanDay(value); case HOURS: return TransformUtil.humanHour(value); default: throw new UnsupportedOperationException("Unsupported time unit: " + granularity); } } @Override public String toString() { return name; } }
2,110
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Identity.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.google.common.base.Objects; import com.netflix.iceberg.expressions.BoundPredicate; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.UnboundPredicate; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.nio.ByteBuffer; class Identity<T> implements Transform<T, T> { @SuppressWarnings("unchecked") public static <I> Identity<I> get(Type type) { return new Identity<>(type); } private final Type type; private Identity(Type type) { this.type = type; } @Override public T apply(T value) { return value; } @Override public boolean canTransform(Type type) { return type.isPrimitiveType(); } @Override public Type getResultType(Type sourceType) { return sourceType; } @Override public UnboundPredicate<T> project(String name, BoundPredicate<T> predicate) { return projectStrict(name, predicate); } @Override public UnboundPredicate<T> projectStrict(String name, BoundPredicate<T> predicate) { if (predicate.literal() != null) { return Expressions.predicate(predicate.op(), name, predicate.literal().value()); } else { return Expressions.predicate(predicate.op(), name); } } @Override public String toHumanString(T value) { if (value == null) { return "null"; } switch (type.typeId()) { case DATE: return TransformUtil.humanDay((Integer) value); case TIME: return TransformUtil.humanTime((Long) 
value); case TIMESTAMP: if (((Types.TimestampType) type).shouldAdjustToUTC()) { return TransformUtil.humanTimestampWithZone((Long) value); } else { return TransformUtil.humanTimestampWithoutZone((Long) value); } case FIXED: case BINARY: if (value instanceof ByteBuffer) { return TransformUtil.base64encode(((ByteBuffer) value).duplicate()); } else if (value instanceof byte[]) { return TransformUtil.base64encode(ByteBuffer.wrap((byte[]) value)); } else { throw new UnsupportedOperationException("Unsupported binary type: " + value.getClass()); } default: return value.toString(); } } @Override public String toString() { return "identity"; } @Override public boolean equals(Object o) { if (this == o) { return true; } if (o == null || getClass() != o.getClass()) { return false; } Identity<?> that = (Identity<?>) o; return type.equals(that.type); } @Override public int hashCode() { return Objects.hashCode(type); } }
2,111
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Transforms.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.google.common.base.Preconditions; import com.netflix.iceberg.Schema; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.types.Type; import java.util.Locale; import java.util.regex.Matcher; import java.util.regex.Pattern; /** * Factory methods for transforms. * <p> * Most users should create transforms using a * {@link PartitionSpec.Builder#builderFor(Schema)} partition spec builder}. * * @see PartitionSpec#builderFor(Schema) The partition spec builder. 
*/ public class Transforms { private Transforms() { } private static final Pattern HAS_WIDTH = Pattern.compile("(\\w+)\\[(\\d+)\\]"); public static Transform<?, ?> fromString(Type type, String transform) { Matcher width = HAS_WIDTH.matcher(transform); if (width.matches()) { String name = width.group(1); int w = Integer.parseInt(width.group(2)); if (name.equalsIgnoreCase("truncate")) { return Truncate.get(type, w); } else if (name.equals("bucket")) { return Bucket.get(type, w); } } if (transform.equalsIgnoreCase("identity")) { return Identity.get(type); } else if (type.typeId() == Type.TypeID.TIMESTAMP) { return Timestamps.valueOf(transform.toUpperCase(Locale.ENGLISH)); } else if (type.typeId() == Type.TypeID.DATE) { return Dates.valueOf(transform.toUpperCase(Locale.ENGLISH)); } throw new IllegalArgumentException("Unknown transform: " + transform); } /** * Returns an identity {@link Transform} that can be used for any type. * * @param type the {@link Type source type} for the transform * @param <T> Java type passed to this transform * @return an identity transform */ public static <T> Transform<T, T> identity(Type type) { return Identity.get(type); } /** * Returns a year {@link Transform} for date or timestamp types. * * @param type the {@link Type source type} for the transform * @param <T> Java type passed to this transform * @return a year transform */ @SuppressWarnings("unchecked") public static <T> Transform<T, Integer> year(Type type) { switch (type.typeId()) { case DATE: return (Transform<T, Integer>) Dates.YEAR; case TIMESTAMP: return (Transform<T, Integer>) Timestamps.YEAR; default: throw new IllegalArgumentException( "Cannot partition type " + type + " by year"); } } /** * Returns a month {@link Transform} for date or timestamp types. 
* * @param type the {@link Type source type} for the transform * @param <T> Java type passed to this transform * @return a month transform */ @SuppressWarnings("unchecked") public static <T> Transform<T, Integer> month(Type type) { switch (type.typeId()) { case DATE: return (Transform<T, Integer>) Dates.MONTH; case TIMESTAMP: return (Transform<T, Integer>) Timestamps.MONTH; default: throw new IllegalArgumentException( "Cannot partition type " + type + " by month"); } } /** * Returns a day {@link Transform} for date or timestamp types. * * @param type the {@link Type source type} for the transform * @param <T> Java type passed to this transform * @return a day transform */ @SuppressWarnings("unchecked") public static <T> Transform<T, Integer> day(Type type) { switch (type.typeId()) { case DATE: return (Transform<T, Integer>) Dates.DAY; case TIMESTAMP: return (Transform<T, Integer>) Timestamps.DAY; default: throw new IllegalArgumentException( "Cannot partition type " + type + " by month"); } } /** * Returns a hour {@link Transform} for timestamps. * * @param type the {@link Type source type} for the transform * @param <T> Java type passed to this transform * @return a hour transform */ @SuppressWarnings("unchecked") public static <T> Transform<T, Integer> hour(Type type) { Preconditions.checkArgument(type.typeId() == Type.TypeID.TIMESTAMP, "Cannot partition type %s by hour", type); return (Transform<T, Integer>) Timestamps.HOUR; } /** * Returns a bucket {@link Transform} for the given type and number of buckets. * * @param type the {@link Type source type} for the transform * @param numBuckets the number of buckets for the transform to produce * @param <T> Java type passed to this transform * @return a transform that buckets values into numBuckets */ public static <T> Transform<T, Integer> bucket(Type type, int numBuckets) { return Bucket.get(type, numBuckets); } /** * Returns a truncate {@link Transform} for the given type and width. 
* * @param type the {@link Type source type} for the transform * @param width the width to truncate data values * @param <T> Java type passed to this transform * @return a transform that truncates the given type to width */ public static <T> Transform<T, T> truncate(Type type, int width) { return Truncate.get(type, width); } }
2,112
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/TransformUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Base64;

/**
 * Helpers that format transformed partition values as human-readable Strings.
 * <p>
 * Ordinal arguments (year, month, day, hour) are counted from 1970-01-01T00:00:00Z.
 */
class TransformUtil {

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final int EPOCH_YEAR = EPOCH.getYear();

  // Formats a year ordinal as "yyyy".
  static String humanYear(int yearOrdinal) {
    return String.format("%04d", EPOCH_YEAR + yearOrdinal);
  }

  // Formats a month ordinal as "yyyy-MM". floorDiv/floorMod keep the month-of-year in [1, 12]
  // for ordinals before the epoch; plain / and % produced results like "1970-00" for negative
  // ordinals.
  static String humanMonth(int monthOrdinal) {
    return String.format("%04d-%02d",
        EPOCH_YEAR + Math.floorDiv(monthOrdinal, 12),
        1 + Math.floorMod(monthOrdinal, 12));
  }

  // Formats a day ordinal as "yyyy-MM-dd".
  static String humanDay(int dayOrdinal) {
    OffsetDateTime day = EPOCH.plusDays(dayOrdinal);
    return String.format("%04d-%02d-%02d",
        day.getYear(), day.getMonth().getValue(), day.getDayOfMonth());
  }

  // Formats microseconds from midnight as an ISO local time String.
  static String humanTime(Long microsFromMidnight) {
    return LocalTime.ofNanoOfDay(microsFromMidnight * 1000).toString();
  }

  // Formats microseconds from the epoch as an ISO offset timestamp String.
  static String humanTimestampWithZone(Long timestampMicros) {
    return ChronoUnit.MICROS.addTo(EPOCH, timestampMicros).toString();
  }

  // Formats microseconds from the epoch as an ISO local timestamp String (no offset).
  static String humanTimestampWithoutZone(Long timestampMicros) {
    return ChronoUnit.MICROS.addTo(EPOCH, timestampMicros).toLocalDateTime().toString();
  }

  // Formats an hour ordinal as "yyyy-MM-dd-HH".
  static String humanHour(int hourOrdinal) {
    OffsetDateTime time = EPOCH.plusHours(hourOrdinal);
    return String.format("%04d-%02d-%02d-%02d",
        time.getYear(), time.getMonth().getValue(), time.getDayOfMonth(), time.getHour());
  }

  // Base64-encodes a buffer. Uses direct charset decoding (StandardCharsets replaces the Guava
  // Charsets constant — same charset, no third-party dependency) because all of the encoded
  // bytes are in ASCII.
  static String base64encode(ByteBuffer buffer) {
    return StandardCharsets.ISO_8859_1.decode(Base64.getEncoder().encode(buffer)).toString();
  }
}
2,113
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Truncate.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.LT;
import static com.netflix.iceberg.expressions.Expression.Operation.LT_EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * A partition transform that truncates values to a fixed width.
 * <p>
 * Numeric types are truncated to the largest multiple of the width that is less than or equal to
 * the value (floor semantics, so negative values truncate downward). Strings and binary values are
 * truncated to at most {@code width} characters/bytes. The result type is always the source type.
 *
 * @param <T> the Java type of values produced and consumed by this transform
 */
abstract class Truncate<T> implements Transform<T, T> {
  /**
   * Returns a truncate transform for the given source type and width.
   *
   * @param type source column type
   * @param width truncation width (a divisor for numbers, a length for strings/binary)
   * @throws UnsupportedOperationException if the type cannot be truncated
   */
  @SuppressWarnings("unchecked")
  static <T> Truncate<T> get(Type type, int width) {
    switch (type.typeId()) {
      case INTEGER:
        return (Truncate<T>) new TruncateInteger(width);
      case LONG:
        return (Truncate<T>) new TruncateLong(width);
      case DECIMAL:
        return (Truncate<T>) new TruncateDecimal(width);
      case STRING:
        return (Truncate<T>) new TruncateString(width);
      case BINARY:
        return (Truncate<T>) new TruncateByteBuffer(width);
      default:
        throw new UnsupportedOperationException(
            "Cannot truncate type: " + type);
    }
  }

  /** Returns the configured truncation width. */
  public abstract Integer width();

  @Override
  public abstract T apply(T value);

  @Override
  public Type getResultType(Type sourceType) {
    // truncation never changes the type of the value
    return sourceType;
  }

  private static class TruncateInteger extends Truncate<Integer> {
    private final int width;

    private TruncateInteger(int width) {
      this.width = width;
    }

    @Override
    public Integer width() {
      return width;
    }

    @Override
    public Integer apply(Integer value) {
      // floor to a multiple of width; the double-remainder keeps negatives correct
      return value - (((value % width) + width) % width);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.INTEGER;
    }

    @Override
    public UnboundPredicate<Integer> project(String name, BoundPredicate<Integer> pred) {
      // null checks are independent of the transform and project directly
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateInteger(name, pred, this);
    }

    @Override
    public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Integer> predicate) {
      // The strict projection must be expressed in terms of applied values; the original
      // predicate cannot be reused because, for all x, apply(x) <= x. Only upper bounds
      // (LT, LT_EQ) can therefore be projected strictly.
      int lower;
      int upper;
      int lowerImage;
      int upperImage;
      switch (predicate.op()) {
        case LT:
          lower = predicate.literal().value() - 1;
          upper = predicate.literal().value();
          lowerImage = apply(lower);
          upperImage = apply(upper);
          // when the boundary crosses into a new bucket, the bucket itself is included
          if (lowerImage != upperImage) {
            return Expressions.predicate(LT_EQ, name, lowerImage);
          } else {
            return Expressions.predicate(LT, name, lowerImage);
          }
        case LT_EQ:
          lower = predicate.literal().value();
          upper = predicate.literal().value() + 1;
          lowerImage = apply(lower);
          upperImage = apply(upper);
          if (lowerImage != upperImage) {
            return Expressions.predicate(LT_EQ, name, lowerImage);
          } else {
            return Expressions.predicate(LT, name, lowerImage);
          }
        case GT:
        case GT_EQ:
        case EQ:
        case NOT_EQ:
        default:
          // no strict projection exists for lower bounds or equality
          return null;
      }
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      TruncateInteger that = (TruncateInteger) o;
      return width == that.width;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(width);
    }

    @Override
    public String toString() {
      return "truncate[" + width + "]";
    }
  }

  private static class TruncateLong extends Truncate<Long> {
    private final int width;

    private TruncateLong(int width) {
      this.width = width;
    }

    @Override
    public Integer width() {
      return width;
    }

    @Override
    public Long apply(Long value) {
      // floor to a multiple of width; the double-remainder keeps negatives correct
      return value - (((value % width) + width) % width);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.LONG;
    }

    @Override
    public UnboundPredicate<Long> project(String name, BoundPredicate<Long> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateLong(name, pred, this);
    }

    @Override
    public UnboundPredicate<Long> projectStrict(String name, BoundPredicate<Long> predicate) {
      // no strict projection is implemented for truncated longs
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      TruncateLong that = (TruncateLong) o;
      return width == that.width;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(width);
    }

    @Override
    public String toString() {
      return "truncate[" + width + "]";
    }
  }

  private static class TruncateString extends Truncate<CharSequence> {
    private final int length;

    private TruncateString(int length) {
      this.length = length;
    }

    @Override
    public Integer width() {
      return length;
    }

    @Override
    public CharSequence apply(CharSequence value) {
      // keep at most `length` leading characters; shorter values pass through unchanged
      return value.subSequence(0, Math.min(value.length(), length));
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.STRING;
    }

    @Override
    public UnboundPredicate<CharSequence> project(String name, BoundPredicate<CharSequence> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateArray(name, pred, this);
    }

    @Override
    public UnboundPredicate<CharSequence> projectStrict(String name,
                                                        BoundPredicate<CharSequence> predicate) {
      // no strict projection is implemented for truncated strings
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      TruncateString that = (TruncateString) o;
      return length == that.length;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(length);
    }

    @Override
    public String toString() {
      return "truncate[" + length + "]";
    }
  }

  private static class TruncateByteBuffer extends Truncate<ByteBuffer> {
    private final int length;

    private TruncateByteBuffer(int length) {
      this.length = length;
    }

    @Override
    public Integer width() {
      return length;
    }

    @Override
    public ByteBuffer apply(ByteBuffer value) {
      // duplicate so the caller's buffer position/limit are untouched, then cap the limit
      ByteBuffer truncated = value.duplicate();
      truncated.limit(Math.min(value.limit(), value.position() + length));
      return truncated;
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.BINARY;
    }

    @Override
    public UnboundPredicate<ByteBuffer> project(String name, BoundPredicate<ByteBuffer> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateArray(name, pred, this);
    }

    @Override
    public UnboundPredicate<ByteBuffer> projectStrict(String name,
                                                      BoundPredicate<ByteBuffer> predicate) {
      // no strict projection is implemented for truncated binary
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      TruncateByteBuffer that = (TruncateByteBuffer) o;
      return length == that.length;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(length);
    }

    @Override
    public String toHumanString(ByteBuffer value) {
      return value == null ? "null" : TransformUtil.base64encode(value);
    }

    @Override
    public String toString() {
      return "truncate[" + length + "]";
    }
  }

  private static class TruncateDecimal extends Truncate<BigDecimal> {
    private final BigInteger unscaledWidth;

    private TruncateDecimal(int unscaledWidth) {
      this.unscaledWidth = BigInteger.valueOf(unscaledWidth);
    }

    @Override
    public Integer width() {
      return unscaledWidth.intValue();
    }

    @Override
    public BigDecimal apply(BigDecimal value) {
      // floor the unscaled value to a multiple of the width at the value's own scale;
      // remainder/add/remainder handles negative unscaled values correctly
      BigDecimal remainder = new BigDecimal(
          value.unscaledValue()
              .remainder(unscaledWidth)
              .add(unscaledWidth)
              .remainder(unscaledWidth),
          value.scale());
      return value.subtract(remainder);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.DECIMAL;
    }

    @Override
    public UnboundPredicate<BigDecimal> project(String name, BoundPredicate<BigDecimal> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateDecimal(name, pred, this);
    }

    @Override
    public UnboundPredicate<BigDecimal> projectStrict(String name,
                                                      BoundPredicate<BigDecimal> predicate) {
      // no strict projection is implemented for truncated decimals
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }
      TruncateDecimal that = (TruncateDecimal) o;
      return unscaledWidth.equals(that.unscaledWidth);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(unscaledWidth);
    }

    @Override
    public String toString() {
      return "truncate[" + unscaledWidth + "]";
    }
  }
}
2,114
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/PartitionSpecVisitor.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.collect.Lists;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import java.util.List;

/**
 * A visitor over the fields of a {@link PartitionSpec}, dispatched by transform kind.
 * <p>
 * Implement one callback per supported transform; {@link #visit(Schema, PartitionSpec,
 * PartitionSpecVisitor)} calls the matching callback for each partition field in order and
 * collects the results.
 *
 * @param <T> the result type produced for each partition field
 */
public interface PartitionSpecVisitor<T> {
  T identity(String sourceName, int sourceId);

  T bucket(String sourceName, int sourceId, int width);

  T truncate(String sourceName, int sourceId, int width);

  T year(String sourceName, int sourceId);

  T month(String sourceName, int sourceId);

  T day(String sourceName, int sourceId);

  T hour(String sourceName, int sourceId);

  /**
   * Visits each field of the spec and returns the per-field results in spec order.
   * <p>
   * Fields whose transform is not one of the known kinds produce no result.
   *
   * @param schema the table schema, used to resolve source column names
   * @param spec the partition spec to traverse
   * @param visitor callbacks invoked per field
   * @param <R> the visitor's result type
   * @return a list of visitor results, one per recognized partition field
   */
  static <R> List<R> visit(Schema schema, PartitionSpec spec, PartitionSpecVisitor<R> visitor) {
    List<R> result = Lists.newArrayListWithExpectedSize(spec.fields().size());

    for (PartitionField field : spec.fields()) {
      String columnName = schema.findColumnName(field.sourceId());
      Transform<?, ?> transform = field.transform();

      if (transform instanceof Identity) {
        result.add(visitor.identity(columnName, field.sourceId()));
      } else if (transform instanceof Bucket) {
        int numBuckets = ((Bucket<?>) transform).numBuckets();
        result.add(visitor.bucket(columnName, field.sourceId(), numBuckets));
      } else if (transform instanceof Truncate) {
        int width = ((Truncate<?>) transform).width();
        result.add(visitor.truncate(columnName, field.sourceId(), width));
      } else if (transform == Dates.YEAR || transform == Timestamps.YEAR) {
        result.add(visitor.year(columnName, field.sourceId()));
      } else if (transform == Dates.MONTH || transform == Timestamps.MONTH) {
        result.add(visitor.month(columnName, field.sourceId()));
      } else if (transform == Dates.DAY || transform == Timestamps.DAY) {
        result.add(visitor.day(columnName, field.sourceId()));
      } else if (transform == Timestamps.HOUR) {
        result.add(visitor.hour(columnName, field.sourceId()));
      }
    }

    return result;
  }
}
2,115
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Dates.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * Partition transforms for date columns.
 * <p>
 * Input values are dates encoded as days from the Unix epoch (1970-01-01). Each variant maps a
 * day ordinal to the number of complete years, months, or days since the epoch, always producing
 * an {@code int} result.
 */
enum Dates implements Transform<Integer, Integer> {
  YEAR(ChronoUnit.YEARS, "year"),
  MONTH(ChronoUnit.MONTHS, "month"),
  DAY(ChronoUnit.DAYS, "day");

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);

  private final ChronoUnit granularity;
  private final String name;

  Dates(ChronoUnit granularity, String name) {
    this.granularity = granularity;
    this.name = name;
  }

  @Override
  public Integer apply(Integer days) {
    // DAY is the identity because the input is already a day ordinal
    return granularity == ChronoUnit.DAYS
        ? days
        : (int) granularity.between(EPOCH, EPOCH.plusDays(days));
  }

  @Override
  public boolean canTransform(Type type) {
    return type.typeId() == Type.TypeID.DATE;
  }

  @Override
  public Type getResultType(Type sourceType) {
    return Types.IntegerType.get();
  }

  @Override
  public UnboundPredicate<Integer> project(String name, BoundPredicate<Integer> pred) {
    // null checks are independent of the transform and project directly
    if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
      return Expressions.predicate(pred.op(), name);
    }
    return ProjectionUtil.truncateInteger(name, pred, this);
  }

  @Override
  public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Integer> predicate) {
    // no strict projection is implemented for date transforms
    return null;
  }

  @Override
  public String toHumanString(Integer value) {
    if (value == null) {
      return "null";
    }

    switch (granularity) {
      case YEARS:
        return TransformUtil.humanYear(value);
      case MONTHS:
        return TransformUtil.humanMonth(value);
      case DAYS:
        return TransformUtil.humanDay(value);
      default:
        throw new UnsupportedOperationException("Unsupported time unit: " + granularity);
    }
  }

  @Override
  public String toString() {
    return name;
  }
}
2,116
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Bucket.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Objects;
import com.google.common.collect.Sets;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Set;
import java.util.UUID;

import static com.netflix.iceberg.types.Type.TypeID;

/**
 * A partition transform that hashes values into a fixed number of buckets.
 * <p>
 * Values are hashed with 32-bit Murmur3 and assigned a bucket in {@code [0, N)} via
 * {@code (hash & Integer.MAX_VALUE) % N}, so the bucket is always non-negative.
 *
 * @param <T> the Java type of values consumed by this transform
 */
abstract class Bucket<T> implements Transform<T, Integer> {
  private static final HashFunction MURMUR3 = Hashing.murmur3_32();

  /**
   * Returns a bucket transform for the given source type.
   *
   * @param type source column type
   * @param N the number of buckets
   * @throws IllegalArgumentException if the type cannot be bucketed
   */
  @SuppressWarnings("unchecked")
  static <T> Bucket<T> get(Type type, int N) {
    switch (type.typeId()) {
      case DATE:
      case INTEGER:
        return (Bucket<T>) new BucketInteger(N);
      case TIME:
      case TIMESTAMP:
      case LONG:
        return (Bucket<T>) new BucketLong(N);
      case DECIMAL:
        return (Bucket<T>) new BucketDecimal(N);
      case STRING:
        return (Bucket<T>) new BucketString(N);
      case FIXED:
      case BINARY:
        return (Bucket<T>) new BucketByteBuffer(N);
      case UUID:
        return (Bucket<T>) new BucketUUID(N);
      default:
        throw new IllegalArgumentException("Cannot bucket by type: " + type);
    }
  }

  private final int N;

  private Bucket(int N) {
    this.N = N;
  }

  public Integer numBuckets() {
    return N;
  }

  /** Returns the 32-bit Murmur3 hash of the value, before bucket assignment. */
  @VisibleForTesting
  abstract int hash(T value);

  @Override
  public Integer apply(T value) {
    // mask the sign bit so the modulus is always in [0, N)
    return (hash(value) & Integer.MAX_VALUE) % N;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    Bucket<?> bucket = (Bucket<?>) o;
    return N == bucket.N;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(N);
  }

  @Override
  public String toString() {
    return "bucket[" + N + "]";
  }

  @Override
  public UnboundPredicate<Integer> project(String name, BoundPredicate<T> predicate) {
    switch (predicate.op()) {
      case EQ:
        return Expressions.predicate(
            predicate.op(), name, apply(predicate.literal().value()));
//      case IN:
//        return Expressions.predicate();
      default:
        // comparison predicates can't be projected, notEq can't be projected
        // TODO: small ranges can be projected.
        // for example, (x > 0) and (x < 3) can be turned into in({1, 2}) and projected.
        return null;
    }
  }

  @Override
  public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<T> predicate) {
    switch (predicate.op()) {
      case NOT_EQ: // TODO: need to translate not(eq(...)) into notEq in expressions
        return Expressions.predicate(predicate.op(), name, apply(predicate.literal().value()));
//      case NOT_IN:
//        return null;
      default:
        // no strict projection for comparison or equality
        return null;
    }
  }

  @Override
  public Type getResultType(Type sourceType) {
    return Types.IntegerType.get();
  }

  private static class BucketInteger extends Bucket<Integer> {
    private BucketInteger(int N) {
      super(N);
    }

    public int hash(Integer value) {
      // ints hash through the long path so int and long buckets agree for equal values
      return MURMUR3.hashLong(value.longValue()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.INTEGER || type.typeId() == TypeID.DATE;
    }
  }

  private static class BucketLong extends Bucket<Long> {
    private BucketLong(int N) {
      super(N);
    }

    public int hash(Long value) {
      return MURMUR3.hashLong(value).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return (
          type.typeId() == TypeID.LONG ||
          type.typeId() == TypeID.TIME ||
          type.typeId() == TypeID.TIMESTAMP
      );
    }
  }

  // bucketing by Float is not allowed by the spec, but this has the float hash implementation
  static class BucketFloat extends Bucket<Float> {
    // used by tests because the factory method will not instantiate a bucket function for floats
    BucketFloat(int N) {
      super(N);
    }

    public int hash(Float value) {
      return MURMUR3.hashLong(Double.doubleToRawLongBits((double) value)).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.FLOAT;
    }
  }

  // bucketing by Double is not allowed by the spec, but this has the double hash implementation
  static class BucketDouble extends Bucket<Double> {
    // used by tests because the factory method will not instantiate a bucket function for doubles
    BucketDouble(int N) {
      super(N);
    }

    public int hash(Double value) {
      return MURMUR3.hashLong(Double.doubleToRawLongBits(value)).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.DOUBLE;
    }
  }

  private static class BucketString extends Bucket<CharSequence> {
    private BucketString(int N) {
      super(N);
    }

    public int hash(CharSequence value) {
      return MURMUR3.hashString(value, Charsets.UTF_8).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.STRING;
    }
  }

  // NOTE(review): this class is not reachable from the factory, which uses BucketByteBuffer
  // for FIXED and BINARY; retained to avoid changing package-visible structure
  private static class BucketBytes extends Bucket<byte[]> {
    private static final Set<TypeID> SUPPORTED_TYPES = Sets.newHashSet(
        TypeID.BINARY, TypeID.FIXED);

    private BucketBytes(int N) {
      super(N);
    }

    public int hash(byte[] value) {
      return MURMUR3.hashBytes(value).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return SUPPORTED_TYPES.contains(type.typeId());
    }
  }

  private static class BucketByteBuffer extends Bucket<ByteBuffer> {
    private static final Set<TypeID> SUPPORTED_TYPES = Sets.newHashSet(
        TypeID.BINARY, TypeID.FIXED);

    private BucketByteBuffer(int N) {
      super(N);
    }

    public int hash(ByteBuffer value) {
      if (value.hasArray()) {
        // hashBytes takes (bytes, offset, length); the length argument is the number of
        // remaining bytes, not an end offset. Passing arrayOffset() + remaining() here
        // hashed the wrong region (or threw) whenever arrayOffset() was non-zero.
        return MURMUR3.hashBytes(value.array(),
            value.arrayOffset() + value.position(),
            value.remaining()).asInt();
      } else {
        int position = value.position();
        byte[] copy = new byte[value.remaining()];
        try {
          value.get(copy);
        } finally {
          // make sure the buffer position is unchanged
          value.position(position);
        }
        return MURMUR3.hashBytes(copy).asInt();
      }
    }

    @Override
    public boolean canTransform(Type type) {
      return SUPPORTED_TYPES.contains(type.typeId());
    }
  }

  private static class BucketUUID extends Bucket<UUID> {
    // a per-thread scratch buffer avoids allocation while keeping hash() thread-safe
    private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
      ByteBuffer buffer = ByteBuffer.allocate(16);
      buffer.order(ByteOrder.BIG_ENDIAN);
      return buffer;
    });

    private BucketUUID(int N) {
      super(N);
    }

    public int hash(UUID value) {
      ByteBuffer buffer = BUFFER.get();
      buffer.rewind();
      buffer.putLong(value.getMostSignificantBits());
      buffer.putLong(value.getLeastSignificantBits());
      return MURMUR3.hashBytes(buffer.array()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.UUID;
    }
  }

  private static class BucketDecimal extends Bucket<BigDecimal> {
    private BucketDecimal(int N) {
      super(N);
    }

    public int hash(BigDecimal value) {
      // hashes only the unscaled value; callers are expected to normalize scale
      return MURMUR3.hashBytes(value.unscaledValue().toByteArray()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.DECIMAL;
    }
  }
}
2,117
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Projections.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor; import com.netflix.iceberg.PartitionField; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.transforms.Transform; /** * Utils to project expressions on rows to expressions on partitions. */ public class Projections { private Projections() { } /** * A class that projects expressions for a table's data rows into expressions on the table's * partition values, for a table's {@link PartitionSpec partition spec}. * <p> * There are two types of projections: inclusive and strict. * <p> * An inclusive projection guarantees that if an expression matches a row, the projected * expression will match the row's partition. * <p> * A strict projection guarantees that if a partition matches a projected expression, then all * rows in that partition will match the original expression. */ public static abstract class ProjectionEvaluator extends ExpressionVisitor<Expression> { /** * Project the given row expression to a partition expression. * * @param expr an expression on data rows * @return an expression on partition data (depends on the projection) */ public abstract Expression project(Expression expr); } /** * Creates an inclusive {@code ProjectionEvaluator} for the {@link PartitionSpec spec}. 
* <p> * An evaluator is used to project expressions for a table's data rows into expressions on the * table's partition values. The evaluator returned by this function is inclusive and will build * expressions with the following guarantee: if the original expression matches a row, then the * projected expression will match that row's partition. * <p> * Each predicate in the expression is projected using * {@link Transform#project(String, BoundPredicate)}. * * @param spec a partition spec * @return an inclusive projection evaluator for the partition spec * @see Transform#project(String, BoundPredicate) Inclusive transform used for each predicate */ public static ProjectionEvaluator inclusive(PartitionSpec spec) { return new InclusiveProjection(spec); } /** * Creates a strict {@code ProjectionEvaluator} for the {@link PartitionSpec spec}. * <p> * An evaluator is used to project expressions for a table's data rows into expressions on the * table's partition values. The evaluator returned by this function is strict and will build * expressions with the following guarantee: if the projected expression matches a partition, * then the original expression will match all rows in that partition. * <p> * Each predicate in the expression is projected using * {@link Transform#projectStrict(String, BoundPredicate)}. * * @param spec a partition spec * @return a strict projection evaluator for the partition spec * @see Transform#projectStrict(String, BoundPredicate) Strict transform used for each predicate */ public static ProjectionEvaluator strict(PartitionSpec spec) { return new StrictProjection(spec); } private static class BaseProjectionEvaluator extends ProjectionEvaluator { final PartitionSpec spec; private BaseProjectionEvaluator(PartitionSpec spec) { this.spec = spec; } @Override public Expression project(Expression expr) { // projections assume that there are no NOT nodes in the expression tree. 
to ensure that this // is the case, the expression is rewritten to push all NOT nodes down to the expression // leaf nodes. // this is necessary to ensure that the default expression returned when a predicate can't be // projected is correct. return ExpressionVisitors.visit(ExpressionVisitors.visit(expr, RewriteNot.get()), this); } @Override public Expression alwaysTrue() { return Expressions.alwaysTrue(); } @Override public Expression alwaysFalse() { return Expressions.alwaysFalse(); } @Override public Expression not(Expression result) { throw new UnsupportedOperationException("[BUG] project called on expression with a not"); } @Override public Expression and(Expression leftResult, Expression rightResult) { return Expressions.and(leftResult, rightResult); } @Override public Expression or(Expression leftResult, Expression rightResult) { return Expressions.or(leftResult, rightResult); } @Override public <T> Expression predicate(UnboundPredicate<T> pred) { Expression bound = pred.bind(spec.schema().asStruct()); if (bound instanceof BoundPredicate) { return predicate((BoundPredicate<?>) bound); } return bound; } } private static class InclusiveProjection extends BaseProjectionEvaluator { private InclusiveProjection(PartitionSpec spec) { super(spec); } @Override @SuppressWarnings("unchecked") public <T> Expression predicate(BoundPredicate<T> pred) { PartitionField part = spec.getFieldBySourceId(pred.ref().fieldId()); if (part == null) { // the predicate has no partition column return alwaysTrue(); } UnboundPredicate<?> result = ((Transform<T, ?>) part.transform()).project(part.name(), pred); if (result != null) { return result; } // if the predicate could not be projected, it always matches return alwaysTrue(); } } private static class StrictProjection extends BaseProjectionEvaluator { private StrictProjection(PartitionSpec spec) { super(spec); } @Override @SuppressWarnings("unchecked") public <T> Expression predicate(BoundPredicate<T> pred) { PartitionField part = 
spec.getFieldBySourceId(pred.ref().fieldId()); if (part == null) { // the predicate has no partition column return alwaysFalse(); } UnboundPredicate<?> result = ((Transform<T, ?>) part.transform()) .projectStrict(part.name(), pred); if (result != null) { return result; } // if the predicate could not be projected, it never matches return alwaysFalse(); } } }
2,118
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/ExpressionVisitors.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

/**
 * Utils for traversing {@link Expression expressions}.
 */
public class ExpressionVisitors {

  /**
   * A visitor with one callback per expression node kind. Every callback defaults to
   * returning {@code null}, so implementations override only what they need.
   *
   * @param <R> the result type produced by the visitor
   */
  public abstract static class ExpressionVisitor<R> {
    public R alwaysTrue() {
      return null;
    }

    public R alwaysFalse() {
      return null;
    }

    public R not(R result) {
      return null;
    }

    public R and(R leftResult, R rightResult) {
      return null;
    }

    public R or(R leftResult, R rightResult) {
      return null;
    }

    public <T> R predicate(BoundPredicate<T> pred) {
      return null;
    }

    public <T> R predicate(UnboundPredicate<T> pred) {
      return null;
    }
  }

  /**
   * An {@link ExpressionVisitor} for fully-bound expressions that further dispatches each
   * bound predicate to a per-operation callback (isNull, lt, eq, ...). Encountering an
   * unbound predicate is an error.
   *
   * @param <R> the result type produced by the visitor
   */
  public abstract static class BoundExpressionVisitor<R> extends ExpressionVisitor<R> {
    public <T> R isNull(BoundReference<T> ref) {
      return null;
    }

    public <T> R notNull(BoundReference<T> ref) {
      return null;
    }

    public <T> R lt(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R ltEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R gt(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R gtEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R eq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R notEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R in(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R notIn(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R predicate(BoundPredicate<T> pred) {
      // route the predicate to the callback for its operation
      switch (pred.op()) {
        case IS_NULL:
          return isNull(pred.ref());
        case NOT_NULL:
          return notNull(pred.ref());
        case LT:
          return lt(pred.ref(), pred.literal());
        case LT_EQ:
          return ltEq(pred.ref(), pred.literal());
        case GT:
          return gt(pred.ref(), pred.literal());
        case GT_EQ:
          return gtEq(pred.ref(), pred.literal());
        case EQ:
          return eq(pred.ref(), pred.literal());
        case NOT_EQ:
          return notEq(pred.ref(), pred.literal());
        case IN:
          return in(pred.ref(), pred.literal());
        case NOT_IN:
          return notIn(pred.ref(), pred.literal());
        default:
          throw new UnsupportedOperationException(
              "Unknown operation for predicate: " + pred.op());
      }
    }

    public <T> R predicate(UnboundPredicate<T> pred) {
      throw new UnsupportedOperationException("Not a bound predicate: " + pred);
    }
  }

  /**
   * Traverses the given {@link Expression expression} with a {@link ExpressionVisitor visitor}.
   * <p>
   * The visitor will be called to handle each node in the expression tree in postfix order. Result
   * values produced by child nodes are passed when parent nodes are handled.
   *
   * @param expr an expression to traverse
   * @param visitor a visitor that will be called to handle each node in the expression tree
   * @param <R> the return type produced by the expression visitor
   * @return the value returned by the visitor for the root expression node
   */
  @SuppressWarnings("unchecked")
  public static <R> R visit(Expression expr, ExpressionVisitor<R> visitor) {
    if (expr instanceof Predicate) {
      // predicates are leaves: dispatch by bound/unbound without recursing
      if (expr instanceof BoundPredicate) {
        return visitor.predicate((BoundPredicate<?>) expr);
      } else {
        return visitor.predicate((UnboundPredicate<?>) expr);
      }
    }

    switch (expr.op()) {
      case TRUE:
        return visitor.alwaysTrue();
      case FALSE:
        return visitor.alwaysFalse();
      case NOT:
        Not notExpr = (Not) expr;
        return visitor.not(visit(notExpr.child(), visitor));
      case AND:
        And andExpr = (And) expr;
        return visitor.and(visit(andExpr.left(), visitor), visit(andExpr.right(), visitor));
      case OR:
        Or orExpr = (Or) expr;
        return visitor.or(visit(orExpr.left(), visitor), visit(orExpr.right(), visitor));
      default:
        throw new UnsupportedOperationException("Unknown operation: " + expr.op());
    }
  }
}
2,119
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/False.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import java.io.ObjectStreamException;

/**
 * An {@link Expression expression} that is always false.
 */
public class False implements Expression {
  // package-private singleton; the private constructor prevents other instances
  static final False INSTANCE = new False();

  private False() {
  }

  @Override
  public Operation op() {
    return Operation.FALSE;
  }

  @Override
  public Expression negate() {
    // not(false) is true
    return True.INSTANCE;
  }

  @Override
  public String toString() {
    return "false";
  }

  // replace this instance with a proxy during Java serialization so that
  // deserialization can resolve back to a constant expression
  Object writeReplace() throws ObjectStreamException {
    return new SerializationProxies.ConstantExpressionProxy(false);
  }
}
2,120
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/StrictMetricsEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.Map;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link DataFile} to test whether all rows in the file match.
 * <p>
 * This evaluation is strict: it returns true if all rows in a file must match the expression. For
 * example, if a file's ts column has min X and max Y, this evaluator will return true for ts &lt; Y+1
 * but not for ts &lt; Y-1.
 * <p>
 * Files are passed to {@link #eval(DataFile)}, which returns true if all rows in the file must
 * contain matching rows and false if the file may contain rows that do not match.
 */
public class StrictMetricsEvaluator {
  private final Schema schema;
  private final StructType struct;
  // the filter expression, bound to struct with NOT nodes pushed down via De Morgan's laws
  private final Expression expr;
  // per-thread visitor cache; transient so serialized copies re-create it lazily
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  // Lazily creates the per-thread visitor. Note the null check itself is not synchronized;
  // worst case two threads each install a ThreadLocal, which is harmless.
  private MetricsEvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  public StrictMetricsEvaluator(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    // rewriteNot eliminates NOT by negating children so the visitor only sees positive operations
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param file a data file
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean eval(DataFile file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return visitor().eval(file);
  }

  private static final boolean ROWS_MUST_MATCH = true;
  private static final boolean ROWS_MIGHT_NOT_MATCH = false;

  /**
   * Visits the bound expression, deciding each predicate from the file's column-level metrics
   * (value/null counts and lower/upper bounds) instead of row data.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Long> nullCounts = null;
    private Map<Integer, ByteBuffer> lowerBounds = null;
    private Map<Integer, ByteBuffer> upperBounds = null;

    private boolean eval(DataFile file) {
      if (file.recordCount() <= 0) {
        // an empty file trivially satisfies "all rows match"
        return ROWS_MUST_MATCH;
      }

      this.valueCounts = file.valueCounts();
      this.nullCounts = file.nullValueCounts();
      this.lowerBounds = file.lowerBounds();
      this.upperBounds = file.upperBounds();

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MUST_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_MIGHT_NOT_MATCH; // no rows match
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has any non-null values, the expression does not match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      // valueCount includes nulls, so (valueCount - nullCount) == 0 means every value is null
      if (valueCounts != null && valueCounts.containsKey(id) &&
          nullCounts != null && nullCounts.containsKey(id) &&
          valueCounts.get(id) - nullCounts.get(id) == 0) {
        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has any null values, the expression does not match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) {
        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <----------Min----Max---X------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        // max < X means every value in the file is < X
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <----------Min----Max---X------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        // max <= X means every value in the file is <= X
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <-------X---Min----Max---------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        // min > X means every value in the file is > X
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <-------X---Min----Max---------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        // min >= X means every value in the file is >= X
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when Min == X == Max
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      // both bounds are required: a single bound cannot prove every value equals X
      if (lowerBounds != null && lowerBounds.containsKey(id) &&
          upperBounds != null && upperBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(struct.field(id).type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp != 0) {
          return ROWS_MIGHT_NOT_MATCH;
        }

        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        cmp = lit.comparator().compare(upper, lit.value());
        if (cmp != 0) {
          return ROWS_MIGHT_NOT_MATCH;
        }

        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when X < Min or Max < X because it is not in the range
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(struct.field(id).type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_MUST_MATCH;
        }
      }

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      // metrics cannot prove that all rows are in a set; be conservative
      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // metrics cannot prove that all rows are outside a set; be conservative
      return ROWS_MIGHT_NOT_MATCH;
    }
  }
}
2,121
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Literals.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.io.ObjectStreamException;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.Comparator;
import java.util.UUID;

/**
 * Factory and implementations for {@link Literal} values.
 * <p>
 * Each literal class knows how to convert itself to other Iceberg types via {@link Literal#to},
 * returning null when the conversion is not allowed, or the {@link AboveMax}/{@link BelowMin}
 * sentinels when the value does not fit in the target type's range.
 */
class Literals {
  private Literals() {
  }

  // reference instants for date/timestamp conversions; all values are stored relative to epoch
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /**
   * Create a {@link Literal} from an Object.
   *
   * @param value a value
   * @param <T> Java type of value
   * @return a Literal for the given value
   * @throws IllegalArgumentException if the value's class has no literal representation
   */
  @SuppressWarnings("unchecked")
  static <T> Literal<T> from(T value) {
    Preconditions.checkNotNull(value, "Cannot create expression literal from null");

    if (value instanceof Boolean) {
      return (Literal<T>) new Literals.BooleanLiteral((Boolean) value);
    } else if (value instanceof Integer) {
      return (Literal<T>) new Literals.IntegerLiteral((Integer) value);
    } else if (value instanceof Long) {
      return (Literal<T>) new Literals.LongLiteral((Long) value);
    } else if (value instanceof Float) {
      return (Literal<T>) new Literals.FloatLiteral((Float) value);
    } else if (value instanceof Double) {
      return (Literal<T>) new Literals.DoubleLiteral((Double) value);
    } else if (value instanceof CharSequence) {
      return (Literal<T>) new Literals.StringLiteral((CharSequence) value);
    } else if (value instanceof UUID) {
      return (Literal<T>) new Literals.UUIDLiteral((UUID) value);
    } else if (value instanceof byte[]) {
      // byte[] maps to fixed; the array is wrapped, not copied
      return (Literal<T>) new Literals.FixedLiteral(ByteBuffer.wrap((byte[]) value));
    } else if (value instanceof ByteBuffer) {
      return (Literal<T>) new Literals.BinaryLiteral((ByteBuffer) value);
    } else if (value instanceof BigDecimal) {
      return (Literal<T>) new Literals.DecimalLiteral((BigDecimal) value);
    }

    throw new IllegalArgumentException(String.format(
        "Cannot create expression literal from %s: %s", value.getClass().getName(), value));
  }

  // singleton sentinel: the value is larger than the target type's maximum
  @SuppressWarnings("unchecked")
  static <T> AboveMax<T> aboveMax() {
    return AboveMax.INSTANCE;
  }

  // singleton sentinel: the value is smaller than the target type's minimum
  @SuppressWarnings("unchecked")
  static <T> BelowMin<T> belowMin() {
    return BelowMin.INSTANCE;
  }

  /** Base class holding a non-null literal value. */
  private abstract static class BaseLiteral<T> implements Literal<T> {
    private final T value;

    BaseLiteral(T value) {
      Preconditions.checkNotNull(value, "Literal values cannot be null");
      this.value = value;
    }

    @Override
    public T value() {
      return value;
    }

    @Override
    public String toString() {
      return String.valueOf(value);
    }
  }

  /** Base class for literals whose values are naturally {@link Comparable}. */
  private abstract static class ComparableLiteral<C extends Comparable<C>> extends BaseLiteral<C> {
    @SuppressWarnings("unchecked")
    private static final Comparator<? extends Comparable> CMP =
        Comparators.<Comparable>nullsFirst().thenComparing(Comparator.naturalOrder());

    public ComparableLiteral(C value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public Comparator<C> comparator() {
      return (Comparator<C>) CMP;
    }
  }

  /** Sentinel literal meaning "greater than the target type's maximum"; has no value. */
  static class AboveMax<T> implements Literal<T> {
    private static final AboveMax INSTANCE = new AboveMax();

    private AboveMax() {
    }

    @Override
    public T value() {
      throw new UnsupportedOperationException("AboveMax has no value");
    }

    @Override
    public <X> Literal<X> to(Type type) {
      throw new UnsupportedOperationException("Cannot change the type of AboveMax");
    }

    @Override
    public Comparator<T> comparator() {
      throw new UnsupportedOperationException("AboveMax has no comparator");
    }

    @Override
    public String toString() {
      return "aboveMax";
    }
  }

  /** Sentinel literal meaning "less than the target type's minimum"; has no value. */
  static class BelowMin<T> implements Literal<T> {
    private static final BelowMin INSTANCE = new BelowMin();

    private BelowMin() {
    }

    @Override
    public T value() {
      throw new UnsupportedOperationException("BelowMin has no value");
    }

    @Override
    public <X> Literal<X> to(Type type) {
      throw new UnsupportedOperationException("Cannot change the type of BelowMin");
    }

    @Override
    public Comparator<T> comparator() {
      throw new UnsupportedOperationException("BelowMin has no comparator");
    }

    @Override
    public String toString() {
      return "belowMin";
    }
  }

  static class BooleanLiteral extends ComparableLiteral<Boolean> {
    BooleanLiteral(Boolean value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      if (type.typeId() == Type.TypeID.BOOLEAN) {
        return (Literal<T>) this;
      }
      return null;
    }
  }

  static class IntegerLiteral extends ComparableLiteral<Integer> {
    IntegerLiteral(Integer value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case INTEGER:
          return (Literal<T>) this;
        case LONG:
          return (Literal<T>) new LongLiteral(value().longValue());
        case FLOAT:
          return (Literal<T>) new FloatLiteral(value().floatValue());
        case DOUBLE:
          return (Literal<T>) new DoubleLiteral(value().doubleValue());
        case DATE:
          // an int is interpreted as days from epoch
          return (Literal<T>) new DateLiteral(value());
        case DECIMAL:
          int scale = ((Types.DecimalType) type).scale();
          // rounding mode isn't necessary, but pass one to avoid warnings
          return (Literal<T>) new DecimalLiteral(
              BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
        default:
          return null;
      }
    }
  }

  static class LongLiteral extends ComparableLiteral<Long> {
    LongLiteral(Long value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case INTEGER:
          // out-of-range longs become sentinels instead of silently truncating
          if ((long) Integer.MAX_VALUE < value()) {
            return aboveMax();
          } else if ((long) Integer.MIN_VALUE > value()) {
            return belowMin();
          }
          return (Literal<T>) new IntegerLiteral(value().intValue());
        case LONG:
          return (Literal<T>) this;
        case FLOAT:
          return (Literal<T>) new FloatLiteral(value().floatValue());
        case DOUBLE:
          return (Literal<T>) new DoubleLiteral(value().doubleValue());
        case TIME:
          // a long is interpreted as microseconds from midnight
          return (Literal<T>) new TimeLiteral(value());
        case TIMESTAMP:
          // a long is interpreted as microseconds from epoch
          return (Literal<T>) new TimestampLiteral(value());
        case DECIMAL:
          int scale = ((Types.DecimalType) type).scale();
          // rounding mode isn't necessary, but pass one to avoid warnings
          return (Literal<T>) new DecimalLiteral(
              BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
        default:
          return null;
      }
    }
  }

  static class FloatLiteral extends ComparableLiteral<Float> {
    FloatLiteral(Float value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case FLOAT:
          return (Literal<T>) this;
        case DOUBLE:
          return (Literal<T>) new DoubleLiteral(value().doubleValue());
        case DECIMAL:
          int scale = ((Types.DecimalType) type).scale();
          return (Literal<T>) new DecimalLiteral(
              BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
        default:
          return null;
      }
    }
  }

  static class DoubleLiteral extends ComparableLiteral<Double> {
    DoubleLiteral(Double value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case FLOAT:
          if ((double) Float.MAX_VALUE < value()) {
            return aboveMax();
          } else if ((double) -Float.MAX_VALUE > value()) {
            // Compare with -Float.MAX_VALUE because it is the most negative float value.
            // Float.MIN_VALUE is the smallest non-negative floating point value.
            return belowMin();
          }
          return (Literal<T>) new FloatLiteral(value().floatValue());
        case DOUBLE:
          return (Literal<T>) this;
        case DECIMAL:
          int scale = ((Types.DecimalType) type).scale();
          return (Literal<T>) new DecimalLiteral(
              BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
        default:
          return null;
      }
    }
  }

  /** Date literal stored as days from epoch. */
  static class DateLiteral extends ComparableLiteral<Integer> {
    DateLiteral(Integer value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      if (type.typeId() == Type.TypeID.DATE) {
        return (Literal<T>) this;
      }
      return null;
    }
  }

  /** Time literal stored as microseconds from midnight. */
  static class TimeLiteral extends ComparableLiteral<Long> {
    TimeLiteral(Long value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      if (type.typeId() == Type.TypeID.TIME) {
        return (Literal<T>) this;
      }
      return null;
    }
  }

  /** Timestamp literal stored as microseconds from epoch. */
  static class TimestampLiteral extends ComparableLiteral<Long> {
    TimestampLiteral(Long value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case TIMESTAMP:
          return (Literal<T>) this;
        case DATE:
          // truncate to the UTC date containing this timestamp
          return (Literal<T>) new DateLiteral((int) ChronoUnit.DAYS.between(
              EPOCH_DAY, EPOCH.plus(value(), ChronoUnit.MICROS).toLocalDate()));
        default:
      }
      return null;
    }
  }

  static class DecimalLiteral extends ComparableLiteral<BigDecimal> {
    DecimalLiteral(BigDecimal value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case DECIMAL:
          // do not change decimal scale
          if (value().scale() == ((Types.DecimalType) type).scale()) {
            return (Literal<T>) this;
          }
          return null;
        default:
          return null;
      }
    }
  }

  static class StringLiteral extends BaseLiteral<CharSequence> {
    private static final Comparator<CharSequence> CMP =
        Comparators.<CharSequence>nullsFirst().thenComparing(Comparators.charSequences());

    StringLiteral(CharSequence value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case DATE:
          // parse ISO-8601 local date, e.g. "2017-12-01"
          int date = (int) ChronoUnit.DAYS.between(EPOCH_DAY,
              LocalDate.parse(value(), DateTimeFormatter.ISO_LOCAL_DATE));
          return (Literal<T>) new DateLiteral(date);
        case TIME:
          // parse ISO-8601 local time; nanoOfDay / 1000 converts to micros
          long timeMicros = LocalTime.parse(value(), DateTimeFormatter.ISO_LOCAL_TIME)
              .toNanoOfDay() / 1000;
          return (Literal<T>) new TimeLiteral(timeMicros);
        case TIMESTAMP:
          if (((Types.TimestampType) type).shouldAdjustToUTC()) {
            // the string carries an offset; normalize to UTC micros from epoch
            long timestampMicros = ChronoUnit.MICROS.between(EPOCH,
                OffsetDateTime.parse(value(), DateTimeFormatter.ISO_DATE_TIME));
            return (Literal<T>) new TimestampLiteral(timestampMicros);
          } else {
            // the string is a local timestamp; interpret it as UTC
            long timestampMicros = ChronoUnit.MICROS.between(EPOCH,
                LocalDateTime.parse(value(), DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                    .atOffset(ZoneOffset.UTC));
            return (Literal<T>) new TimestampLiteral(timestampMicros);
          }
        case STRING:
          return (Literal<T>) this;
        case UUID:
          return (Literal<T>) new UUIDLiteral(UUID.fromString(value().toString()));
        case DECIMAL:
          // only convert when the parsed scale matches the target type's scale
          int scale = ((Types.DecimalType) type).scale();
          BigDecimal decimal = new BigDecimal(value().toString());
          if (scale == decimal.scale()) {
            return (Literal<T>) new DecimalLiteral(decimal);
          }
          return null;
        default:
          return null;
      }
    }

    @Override
    public Comparator<CharSequence> comparator() {
      return CMP;
    }

    @Override
    public String toString() {
      return "\"" + value() + "\"";
    }
  }

  static class UUIDLiteral extends ComparableLiteral<UUID> {
    UUIDLiteral(UUID value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      if (type.typeId() == Type.TypeID.UUID) {
        return (Literal<T>) this;
      }
      return null;
    }
  }

  static class FixedLiteral extends BaseLiteral<ByteBuffer> {
    private static final Comparator<ByteBuffer> CMP =
        Comparators.<ByteBuffer>nullsFirst().thenComparing(Comparators.unsignedBytes());

    FixedLiteral(ByteBuffer value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case FIXED:
          // only convert when the buffer length matches the fixed type's length
          Types.FixedType fixed = (Types.FixedType) type;
          if (value().remaining() == fixed.length()) {
            return (Literal<T>) this;
          }
          return null;
        case BINARY:
          return (Literal<T>) new BinaryLiteral(value());
        default:
          return null;
      }
    }

    @Override
    public Comparator<ByteBuffer> comparator() {
      return CMP;
    }

    // ByteBuffer is not Serializable; replace with a proxy during serialization
    Object writeReplace() throws ObjectStreamException {
      return new SerializationProxies.FixedLiteralProxy(value());
    }
  }

  static class BinaryLiteral extends BaseLiteral<ByteBuffer> {
    private static final Comparator<ByteBuffer> CMP =
        Comparators.<ByteBuffer>nullsFirst().thenComparing(Comparators.unsignedBytes());

    BinaryLiteral(ByteBuffer value) {
      super(value);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Literal<T> to(Type type) {
      switch (type.typeId()) {
        case FIXED:
          // only convert when the buffer length matches the fixed type's length
          Types.FixedType fixed = (Types.FixedType) type;
          if (value().remaining() == fixed.length()) {
            return (Literal<T>) new FixedLiteral(value());
          }
          return null;
        case BINARY:
          return (Literal<T>) this;
        default:
          return null;
      }
    }

    @Override
    public Comparator<ByteBuffer> comparator() {
      return CMP;
    }

    // ByteBuffer is not Serializable; replace with a proxy during serialization
    Object writeReplace() throws ObjectStreamException {
      return new SerializationProxies.BinaryLiteralProxy(value());
    }
  }
}
2,122
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/UnboundPredicate.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * A predicate whose reference is a column name that has not yet been resolved against a schema.
 * <p>
 * Call {@link #bind(Types.StructType)} to resolve the name, convert the literal to the field's
 * type, and simplify where possible (e.g. a range test that can never/always match).
 */
public class UnboundPredicate<T> extends Predicate<T, NamedReference> {
  UnboundPredicate(Operation op, NamedReference namedRef, T value) {
    // wrap the raw value in a Literal; Literals.from rejects null and unknown types
    super(op, namedRef, Literals.from(value));
  }

  UnboundPredicate(Operation op, NamedReference namedRef) {
    // unary predicates (IS_NULL, NOT_NULL) have no literal
    super(op, namedRef, null);
  }

  UnboundPredicate(Operation op, NamedReference namedRef, Literal<T> lit) {
    super(op, namedRef, lit);
  }

  @Override
  public Expression negate() {
    return new UnboundPredicate<>(op().negate(), ref(), literal());
  }

  /**
   * Resolves this predicate's column name against the given struct.
   * <p>
   * May return a simplified expression instead of a {@link BoundPredicate}:
   * <ul>
   *   <li>IS_NULL/NOT_NULL on a required field collapses to alwaysFalse/alwaysTrue</li>
   *   <li>a literal outside the field type's range collapses using AboveMax/BelowMin</li>
   * </ul>
   *
   * @param struct the struct type to resolve the column name against
   * @return a bound predicate or a simplified constant expression
   * @throws ValidationException if the column is missing or the literal cannot be converted
   */
  public Expression bind(Types.StructType struct) {
    Types.NestedField field = struct.field(ref().name());
    ValidationException.check(field != null,
        "Cannot find field '%s' in struct: %s", ref().name(), struct);

    if (literal() == null) {
      switch (op()) {
        case IS_NULL:
          if (field.isRequired()) {
            // a required field can never be null
            return Expressions.alwaysFalse();
          }
          return new BoundPredicate<>(IS_NULL, new BoundReference<>(struct, field.fieldId()));
        case NOT_NULL:
          if (field.isRequired()) {
            // a required field is always non-null
            return Expressions.alwaysTrue();
          }
          return new BoundPredicate<>(NOT_NULL, new BoundReference<>(struct, field.fieldId()));
        default:
          throw new ValidationException("Operation must be IS_NULL or NOT_NULL");
      }
    }

    // convert the literal to the field's type; null means conversion is not allowed
    Literal<T> lit = literal().to(field.type());
    if (lit == null) {
      throw new ValidationException(String.format(
          "Invalid value for comparison inclusive type %s: %s (%s)",
          field.type(), literal().value(), literal().value().getClass().getName()));

    } else if (lit == Literals.aboveMax()) {
      // the literal is above the type's maximum, so comparisons have constant results
      switch (op()) {
        case LT:
        case LT_EQ:
        case NOT_EQ:
          return Expressions.alwaysTrue();
        case GT:
        case GT_EQ:
        case EQ:
          return Expressions.alwaysFalse();
//        case IN:
//          break;
//        case NOT_IN:
//          break;
      }
    } else if (lit == Literals.belowMin()) {
      // the literal is below the type's minimum, so comparisons have constant results
      switch (op()) {
        case GT:
        case GT_EQ:
        case NOT_EQ:
          return Expressions.alwaysTrue();
        case LT:
        case LT_EQ:
        case EQ:
          return Expressions.alwaysFalse();
//        case IN:
//          break;
//        case NOT_IN:
//          break;
      }
    }
    return new BoundPredicate<>(op(), new BoundReference<>(struct, field.fieldId()), lit);
  }
}
2,123
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/ResidualEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.transforms.Transform;
import java.io.Serializable;
import java.util.Comparator;

/**
 * Finds the residuals for an {@link Expression} the partitions in the given {@link PartitionSpec}.
 * <p>
 * A residual expression is made by partially evaluating an expression using partition values. For
 * example, if a table is partitioned by day(utc_timestamp) and is read with a filter expression
 * utc_timestamp &gt;= a and utc_timestamp &lt;= b, then there are 4 possible residuals expressions
 * for the partition data, d:
 * <ul>
 *   <li>If d &gt; day(a) and d &lt; day(b), the residual is always true</li>
 *   <li>If d == day(a) and d != day(b), the residual is utc_timestamp &gt;= a</li>
 *   <li>if d == day(b) and d != day(a), the residual is utc_timestamp &lt;= b</li>
 *   <li>If d == day(a) == day(b), the residual is utc_timestamp &gt;= a and utc_timestamp &lt;= b
 *   </li>
 * </ul>
 * <p>
 * Partition data is passed using {@link StructLike}. Residuals are returned by
 * {@link #residualFor(StructLike)}.
 * <p>
 * This class is thread-safe.
 */
public class ResidualEvaluator implements Serializable {
  private final PartitionSpec spec;
  private final Expression expr;
  // per-thread visitor cache; transient so serialized copies re-create it lazily
  private transient ThreadLocal<ResidualVisitor> visitors = null;

  private ResidualVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(ResidualVisitor::new);
    }
    return visitors.get();
  }

  public ResidualEvaluator(PartitionSpec spec, Expression expr) {
    this.spec = spec;
    this.expr = expr;
  }

  /**
   * Returns a residual expression for the given partition values.
   *
   * @param partitionData partition data values
   * @return the residual of this evaluator's expression from the partition values
   */
  public Expression residualFor(StructLike partitionData) {
    return visitor().eval(partitionData);
  }

  /**
   * Partially evaluates the expression: bound comparisons against partition data collapse to
   * alwaysTrue/alwaysFalse, and predicates on source columns are projected through the
   * partition transform where possible.
   */
  private class ResidualVisitor extends ExpressionVisitors.BoundExpressionVisitor<Expression> {
    private StructLike struct;

    private Expression eval(StructLike struct) {
      this.struct = struct;
      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Expression alwaysTrue() {
      return Expressions.alwaysTrue();
    }

    @Override
    public Expression alwaysFalse() {
      return Expressions.alwaysFalse();
    }

    @Override
    public Expression not(Expression result) {
      return Expressions.not(result);
    }

    @Override
    public Expression and(Expression leftResult, Expression rightResult) {
      // Expressions.and simplifies when either side is a constant
      return Expressions.and(leftResult, rightResult);
    }

    @Override
    public Expression or(Expression leftResult, Expression rightResult) {
      // Expressions.or simplifies when either side is a constant
      return Expressions.or(leftResult, rightResult);
    }

    @Override
    public <T> Expression isNull(BoundReference<T> ref) {
      return (ref.get(struct) == null) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression notNull(BoundReference<T> ref) {
      return (ref.get(struct) != null) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression lt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) < 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression ltEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) <= 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression gt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) > 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression gtEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) >= 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression eq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) == 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression notEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) != 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Expression predicate(BoundPredicate<T> pred) {
      // Get the strict projection of this predicate in partition data, then use it to determine
      // whether to return the original predicate. The strict projection returns true iff the
      // original predicate would have returned true, so the predicate can be eliminated if the
      // strict projection evaluates to true.
      //
      // If there is no strict projection or if it evaluates to false, then return the predicate.
      PartitionField part = spec.getFieldBySourceId(pred.ref().fieldId());
      if (part == null) {
        return pred; // not associated with a partition field, can't be evaluated
      }

      UnboundPredicate<?> strictProjection = ((Transform<T, ?>) part.transform())
          .projectStrict(part.name(), pred);
      if (strictProjection != null) {
        Expression bound = strictProjection.bind(spec.partitionType());
        if (bound instanceof BoundPredicate) {
          // the predicate methods will evaluate and return alwaysTrue or alwaysFalse
          return super.predicate((BoundPredicate<?>) bound);
        }
        return bound; // use the non-predicate residual (e.g. alwaysTrue)
      }

      // if the predicate could not be projected, it must be in the residual
      return pred;
    }

    @Override
    public <T> Expression predicate(UnboundPredicate<T> pred) {
      Expression bound = pred.bind(spec.schema().asStruct());

      if (bound instanceof BoundPredicate) {
        Expression boundResidual = predicate((BoundPredicate<?>) bound);
        if (boundResidual instanceof Predicate) {
          return pred; // replace with the original unbound predicate
        }
        return boundResidual; // use the non-predicate residual (e.g. alwaysTrue)
      }

      // if binding didn't result in a Predicate, return the expression
      return bound;
    }
  }
}
2,124
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Binder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types.StructType;
import java.util.List;
import java.util.Set;

/**
 * Rewrites {@link Expression expressions} by replacing unbound named references with references to
 * fields in a struct schema.
 */
public class Binder {
  private Binder() {
  }

  /**
   * Replaces all unbound/named references with bound references to fields in the given struct.
   * <p>
   * When a reference is resolved, any literal used in a predicate for that field is converted to
   * the field's type using {@link Literal#to(Type)}. If automatic conversion to that type isn't
   * allowed, a {@link ValidationException validation exception} is thrown.
   * <p>
   * The result expression may be simplified when constructed. For example, {@code isNull("a")} is
   * replaced with {@code alwaysFalse()} when {@code "a"} is resolved to a required field.
   * <p>
   * The expression cannot contain references that are already bound, or an
   * {@link IllegalStateException} will be thrown.
   *
   * @param struct The {@link StructType struct type} to resolve references by name.
   * @param expr An {@link Expression expression} to rewrite with bound references.
   * @return the expression rewritten with bound references
   * @throws ValidationException if literals do not match bound references
   * @throws IllegalStateException if any references are already bound
   */
  public static Expression bind(StructType struct, Expression expr) {
    return ExpressionVisitors.visit(expr, new BindVisitor(struct));
  }

  /**
   * Returns the set of field ids referenced by the given expressions after binding them to the
   * struct. A null expression list yields an empty set.
   *
   * @param struct the struct type to resolve references against
   * @param exprs expressions to scan for references; may be null
   * @return the field ids referenced by all of the expressions
   */
  public static Set<Integer> boundReferences(StructType struct, List<Expression> exprs) {
    if (exprs == null) {
      return ImmutableSet.of();
    }
    ReferenceVisitor visitor = new ReferenceVisitor();
    for (Expression expr : exprs) {
      ExpressionVisitors.visit(bind(struct, expr), visitor);
    }
    return visitor.references;
  }

  /** Rebuilds an expression tree, delegating the actual binding to each unbound predicate. */
  private static class BindVisitor extends ExpressionVisitor<Expression> {
    private final StructType struct;

    private BindVisitor(StructType struct) {
      this.struct = struct;
    }

    @Override
    public Expression alwaysTrue() {
      return Expressions.alwaysTrue();
    }

    @Override
    public Expression alwaysFalse() {
      return Expressions.alwaysFalse();
    }

    @Override
    public Expression not(Expression result) {
      return Expressions.not(result);
    }

    @Override
    public Expression and(Expression leftResult, Expression rightResult) {
      return Expressions.and(leftResult, rightResult);
    }

    @Override
    public Expression or(Expression leftResult, Expression rightResult) {
      return Expressions.or(leftResult, rightResult);
    }

    @Override
    public <T> Expression predicate(BoundPredicate<T> pred) {
      // bind() is only valid on fully-unbound expressions
      throw new IllegalStateException("Found already bound predicate: " + pred);
    }

    @Override
    public <T> Expression predicate(UnboundPredicate<T> pred) {
      return pred.bind(struct);
    }
  }

  /**
   * Collects the field ids of bound predicates into a set. Every visit method returns the same
   * accumulating set; only predicate(BoundPredicate) actually adds to it.
   */
  private static class ReferenceVisitor extends ExpressionVisitor<Set<Integer>> {
    private final Set<Integer> references = Sets.newHashSet();

    @Override
    public Set<Integer> alwaysTrue() {
      return references;
    }

    @Override
    public Set<Integer> alwaysFalse() {
      return references;
    }

    @Override
    public Set<Integer> not(Set<Integer> result) {
      return references;
    }

    @Override
    public Set<Integer> and(Set<Integer> leftResult, Set<Integer> rightResult) {
      return references;
    }

    @Override
    public Set<Integer> or(Set<Integer> leftResult, Set<Integer> rightResult) {
      return references;
    }

    @Override
    public <T> Set<Integer> predicate(BoundPredicate<T> pred) {
      references.add(pred.ref().fieldId());
      return references;
    }
  }
}
2,125
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/BoundPredicate.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class BoundPredicate<T> extends Predicate<T, BoundReference<T>> { BoundPredicate(Operation op, BoundReference<T> ref, Literal<T> lit) { super(op, ref, lit); } BoundPredicate(Operation op, BoundReference<T> ref) { super(op, ref, null); } @Override public Expression negate() { return new BoundPredicate<>(op().negate(), ref(), literal()); } }
2,126
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Expression.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import java.io.Serializable; /** * Represents a boolean expression tree. */ public interface Expression extends Serializable { enum Operation { TRUE, FALSE, IS_NULL, NOT_NULL, LT, LT_EQ, GT, GT_EQ, EQ, NOT_EQ, IN, NOT_IN, NOT, AND, OR; /** * @return the operation used when this is negated */ public Operation negate() { switch (this) { case IS_NULL: return Operation.NOT_NULL; case NOT_NULL: return Operation.IS_NULL; case LT: return Operation.GT_EQ; case LT_EQ: return Operation.GT; case GT: return Operation.LT_EQ; case GT_EQ: return Operation.LT; case EQ: return Operation.NOT_EQ; case NOT_EQ: return Operation.EQ; case IN: return Operation.NOT_IN; case NOT_IN: return Operation.IN; default: throw new IllegalArgumentException("No negation for operation: " + this); } } /** * @return the equivalent operation when the left and right operands are exchanged */ public Operation flipLR() { switch (this) { case LT: return Operation.GT; case LT_EQ: return Operation.GT_EQ; case GT: return Operation.LT; case GT_EQ: return Operation.LT_EQ; case EQ: return Operation.EQ; case NOT_EQ: return Operation.NOT_EQ; case AND: return Operation.AND; case OR: return Operation.OR; default: throw new IllegalArgumentException("No left-right flip for operation: " + this); } } } /** * @return the operation for an expression node. 
*/ Operation op(); /** * @return the negation of this expression, equivalent to not(this). */ default Expression negate() { throw new UnsupportedOperationException(String.format("%s cannot be negated", this)); } }
2,127
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Reference.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import java.io.Serializable; /** * Represents a variable reference in an {@link Expression expression}. * @see BoundReference * @see NamedReference */ public interface Reference extends Serializable { }
2,128
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Predicate.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public abstract class Predicate<T, R extends Reference> implements Expression { private final Operation op; private final R ref; private final Literal<T> literal; Predicate(Operation op, R ref, Literal<T> lit) { this.op = op; this.ref = ref; this.literal = lit; } @Override public Operation op() { return op; } public R ref() { return ref; } public Literal<T> literal() { return literal; } @Override public String toString() { switch (op) { case IS_NULL: return "is_null(" + ref() + ")"; case NOT_NULL: return "not_null(" + ref() + ")"; case LT: return String.valueOf(ref()) + " < " + literal(); case LT_EQ: return String.valueOf(ref()) + " <= " + literal(); case GT: return String.valueOf(ref()) + " > " + literal(); case GT_EQ: return String.valueOf(ref()) + " >= " + literal(); case EQ: return String.valueOf(ref()) + " == " + literal(); case NOT_EQ: return String.valueOf(ref()) + " != " + literal(); // case IN: // break; // case NOT_IN: // break; default: return "Invalid predicate: operation = " + op; } } }
2,129
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/True.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import java.io.ObjectStreamException;

/**
 * An {@link Expression expression} that is always true.
 * <p>
 * Singleton: the only instance is {@link #INSTANCE}, which allows identity comparison
 * (e.g. {@code expr == alwaysTrue()}) in expression simplification.
 */
public class True implements Expression {
  static final True INSTANCE = new True();

  // use INSTANCE instead of constructing new copies
  private True() {
  }

  @Override
  public Operation op() {
    return Operation.TRUE;
  }

  @Override
  public Expression negate() {
    return False.INSTANCE;
  }

  @Override
  public String toString() {
    return "true";
  }

  // Java serialization replaces this with a proxy so that deserialization resolves back to
  // the singleton INSTANCE (see SerializationProxies.ConstantExpressionProxy.readResolve)
  Object writeReplace() throws ObjectStreamException {
    return new SerializationProxies.ConstantExpressionProxy(true);
  }
}
2,130
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/And.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class And implements Expression { private final Expression left; private final Expression right; And(Expression left, Expression right) { this.left = left; this.right = right; } public Expression left() { return left; } public Expression right() { return right; } @Override public Operation op() { return Expression.Operation.AND; } @Override public Expression negate() { // not(and(a, b)) => or(not(a), not(b)) return Expressions.or(left.negate(), right.negate()); } @Override public String toString() { return String.format("(%s and %s)", left, right); } }
2,131
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Literal.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.types.Type;
import java.io.Serializable;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Comparator;
import java.util.UUID;

/**
 * Represents a literal fixed value in an expression predicate
 * @param <T> The Java type of the value wrapped by a {@link Literal}
 */
public interface Literal<T> extends Serializable {
  // Static factories: one overload per supported literal value type.

  static Literal<Boolean> of(boolean value) {
    return new Literals.BooleanLiteral(value);
  }

  static Literal<Integer> of(int value) {
    return new Literals.IntegerLiteral(value);
  }

  static Literal<Long> of(long value) {
    return new Literals.LongLiteral(value);
  }

  static Literal<Float> of(float value) {
    return new Literals.FloatLiteral(value);
  }

  static Literal<Double> of(double value) {
    return new Literals.DoubleLiteral(value);
  }

  static Literal<CharSequence> of(CharSequence value) {
    return new Literals.StringLiteral(value);
  }

  static Literal<UUID> of(UUID value) {
    return new Literals.UUIDLiteral(value);
  }

  // byte[] maps to a fixed-length literal; the array is wrapped, not copied
  static Literal<ByteBuffer> of(byte[] value) {
    return new Literals.FixedLiteral(ByteBuffer.wrap(value));
  }

  // ByteBuffer maps to a variable-length binary literal
  static Literal<ByteBuffer> of(ByteBuffer value) {
    return new Literals.BinaryLiteral(value);
  }

  static Literal<BigDecimal> of(BigDecimal value) {
    return new Literals.DecimalLiteral(value);
  }

  /**
   * @return the value wrapped by this literal
   */
  T value();

  /**
   * Converts this literal to a literal of the given type.
   * <p>
   * When a predicate is bound to a concrete data column, literals are converted to match the bound
   * column's type. This conversion process is more narrow than a cast and is only intended for
   * cases where substituting one type is a common mistake (e.g. 34 instead of 34L) or where this
   * API avoids requiring a concrete class (e.g., dates).
   * <p>
   * If conversion to a target type is not supported, this method returns null.
   * <p>
   * This method may return {@link Literals#aboveMax} or {@link Literals#belowMin} when the target
   * type is not as wide as the original type. These values indicate that the containing predicate
   * can be simplified. For example, Integer.MAX_VALUE+1 converted to an int will result in
   * {@code aboveMax} and can simplify a &lt; Integer.MAX_VALUE+1 to {@link Expressions#alwaysTrue}
   *
   * @param type A primitive {@link Type}
   * @param <X> The Java type of value the new literal contains
   * @return A literal of the given type or null if conversion was not valid
   */
  <X> Literal<X> to(Type type);

  /**
   * Return a {@link Comparator} for values.
   * @return a comparator for T objects
   */
  Comparator<T> comparator();
}
2,132
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/BoundReference.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.StructLike; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.util.List; public class BoundReference<T> implements Reference { private final int fieldId; private final Type type; private final int pos; BoundReference(Types.StructType struct, int fieldId) { this.fieldId = fieldId; this.pos = find(fieldId, struct); this.type = struct.fields().get(pos).type(); } private int find(int fieldId, Types.StructType struct) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { if (fields.get(i).fieldId() == fieldId) { return i; } } throw new ValidationException( "Cannot find top-level field id %d in struct: %s", fieldId, struct); } public Type type() { return type; } public int fieldId() { return fieldId; } public int pos() { return pos; } public T get(StructLike struct) { return struct.get(pos, javaType()); } @Override public String toString() { return String.format("ref(id=%d, pos=%d, type=%s)", fieldId, pos, type); } @SuppressWarnings("unchecked") private Class<T> javaType() { return (Class<T>) type.asPrimitiveType().typeId().javaClass(); } }
2,133
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/SerializationProxies.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import java.io.ObjectStreamException;
import java.io.Serializable;
import java.nio.ByteBuffer;

/**
 * Stand-in classes for expression classes in Java Serialization.
 * <p>
 * These are used so that expression classes are immutable and can use final fields.
 */
class SerializationProxies {
  /**
   * Serialization stand-in for True and False; resolves back to the singletons on read.
   */
  static class ConstantExpressionProxy implements Serializable {
    // NOTE(review): boxed so the no-arg serialization constructor leaves it null; readResolve
    // unboxes it, so a stream missing this field would throw NullPointerException
    private Boolean trueOrFalse = null;

    /**
     * Constructor for Java serialization.
     */
    public ConstantExpressionProxy() {
    }

    public ConstantExpressionProxy(boolean trueOrFalse) {
      this.trueOrFalse = trueOrFalse;
    }

    // resolve to the existing singleton instead of a new instance
    Object readResolve() throws ObjectStreamException {
      if (trueOrFalse) {
        return True.INSTANCE;
      } else {
        return False.INSTANCE;
      }
    }
  }

  /**
   * Replacement for BinaryLiteral in Java Serialization; shares the byte-array storage of
   * FixedLiteralProxy but resolves to a BinaryLiteral.
   */
  static class BinaryLiteralProxy extends FixedLiteralProxy {
    /**
     * Constructor for Java serialization.
     */
    BinaryLiteralProxy() {
    }

    BinaryLiteralProxy(ByteBuffer buffer) {
      super(buffer);
    }

    Object readResolve() throws ObjectStreamException {
      return new Literals.BinaryLiteral(ByteBuffer.wrap(bytes));
    }
  }

  /**
   * Replacement for FixedLiteral in Java Serialization.
   */
  static class FixedLiteralProxy implements Serializable {
    // raw bytes of the literal; ByteBuffer itself is not serialization-friendly
    protected byte[] bytes;

    /**
     * Constructor for Java serialization.
     */
    FixedLiteralProxy() {
    }

    FixedLiteralProxy(ByteBuffer buffer) {
      // copy via duplicate so the source buffer's position is not disturbed
      this.bytes = new byte[buffer.remaining()];
      buffer.duplicate().get(bytes);
    }

    Object readResolve() throws ObjectStreamException {
      return new Literals.FixedLiteral(ByteBuffer.wrap(bytes));
    }
  }
}
2,134
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Evaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Types;
import java.io.Serializable;
import java.util.Comparator;

/**
 * Evaluates an {@link Expression} for data described by a {@link Types.StructType}.
 * <p>
 * Data rows must implement {@link StructLike} and are passed to {@link #eval(StructLike)}.
 * <p>
 * This class is thread-safe.
 */
public class Evaluator implements Serializable {
  private final Expression expr;
  // transient: visitors are rebuilt lazily after deserialization, one per thread
  private transient ThreadLocal<EvalVisitor> visitors = null;

  private EvalVisitor visitor() {
    // NOTE(review): lazy init is unsynchronized; concurrent first calls may each install a
    // ThreadLocal, but each thread still gets its own visitor, so evaluation stays correct
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(EvalVisitor::new);
    }
    return visitors.get();
  }

  public Evaluator(Types.StructType struct, Expression unbound) {
    // binding eagerly validates literals against field types and fails fast
    this.expr = Binder.bind(struct, unbound);
  }

  /**
   * Returns whether the row matches the bound expression.
   */
  public boolean eval(StructLike data) {
    return visitor().eval(data);
  }

  /** Per-thread visitor; holds the current row while walking the expression tree. */
  private class EvalVisitor extends BoundExpressionVisitor<Boolean> {
    private StructLike struct;

    private boolean eval(StructLike row) {
      this.struct = row;
      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return true;
    }

    @Override
    public Boolean alwaysFalse() {
      return false;
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      return ref.get(struct) == null;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      return ref.get(struct) != null;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      // literal supplies the comparator because it knows the bound value type
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) < 0;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) <= 0;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) > 0;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) >= 0;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return cmp.compare(ref.get(struct), lit.value()) == 0;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      return !eq(ref, lit);
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      throw new UnsupportedOperationException("In is not supported yet");
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // delegates to in(), so this also throws until IN is implemented
      return !in(ref, lit);
    }
  }
}
2,135
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/NamedReference.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;

/**
 * An unresolved reference by field name; resolved to a {@link BoundReference} by
 * {@link Binder#bind}.
 */
public class NamedReference implements Reference {
  public final String name;

  NamedReference(String name) {
    if (name == null) {
      throw new NullPointerException("Name cannot be null");
    }
    this.name = name;
  }

  /** Returns the referenced field name. */
  public String name() {
    return name;
  }

  @Override
  public String toString() {
    return "ref(name=\"" + name + "\")";
  }
}
2,136
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Not.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class Not implements Expression { private final Expression child; Not(Expression child) { this.child = child; } public Expression child() { return child; } @Override public Operation op() { return Expression.Operation.NOT; } @Override public Expression negate() { return child; } @Override public String toString() { return String.format("not(%s)", child); } }
2,137
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/InclusiveMetricsEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.Map;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link DataFile} to test whether rows in the file may match.
 * <p>
 * This evaluation is inclusive: it returns true if a file may match and false if it cannot match.
 * <p>
 * Files are passed to {@link #eval(DataFile)}, which returns true if the file may contain matching
 * rows and false if the file cannot contain matching rows. Files may be skipped if and only if the
 * return value of {@code eval} is false.
 */
public class InclusiveMetricsEvaluator {
  private final Schema schema;
  private final StructType struct;
  private final Expression expr;
  // transient: visitors are rebuilt lazily, one per thread
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  private MetricsEvalVisitor visitor() {
    // NOTE(review): unsynchronized lazy init; each thread still receives its own visitor
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  public InclusiveMetricsEvaluator(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    // NOT is rewritten away so the visitor only has to handle positive predicate forms
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param file a data file
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean eval(DataFile file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return visitor().eval(file);
  }

  // named constants make the visitor's return values self-describing
  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Decides from per-column metrics (value/null counts, lower/upper bounds) whether any row in the
   * file could satisfy the expression. Always errs on the side of ROWS_MIGHT_MATCH.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Long> nullCounts = null;
    private Map<Integer, ByteBuffer> lowerBounds = null;
    private Map<Integer, ByteBuffer> upperBounds = null;

    private boolean eval(DataFile file) {
      if (file.recordCount() <= 0) {
        return ROWS_CANNOT_MATCH;
      }

      this.valueCounts = file.valueCounts();
      this.nullCounts = file.nullValueCounts();
      this.lowerBounds = file.lowerBounds();
      this.upperBounds = file.upperBounds();

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no non-null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      // non-null count = valueCount - nullCount; both maps must be present to conclude anything
      if (valueCounts != null && valueCounts.containsKey(id) &&
          nullCounts != null && nullCounts.containsKey(id) &&
          valueCounts.get(id) - nullCounts.get(id) == 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        // if even the smallest value is >= lit, no row can be < lit
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        // if even the smallest value is > lit, no row can be <= lit
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        // if even the largest value is <= lit, no row can be > lit
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        // if even the largest value is < lit, no row can be >= lit
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      // the value can only exist if lit falls inside [lower, upper]
      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        // NOTE(review): uses struct.field(id).type() where the other methods use field.type();
        // same value (field == struct.field(id)), but worth unifying for consistency
        T lower = Conversions.fromByteBuffer(struct.field(id).type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }
  }
}
2,138
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Expressions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.expressions.Expression.Operation;

/**
 * Factory methods for creating {@link Expression expressions}.
 * <p>
 * The boolean factories ({@link #and}, {@link #or}, {@link #not}) short-circuit against the
 * constant expressions {@link #alwaysTrue()} and {@link #alwaysFalse()} so that trivially
 * true/false sub-expressions are folded away at construction time.
 */
public class Expressions {
  private Expressions() {
  }

  /**
   * Returns the conjunction of two expressions, simplifying when either side is a constant.
   *
   * @throws NullPointerException if either expression is null
   */
  public static Expression and(Expression left, Expression right) {
    Preconditions.checkNotNull(left, "Left expression cannot be null.");
    Preconditions.checkNotNull(right, "Right expression cannot be null.");
    if (left == alwaysFalse() || right == alwaysFalse()) {
      // false and x => false
      return alwaysFalse();
    } else if (left == alwaysTrue()) {
      // true and x => x
      return right;
    } else if (right == alwaysTrue()) {
      return left;
    }
    return new And(left, right);
  }

  /**
   * Returns the disjunction of two expressions, simplifying when either side is a constant.
   *
   * @throws NullPointerException if either expression is null
   */
  public static Expression or(Expression left, Expression right) {
    Preconditions.checkNotNull(left, "Left expression cannot be null.");
    Preconditions.checkNotNull(right, "Right expression cannot be null.");
    if (left == alwaysTrue() || right == alwaysTrue()) {
      // true or x => true
      return alwaysTrue();
    } else if (left == alwaysFalse()) {
      // false or x => x
      return right;
    } else if (right == alwaysFalse()) {
      return left;
    }
    return new Or(left, right);
  }

  /**
   * Returns the negation of an expression, folding constants and double negation.
   *
   * @throws NullPointerException if the expression is null
   */
  public static Expression not(Expression child) {
    Preconditions.checkNotNull(child, "Child expression cannot be null.");
    if (child == alwaysTrue()) {
      return alwaysFalse();
    } else if (child == alwaysFalse()) {
      return alwaysTrue();
    } else if (child instanceof Not) {
      // not(not(x)) => x
      return ((Not) child).child();
    }
    return new Not(child);
  }

  public static <T> UnboundPredicate<T> isNull(String name) {
    return new UnboundPredicate<>(Operation.IS_NULL, ref(name));
  }

  public static <T> UnboundPredicate<T> notNull(String name) {
    return new UnboundPredicate<>(Operation.NOT_NULL, ref(name));
  }

  public static <T> UnboundPredicate<T> lessThan(String name, T value) {
    return new UnboundPredicate<>(Operation.LT, ref(name), value);
  }

  public static <T> UnboundPredicate<T> lessThanOrEqual(String name, T value) {
    return new UnboundPredicate<>(Operation.LT_EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> greaterThan(String name, T value) {
    return new UnboundPredicate<>(Operation.GT, ref(name), value);
  }

  public static <T> UnboundPredicate<T> greaterThanOrEqual(String name, T value) {
    return new UnboundPredicate<>(Operation.GT_EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> equal(String name, T value) {
    return new UnboundPredicate<>(Operation.EQ, ref(name), value);
  }

  public static <T> UnboundPredicate<T> notEqual(String name, T value) {
    return new UnboundPredicate<>(Operation.NOT_EQ, ref(name), value);
  }

  /**
   * Creates a predicate for a binary operation on a named column and a value.
   *
   * @throws IllegalArgumentException if op is a unary operation (IS_NULL or NOT_NULL)
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name, T value) {
    Preconditions.checkArgument(op != Operation.IS_NULL && op != Operation.NOT_NULL,
        "Cannot create %s predicate with a value", op);
    return new UnboundPredicate<>(op, ref(name), value);
  }

  /**
   * Creates a predicate for a binary operation on a named column and a literal.
   *
   * @throws IllegalArgumentException if op is a unary operation (IS_NULL or NOT_NULL)
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name, Literal<T> lit) {
    Preconditions.checkArgument(op != Operation.IS_NULL && op != Operation.NOT_NULL,
        "Cannot create %s predicate with a value", op);
    return new UnboundPredicate<>(op, ref(name), lit);
  }

  /**
   * Creates a predicate for a unary operation (IS_NULL or NOT_NULL) on a named column.
   *
   * @throws IllegalArgumentException if op is a binary operation that requires a value
   */
  public static <T> UnboundPredicate<T> predicate(Operation op, String name) {
    Preconditions.checkArgument(op == Operation.IS_NULL || op == Operation.NOT_NULL,
        "Cannot create %s predicate without a value", op);
    return new UnboundPredicate<>(op, ref(name));
  }

  public static True alwaysTrue() {
    return True.INSTANCE;
  }

  public static False alwaysFalse() {
    return False.INSTANCE;
  }

  /**
   * Rewrites an expression so that all {@link Not} nodes are pushed down into the predicates
   * by negating their children.
   */
  public static Expression rewriteNot(Expression expr) {
    return ExpressionVisitors.visit(expr, RewriteNot.get());
  }

  static NamedReference ref(String name) {
    return new NamedReference(name);
  }
}
2,139
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/InclusiveManifestEvaluator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.ManifestFile;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.List;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link ManifestFile} to test whether the file contains
 * matching partitions.
 * <p>
 * This evaluation is inclusive: it returns true if a file may match and false if it cannot match.
 * <p>
 * Files are passed to {@link #eval(ManifestFile)}, which returns true if the manifest may contain
 * data files that match the partition expression. Manifest files may be skipped if and only if the
 * return value of {@code eval} is false.
 */
public class InclusiveManifestEvaluator {
  // partition struct type used to bind the projected expression
  private final StructType struct;
  // the row filter, projected to partition values, with not() nodes rewritten away, and bound
  private final Expression expr;
  // visitors are not thread-safe because they hold per-call state (stats); use one per thread.
  // transient so the ThreadLocal is not carried through Java serialization.
  private transient ThreadLocal<ManifestEvalVisitor> visitors = null;

  private ManifestEvalVisitor visitor() {
    // lazily created so a deserialized instance re-creates its ThreadLocal on first use
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(ManifestEvalVisitor::new);
    }
    return visitors.get();
  }

  /**
   * Creates an evaluator for a partition spec and an unprojected row filter.
   *
   * @param spec the table's partition spec, used both for projection and for binding
   * @param rowFilter a filter on data rows; it is projected to an inclusive partition filter
   */
  public InclusiveManifestEvaluator(PartitionSpec spec, Expression rowFilter) {
    this.struct = spec.partitionType();
    this.expr = Binder.bind(struct, rewriteNot(Projections.inclusive(spec).project(rowFilter)));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param manifest a manifest file
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean eval(ManifestFile manifest) {
    return visitor().eval(manifest);
  }

  // named constants so the visitor reads as "might match" / "cannot match" instead of raw booleans
  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Visits the bound expression, answering each predicate using the manifest's per-partition-field
   * lower/upper bounds and null flags. Every answer is conservative: a predicate only returns
   * ROWS_CANNOT_MATCH when the stats prove no row can match.
   */
  private class ManifestEvalVisitor extends BoundExpressionVisitor<Boolean> {
    // per-call state: the partition field summaries of the manifest currently being evaluated
    private List<PartitionFieldSummary> stats = null;

    private boolean eval(ManifestFile manifest) {
      this.stats = manifest.partitions();
      if (stats == null) {
        // no partition summaries were written; cannot prove anything, so keep the manifest
        return ROWS_MIGHT_MATCH;
      }

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      if (!stats.get(ref.pos()).containsNull()) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // containsNull encodes whether at least one partition value is null, lowerBound is null if
      // all partition values are null.
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // all values are null
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }

      T lower = Conversions.fromByteBuffer(ref.type(), lowerBound);

      // ref < lit is impossible when even the smallest value is >= lit
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp >= 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer lowerBound = stats.get(ref.pos()).lowerBound();
      if (lowerBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }

      T lower = Conversions.fromByteBuffer(ref.type(), lowerBound);

      // ref <= lit is impossible when even the smallest value is > lit
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp > 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer upperBound = stats.get(ref.pos()).upperBound();
      if (upperBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }

      T upper = Conversions.fromByteBuffer(ref.type(), upperBound);

      // ref > lit is impossible when even the largest value is <= lit
      int cmp = lit.comparator().compare(upper, lit.value());
      if (cmp <= 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      ByteBuffer upperBound = stats.get(ref.pos()).upperBound();
      if (upperBound == null) {
        return ROWS_CANNOT_MATCH; // values are all null
      }

      T upper = Conversions.fromByteBuffer(ref.type(), upperBound);

      // ref >= lit is impossible when even the largest value is < lit
      int cmp = lit.comparator().compare(upper, lit.value());
      if (cmp < 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      PartitionFieldSummary fieldStats = stats.get(ref.pos());
      if (fieldStats.lowerBound() == null) {
        return ROWS_CANNOT_MATCH; // values are all null and literal cannot contain null
      }

      // lit must fall inside [lowerBound, upperBound] for equality to be possible
      T lower = Conversions.fromByteBuffer(ref.type(), fieldStats.lowerBound());
      int cmp = lit.comparator().compare(lower, lit.value());
      if (cmp > 0) {
        return ROWS_CANNOT_MATCH;
      }

      T upper = Conversions.fromByteBuffer(ref.type(), fieldStats.upperBound());
      cmp = lit.comparator().compare(upper, lit.value());
      if (cmp < 0) {
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      // bounds only bracket the values; membership cannot be ruled out here
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // see notEq: bounds cannot prove a value is present, so nothing can be excluded
      return ROWS_MIGHT_MATCH;
    }
  }
}
2,140
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Or.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class Or implements Expression { private final Expression left; private final Expression right; Or(Expression left, Expression right) { this.left = left; this.right = right; } public Expression left() { return left; } public Expression right() { return right; } @Override public Operation op() { return Expression.Operation.OR; } @Override public Expression negate() { // not(or(a, b)) => and(not(a), not(b)) return Expressions.and(left.negate(), right.negate()); } @Override public String toString() { return String.format("(%s or %s)", left, right); } }
2,141
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/RewriteNot.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; class RewriteNot extends ExpressionVisitors.ExpressionVisitor<Expression> { private static final RewriteNot INSTANCE = new RewriteNot(); static RewriteNot get() { return INSTANCE; } private RewriteNot() { } @Override public Expression alwaysTrue() { return Expressions.alwaysTrue(); } @Override public Expression alwaysFalse() { return Expressions.alwaysFalse(); } @Override public Expression not(Expression result) { return result.negate(); } @Override public Expression and(Expression leftResult, Expression rightResult) { return Expressions.and(leftResult, rightResult); } @Override public Expression or(Expression leftResult, Expression rightResult) { return Expressions.or(leftResult, rightResult); } @Override public <T> Expression predicate(BoundPredicate<T> pred) { return pred; } @Override public <T> Expression predicate(UnboundPredicate<T> pred) { return pred; } }
2,142
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/Listeners.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.events; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import java.util.Iterator; import java.util.List; import java.util.Map; /** * Static registration and notification for listeners. */ public class Listeners { private Listeners() { } private static final Map<Class<?>, List<Listener<?>>> listeners = Maps.newConcurrentMap(); public static <E> void register(Listener<E> listener, Class<E> eventType) { List<Listener<?>> list = listeners.get(eventType); if (list == null) { synchronized (listeners) { list = listeners.get(eventType); if (list == null) { list = Lists.newArrayList(); listeners.put(eventType, list); } } } list.add(listener); } @SuppressWarnings("unchecked") public static <E> void notifyAll(E event) { Preconditions.checkNotNull(event, "Cannot notify listeners for a null event."); List<Listener<?>> list = listeners.get(event.getClass()); if (list != null) { Iterator<Listener<?>> iter = list.iterator(); while (iter.hasNext()) { Listener<E> listener = (Listener<E>) iter.next(); listener.notify(event); } } } }
2,143
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/ScanEvent.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.events; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Expression; /** * Event sent to listeners when a table scan is planned. */ public final class ScanEvent { private final String tableName; private final long snapshotId; private final Expression filter; private final Schema projection; public ScanEvent(String tableName, long snapshotId, Expression filter, Schema projection) { this.tableName = tableName; this.snapshotId = snapshotId; this.filter = filter; this.projection = projection; } public String tableName() { return tableName; } public long snapshotId() { return snapshotId; } public Expression filter() { return filter; } public Schema projection() { return projection; } }
2,144
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/events/Listener.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.events; /** * A listener interface that can receive notifications. */ public interface Listener<E> { void notify(E event); }
2,145
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/OrcFileAppender.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.fs.Path;
import org.apache.orc.ColumnStatistics;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.apache.orc.OrcFile;
import org.apache.orc.Writer;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;

/**
 * Create a file appender for ORC.
 * <p>
 * Rows are appended as {@link VectorizedRowBatch} instances. The Iceberg-to-ORC column id
 * mapping is written into the file's user metadata under {@link #COLUMN_NUMBERS_ATTRIBUTE}.
 */
public class OrcFileAppender implements FileAppender<VectorizedRowBatch> {
  private final Writer writer;
  // the ORC schema converted from the Iceberg schema
  private final TypeDescription orcSchema;
  // populated by TypeConversion.toOrc: maps each ORC TypeDescription to its Iceberg field id
  private final ColumnIdMap columnIds = new ColumnIdMap();
  private final Path path;

  // user-metadata key under which the serialized column id mapping is stored
  public static final String COLUMN_NUMBERS_ATTRIBUTE = "iceberg.column.ids";

  /**
   * Creates the appender, opening the ORC writer immediately.
   *
   * @param schema the Iceberg schema to write
   * @param file the destination file
   * @param options ORC writer options; the schema is set on them here
   * @param metadata extra key/value pairs copied into the ORC user metadata
   */
  OrcFileAppender(Schema schema,
                  OutputFile file,
                  OrcFile.WriterOptions options,
                  Map<String,byte[]> metadata) {
    orcSchema = TypeConversion.toOrc(schema, columnIds);
    options.setSchema(orcSchema);
    path = new Path(file.location());
    try {
      writer = OrcFile.createWriter(path, options);
    } catch (IOException e) {
      throw new RuntimeException("Can't create file " + path, e);
    }
    // persist the Iceberg field id mapping so readers can reconstruct it
    writer.addUserMetadata(COLUMN_NUMBERS_ATTRIBUTE, columnIds.serialize());
    metadata.forEach(
        (key,value) -> writer.addUserMetadata(key, ByteBuffer.wrap(value)));
  }

  /**
   * Appends one batch of rows.
   *
   * @param datum the batch to write
   * @throws RuntimeException wrapping any IOException from the ORC writer
   */
  @Override
  public void add(VectorizedRowBatch datum) {
    try {
      writer.addRowBatch(datum);
    } catch (IOException e) {
      throw new RuntimeException("Problem writing to ORC file " + path, e);
    }
  }

  /**
   * Builds Iceberg {@link Metrics} from the ORC writer's column statistics.
   * <p>
   * Value counts are reported for every column with an Iceberg id; null counts are derived as
   * (row count - value count), but only for the schema's top-level children.
   */
  @Override
  public Metrics metrics() {
    try {
      long rows = writer.getNumberOfRows();
      ColumnStatistics[] stats = writer.getStatistics();
      // we don't currently have columnSizes or distinct counts.
      Map<Integer, Long> valueCounts = new HashMap<>();
      Map<Integer, Long> nullCounts = new HashMap<>();

      // invert columnIds: index by ORC column id, value is the Iceberg field id (null if unmapped)
      Integer[] icebergIds = new Integer[orcSchema.getMaximumId() + 1];
      for (TypeDescription type: columnIds.keySet()) {
        icebergIds[type.getId()] = columnIds.get(type);
      }

      // value counts for every mapped column; starts at 1 because id 0 is the root struct
      for (int c=1; c < stats.length; ++c) {
        if (icebergIds[c] != null) {
          valueCounts.put(icebergIds[c], stats[c].getNumberOfValues());
        }
      }

      // null counts only for the root struct's direct children
      for (TypeDescription child: orcSchema.getChildren()) {
        int c = child.getId();
        if (icebergIds[c] != null) {
          nullCounts.put(icebergIds[c], rows - stats[c].getNumberOfValues());
        }
      }

      return new Metrics(rows, null, valueCounts, nullCounts);
    } catch (IOException e) {
      throw new RuntimeException("Can't get statistics " + path, e);
    }
  }

  @Override
  public void close() throws IOException {
    writer.close();
  }

  /** Returns the ORC schema derived from the Iceberg schema. */
  public TypeDescription getSchema() {
    return orcSchema;
  }
}
2,146
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/OrcIterator.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.orc; import org.apache.hadoop.fs.Path; import org.apache.orc.RecordReader; import org.apache.orc.TypeDescription; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import java.io.Closeable; import java.io.IOException; import java.util.Iterator; /** * An adaptor so that the ORC RecordReader can be used as an Iterator. * Because the same VectorizedRowBatch is reused on each call to next, * it gets changed when hasNext or next is called. */ public class OrcIterator implements Iterator<VectorizedRowBatch>, Closeable { private final Path filename; private final RecordReader rows; private final VectorizedRowBatch batch; private boolean advanced = false; OrcIterator(Path filename, TypeDescription schema, RecordReader rows) { this.filename = filename; this.rows = rows; this.batch = schema.createRowBatch(); } @Override public void close() throws IOException { rows.close(); } private void advance() { if (!advanced) { try { rows.nextBatch(batch); } catch (IOException e) { throw new RuntimeException("Problem reading ORC file " + filename, e); } advanced = true; } } @Override public boolean hasNext() { advance(); return batch.size > 0; } @Override public VectorizedRowBatch next() { // make sure we have the next batch advance(); // mark it as used advanced = false; return batch; } }
2,147
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/ColumnIdMap.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import org.apache.orc.TypeDescription;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Collection;
import java.util.IdentityHashMap;
import java.util.Map;
import java.util.Set;

/**
 * The mapping from ORC's TypeDescription to the Iceberg column ids.
 *
 * Keep the API limited to Map rather than a concrete type so that we can
 * change it later.
 *
 * Backed by an identity map because TypeDescription equality is not what
 * distinguishes columns here; object identity within one schema tree is.
 */
public class ColumnIdMap implements Map<TypeDescription, Integer> {
  private final IdentityHashMap<TypeDescription, Integer> delegate = new IdentityHashMap<>();

  @Override
  public int size() {
    return delegate.size();
  }

  @Override
  public boolean isEmpty() {
    return delegate.isEmpty();
  }

  @Override
  public boolean containsKey(Object key) {
    return delegate.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return delegate.containsValue(value);
  }

  @Override
  public Integer get(Object key) {
    return delegate.get(key);
  }

  @Override
  public Integer put(TypeDescription key, Integer value) {
    return delegate.put(key, value);
  }

  @Override
  public Integer remove(Object key) {
    return delegate.remove(key);
  }

  @Override
  public void putAll(Map<? extends TypeDescription, ? extends Integer> map) {
    delegate.putAll(map);
  }

  @Override
  public void clear() {
    delegate.clear();
  }

  @Override
  public Set<TypeDescription> keySet() {
    return delegate.keySet();
  }

  @Override
  public Collection<Integer> values() {
    return delegate.values();
  }

  @Override
  public Set<Entry<TypeDescription, Integer>> entrySet() {
    return delegate.entrySet();
  }

  /**
   * Encodes the mapping as UTF-8 text of the form "orcId:icebergId,orcId:icebergId,...".
   *
   * @return a buffer containing the serialized mapping
   */
  public ByteBuffer serialize() {
    StringBuilder text = new StringBuilder();
    boolean first = true;
    for (Entry<TypeDescription, Integer> entry : delegate.entrySet()) {
      if (first) {
        first = false;
      } else {
        text.append(',');
      }
      text.append(entry.getKey().getId());
      text.append(':');
      text.append(entry.getValue().intValue());
    }
    return ByteBuffer.wrap(text.toString().getBytes(StandardCharsets.UTF_8));
  }

  /**
   * Rebuilds a mapping serialized by {@link #serialize()} against a given ORC schema.
   *
   * @param schema the ORC schema whose subtypes are the keys
   * @param serial the serialized "orcId:icebergId,..." text
   * @return the reconstructed mapping
   */
  public static ColumnIdMap deserialize(TypeDescription schema, ByteBuffer serial) {
    ColumnIdMap result = new ColumnIdMap();
    String encoded = StandardCharsets.UTF_8.decode(serial).toString();
    for (String pair : encoded.split(",")) {
      String[] idParts = pair.split(":");
      result.put(schema.findSubtype(Integer.parseInt(idParts[0])),
          Integer.parseInt(idParts[1]));
    }
    return result;
  }
}
2,148
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/TypeConversion.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.orc.TypeDescription;
import java.util.ArrayList;
import java.util.List;

/**
 * Converts between Iceberg schemas and ORC schemas, recording the Iceberg field id of each
 * ORC type in a {@link ColumnIdMap} as a side channel (ORC types do not carry field ids).
 */
public class TypeConversion {

  /**
   * Convert a given Iceberg schema to ORC.
   * @param schema the Iceberg schema to convert
   * @param columnIds an output with the column ids
   * @return the ORC schema
   */
  public static TypeDescription toOrc(Schema schema,
                                      ColumnIdMap columnIds) {
    // the root struct has no field id, hence null
    return toOrc(null, schema.asStruct(), columnIds);
  }

  /**
   * Recursively converts one Iceberg type to ORC.
   *
   * @param fieldId the Iceberg field id for this type, or null for the root struct
   * @param type the Iceberg type to convert
   * @param columnIds output map from each created ORC type to its Iceberg field id
   * @return the ORC type
   */
  static TypeDescription toOrc(Integer fieldId,
                               Type type,
                               ColumnIdMap columnIds) {
    TypeDescription result;
    switch (type.typeId()) {
      case BOOLEAN:
        result = TypeDescription.createBoolean();
        break;
      case INTEGER:
        result = TypeDescription.createInt();
        break;
      case LONG:
        result = TypeDescription.createLong();
        break;
      case FLOAT:
        result = TypeDescription.createFloat();
        break;
      case DOUBLE:
        result = TypeDescription.createDouble();
        break;
      case DATE:
        result = TypeDescription.createDate();
        break;
      case TIME:
        // ORC has no time type; stored as an int
        result = TypeDescription.createInt();
        break;
      case TIMESTAMP:
        result = TypeDescription.createTimestamp();
        break;
      case STRING:
        result = TypeDescription.createString();
        break;
      case UUID:
        // ORC has no UUID type; stored as binary
        result = TypeDescription.createBinary();
        break;
      case FIXED:
        result = TypeDescription.createBinary();
        break;
      case BINARY:
        result = TypeDescription.createBinary();
        break;
      case DECIMAL: {
        Types.DecimalType decimal = (Types.DecimalType) type;
        result = TypeDescription.createDecimal()
            .withScale(decimal.scale())
            .withPrecision(decimal.precision());
        break;
      }
      case STRUCT: {
        result = TypeDescription.createStruct();
        for (Types.NestedField field: type.asStructType().fields()) {
          result.addField(field.name(), toOrc(field.fieldId(), field.type(), columnIds));
        }
        break;
      }
      case LIST: {
        Types.ListType list = (Types.ListType) type;
        result = TypeDescription.createList(toOrc(list.elementId(), list.elementType(), columnIds));
        break;
      }
      case MAP: {
        Types.MapType map = (Types.MapType) type;
        TypeDescription key = toOrc(map.keyId(), map.keyType(), columnIds);
        result = TypeDescription.createMap(key,
            toOrc(map.valueId(), map.valueType(), columnIds));
        break;
      }
      default:
        throw new IllegalArgumentException("Unhandled type " + type.typeId());
    }
    // record the Iceberg field id for every non-root ORC type
    if (fieldId != null) {
      columnIds.put(result, fieldId);
    }
    return result;
  }

  /**
   * Convert an ORC schema to an Iceberg schema.
   * @param schema the ORC schema
   * @param columnIds the column ids
   * @return the Iceberg schema
   */
  public Schema fromOrc(TypeDescription schema, ColumnIdMap columnIds) {
    return new Schema(convertOrcToType(schema, columnIds).asStructType().fields());
  }

  /**
   * Recursively converts one ORC type to Iceberg, reading field ids from {@code columnIds}.
   * All struct fields, list elements, and map entries are created as optional.
   *
   * @throws IllegalArgumentException for ORC types with no Iceberg equivalent (e.g. unions)
   */
  Type convertOrcToType(TypeDescription schema, ColumnIdMap columnIds) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case BYTE:
      case SHORT:
      case INT:
        // Iceberg has a single 32-bit integer type; narrower ORC types widen to it
        return Types.IntegerType.get();
      case LONG:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case STRING:
      case CHAR:
      case VARCHAR:
        return Types.StringType.get();
      case BINARY:
        return Types.BinaryType.get();
      case DATE:
        return Types.DateType.get();
      case TIMESTAMP:
        return Types.TimestampType.withoutZone();
      case DECIMAL:
        return Types.DecimalType.of(schema.getPrecision(), schema.getScale());
      case STRUCT: {
        List<String> fieldNames = schema.getFieldNames();
        List<TypeDescription> fieldTypes = schema.getChildren();
        List<Types.NestedField> fields = new ArrayList<>(fieldNames.size());
        for (int c=0; c < fieldNames.size(); ++c) {
          String name = fieldNames.get(c);
          TypeDescription type = fieldTypes.get(c);
          fields.add(Types.NestedField.optional(columnIds.get(type), name,
              convertOrcToType(type, columnIds)));
        }
        return Types.StructType.of(fields);
      }
      case LIST: {
        TypeDescription child = schema.getChildren().get(0);
        return Types.ListType.ofOptional(columnIds.get(child),
            convertOrcToType(child, columnIds));
      }
      case MAP: {
        // ORC map children are [key, value] in that order
        TypeDescription key = schema.getChildren().get(0);
        TypeDescription value = schema.getChildren().get(1);
        return Types.MapType.ofOptional(columnIds.get(key), columnIds.get(value),
            convertOrcToType(key, columnIds), convertOrcToType(value, columnIds));
      }
      default:
        // We don't have an answer for union types.
        throw new IllegalArgumentException("Can't handle " + schema);
    }
  }
}
2,149
0
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg
Create_ds/iceberg/orc/src/main/java/com/netflix/iceberg/orc/ORC.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.orc;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.hadoop.HadoopOutputFile;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.orc.OrcFile;
import org.apache.orc.Reader;
import org.apache.orc.TypeDescription;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

/**
 * Entry points for reading and writing Iceberg data as ORC files, via fluent builders.
 */
public class ORC {
  private ORC() {
  }

  /** Starts building an ORC writer for the given output file. */
  public static WriteBuilder write(OutputFile file) {
    return new WriteBuilder(file);
  }

  /**
   * Builder for an {@link OrcFileAppender}. Requires a schema; optionally carries
   * Hadoop configuration overrides and ORC user metadata.
   */
  public static class WriteBuilder {
    private final OutputFile file;
    private final Configuration conf;
    private Schema schema = null;
    private Map<String, byte[]> metadata = new HashMap<>();

    private WriteBuilder(OutputFile file) {
      this.file = file;
      // copy the file's Hadoop conf when available so config() calls don't mutate shared state
      if (file instanceof HadoopOutputFile) {
        conf = new Configuration(((HadoopOutputFile) file).getConf());
      } else {
        conf = new Configuration();
      }
    }

    /** Adds a key/value pair to the ORC file's user metadata (value stored as UTF-8 bytes). */
    public WriteBuilder metadata(String property, String value) {
      metadata.put(property, value.getBytes(StandardCharsets.UTF_8));
      return this;
    }

    /** Sets a Hadoop configuration property for the writer. */
    public WriteBuilder config(String property, String value) {
      conf.set(property, value);
      return this;
    }

    /** Sets the Iceberg schema to write. Required before {@link #build()}. */
    public WriteBuilder schema(Schema schema) {
      this.schema = schema;
      return this;
    }

    public OrcFileAppender build() {
      OrcFile.WriterOptions options = OrcFile.writerOptions(conf);
      return new OrcFileAppender(schema, file, options, metadata);
    }
  }

  /** Starts building an ORC reader for the given input file. */
  public static ReadBuilder read(InputFile file) {
    return new ReadBuilder(file);
  }

  /**
   * Builder for an {@link OrcIterator}. Requires a schema; optionally restricts the read
   * to a byte range (for split-based reads) and carries Hadoop configuration overrides.
   */
  public static class ReadBuilder {
    private final InputFile file;
    private final Configuration conf;
    private com.netflix.iceberg.Schema schema = null;
    // byte range; both null means read the whole file
    private Long start = null;
    private Long length = null;

    private ReadBuilder(InputFile file) {
      Preconditions.checkNotNull(file, "Input file cannot be null");
      this.file = file;
      // copy the file's Hadoop conf when available so config() calls don't mutate shared state
      if (file instanceof HadoopInputFile) {
        conf = new Configuration(((HadoopInputFile) file).getConf());
      } else {
        conf = new Configuration();
      }
    }

    /**
     * Restricts the read to the given range: [start, start + length).
     *
     * @param start the start position for this read
     * @param length the length of the range this read should scan
     * @return this builder for method chaining
     */
    public ReadBuilder split(long start, long length) {
      this.start = start;
      this.length = length;
      return this;
    }

    /** Sets the Iceberg schema to read. Required before {@link #build()}. */
    public ReadBuilder schema(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
      return this;
    }

    /** Sets a Hadoop configuration property for the reader. */
    public ReadBuilder config(String property, String value) {
      conf.set(property, value);
      return this;
    }

    /**
     * Opens the file and returns an iterator over its row batches.
     *
     * @throws NullPointerException if no schema was set
     * @throws RuntimeException wrapping any IOException from opening the file
     */
    public OrcIterator build() {
      Preconditions.checkNotNull(schema, "Schema is required");
      try {
        Path path = new Path(file.location());
        Reader reader = OrcFile.createReader(path, OrcFile.readerOptions(conf));
        ColumnIdMap columnIds = new ColumnIdMap();
        // convert the requested Iceberg schema to ORC for projection
        TypeDescription orcSchema = TypeConversion.toOrc(schema, columnIds);
        Reader.Options options = reader.options();
        if (start != null) {
          options.range(start, length);
        }
        options.schema(orcSchema);
        return new OrcIterator(path, orcSchema, reader.rows(options));
      } catch (IOException e) {
        throw new RuntimeException("Can't open " + file.location(), e);
      }
    }
  }
}
2,150
0
Create_ds/iceberg/parquet/src/test/java/com/netflix
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/TestHelpers.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg;

import org.junit.Assert;
import java.util.concurrent.Callable;

/**
 * Shared test helpers for asserting that code throws expected exceptions
 * with expected messages.
 */
public class TestHelpers {

  // utility class: static methods only, never instantiated
  private TestHelpers() {
  }

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   *
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   *                           exception's message
   * @param callable A Callable that is expected to throw the exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Callable callable) {
    try {
      callable.call();
      Assert.fail("No exception was thrown (" + message + "), expected: " + expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  /**
   * A convenience method to avoid a large number of @Test(expected=...) tests
   *
   * @param message A String message to describe this assertion
   * @param expected An Exception class that the Runnable should throw
   * @param containedInMessage A String that should be contained by the thrown
   *                           exception's message
   * @param runnable A Runnable that is expected to throw the runtime exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Runnable runnable) {
    try {
      runnable.run();
      Assert.fail("No exception was thrown (" + message + "), expected: " + expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  /**
   * Verifies the caught exception's type and message; suppresses the caught
   * exception into any AssertionError so the original stack trace is kept.
   */
  private static void handleException(String message,
                                      Class<? extends Exception> expected,
                                      String containedInMessage,
                                      Exception actual) {
    try {
      Assert.assertEquals(message, expected, actual.getClass());
      Assert.assertTrue(
          "Expected exception message (" + containedInMessage + ") missing: " + actual.getMessage(),
          actual.getMessage().contains(containedInMessage)
      );
    } catch (AssertionError e) {
      // attach the original exception so the failure report shows both
      e.addSuppressed(actual);
      throw e;
    }
  }
}
2,151
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/avro/TestParquetReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.avro;

import com.google.common.collect.Iterables;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import org.apache.avro.generic.GenericData;
import java.io.File;
import java.io.IOException;

/**
 * Runs the shared {@link TestReadProjection} cases through the Parquet
 * writer and reader.
 */
public class TestParquetReadProjection extends TestReadProjection {

  /**
   * Writes a single record with the write schema, then reads it back
   * projected to the read schema.
   *
   * @param desc short name used for the temp file
   * @param writeSchema schema the record is written with
   * @param readSchema projection schema applied on read
   * @param record the record to round-trip
   * @return the single record produced by the projected read
   * @throws IOException if the temp file cannot be prepared or written
   */
  protected GenericData.Record writeAndRead(String desc,
                                            Schema writeSchema,
                                            Schema readSchema,
                                            GenericData.Record record)
      throws IOException {
    File file = temp.newFile(desc + ".parquet");
    // the writer requires a non-existent target; fail fast here instead of
    // with a confusing error from the Parquet writer if deletion fails
    if (file.exists() && !file.delete()) {
      throw new IOException("Failed to delete stale test file: " + file);
    }

    try (FileAppender<GenericData.Record> appender =
             Parquet.write(Files.localOutput(file))
                 .schema(writeSchema)
                 .build()) {
      appender.add(record);
    }

    Iterable<GenericData.Record> records = Parquet.read(Files.localInput(file))
        .project(readSchema)
        .callInit()
        .build();

    // exactly one record was written, so exactly one must come back
    return Iterables.getOnlyElement(records);
  }
}
2,152
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/avro/TestReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.iceberg.avro;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.util.List;
import java.util.Map;

/**
 * Format-agnostic projection tests: each case writes a record with one schema,
 * reads it back with a (possibly reordered, renamed, or narrowed) projection
 * schema, and verifies which fields survive. Subclasses supply the actual
 * round-trip via {@link #writeAndRead}.
 */
public abstract class TestReadProjection {
  /**
   * Writes {@code record} with {@code writeSchema} and reads it back projected
   * to {@code readSchema}; {@code desc} names the temp file for the case.
   */
  protected abstract Record writeAndRead(String desc,
                                         Schema writeSchema,
                                         Schema readSchema,
                                         Record record) throws IOException;

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // projecting with the identical schema returns every field unchanged
  @Test
  public void testFullProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Record projected = writeAndRead("full_projection", schema, schema, record);

    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

    // compare as CharSequence: the reader may return Utf8 rather than String
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("data"));
    Assert.assertTrue("Should contain the correct data value", cmp == 0);
  }

  // reading with the same fields in a different order reorders by position
  @Test
  public void testReorderedFullProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("full_projection", schema, reordered, record);

    Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
    Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
  }

  // fields in the projection but not in the file come back null
  @Test
  public void testReorderedProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "missing_2", Types.LongType.get())
    );

    Record projected = writeAndRead("full_projection", schema, reordered, record);

    Assert.assertNull("Should contain the correct 0 value", projected.get(0));
    Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
    Assert.assertNull("Should contain the correct 2 value", projected.get(2));
  }

  // an empty projection yields a record with no accessible fields
  @Test
  public void testEmptyProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Record projected = writeAndRead("empty_projection", schema, schema.select(), record);

    Assert.assertNotNull("Should read a non-null record", projected);
    try {
      projected.get(0);
      Assert.fail("Should not retrieve value with ordinal 0");
    } catch (ArrayIndexOutOfBoundsException e) {
      // this is expected because there are no values
    }
  }

  // selecting a single top-level field drops the others
  @Test
  public void testBasicProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record);
    Assert.assertNull("Should not project data", projected.get("data"));
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

    Schema dataOnly = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record);

    Assert.assertNull("Should not project id", projected.get("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("data"));
    Assert.assertTrue("Should contain the correct data value", cmp == 0);
  }

  // fields are matched by id, so renaming in the read schema still resolves
  @Test
  public void testRename() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema readSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "renamed", Types.StringType.get())
    );

    Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);

    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("renamed"));
    Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0);
  }

  // projection applies recursively inside nested structs
  @Test
  public void testNestedStructProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get()),
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    Record location = new Record(
        AvroSchemaUtil.fromOption(record.getSchema().getField("location").schema()));
    location.put("lat", 52.995143f);
    location.put("long", -1.539054f);
    record.put("location", location);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Record projectedLocation = (Record) projected.get("location");
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project location", projectedLocation);

    Schema latOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertNull("Should not project longitude", projectedLocation.get("long"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.get("lat"), 0.000001f);

    Schema longOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertNull("Should not project latitutde", projectedLocation.get("lat"));
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.get("long"), 0.000001f);

    Schema locationOnly = writeSchema.select("location");
    projected = writeAndRead("location_only", writeSchema, locationOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
  }

  // selecting only a map's key or value still projects the entire map
  @Test
  public void testMapProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "properties",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get()))
    );

    Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B");

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("properties", properties);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project properties map", projected.get("properties"));

    Schema keyOnly = writeSchema.select("properties.key");
    projected = writeAndRead("key_only", writeSchema, keyOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    Schema valueOnly = writeSchema.select("properties.value");
    projected = writeAndRead("value_only", writeSchema, valueOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    Schema mapOnly = writeSchema.select("properties");
    projected = writeAndRead("map_only", writeSchema, mapOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));
  }

  /**
   * Normalizes a read-back map for comparison: keys (and CharSequence values,
   * e.g. Avro Utf8) are converted to plain Strings; other values pass through.
   */
  private Map<String, ?> toStringMap(Map<?, ?> map) {
    Map<String, Object> stringMap = Maps.newHashMap();
    for (Map.Entry<?, ?> entry : map.entrySet()) {
      if (entry.getValue() instanceof CharSequence) {
        stringMap.put(entry.getKey().toString(), entry.getValue().toString());
      } else {
        stringMap.put(entry.getKey().toString(), entry.getValue());
      }
    }
    return stringMap;
  }

  // projection inside map values: struct fields can be narrowed or renamed
  @Test
  public void testMapOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "locations",
            Types.MapType.ofOptional(6, 7,
                Types.StringType.get(),
                Types.StructType.of(
                    Types.NestedField.required(1, "lat", Types.FloatType.get()),
                    Types.NestedField.required(2, "long", Types.FloatType.get())
                )
            ))
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    Record l1 = new Record(AvroSchemaUtil.fromOption(
        AvroSchemaUtil.fromOption(record.getSchema().getField("locations").schema())
            .getValueType()));
    l1.put("lat", 53.992811f);
    l1.put("long", -1.542616f);
    Record l2 = new Record(l1.getSchema());
    l2.put("lat", 52.995143f);
    l2.put("long", -1.539054f);
    record.put("locations", ImmutableMap.of("L1", l1, "L2", l2));

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project locations map", projected.get("locations"));

    projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project locations map",
        record.get("locations"), toStringMap((Map) projected.get("locations")));

    projected = writeAndRead("lat_only",
        writeSchema, writeSchema.select("locations.lat"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Map<String, ?> locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    Record projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain lat",
        53.992811f, (float) projectedL1.get("lat"), 0.000001);
    Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
    Record projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain lat",
        52.995143f, (float) projectedL2.get("lat"), 0.000001);
    Assert.assertNull("L2 should not contain long", projectedL2.get("long"));

    projected = writeAndRead("long_only",
        writeSchema, writeSchema.select("locations.long"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
    Assert.assertEquals("L1 should contain long",
        -1.542616f, (float) projectedL1.get("long"), 0.000001);
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
    Assert.assertEquals("L2 should contain long",
        -1.539054f, (float) projectedL2.get("long"), 0.000001);

    // renamed nested field still resolves by field id (1 -> "latitude")
    Schema latitiudeRenamed = new Schema(
        Types.NestedField.optional(5, "locations",
            Types.MapType.ofOptional(6, 7,
                Types.StringType.get(),
                Types.StructType.of(
                    Types.NestedField.required(1, "latitude", Types.FloatType.get())
                )
            ))
    );

    projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain latitude",
        53.992811f, (float) projectedL1.get("latitude"), 0.000001);
    Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
    Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain latitude",
        52.995143f, (float) projectedL2.get("latitude"), 0.000001);
    Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
    Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
  }

  // selecting a list's element projects the entire list
  @Test
  public void testListProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(10, "values",
            Types.ListType.ofOptional(11, Types.LongType.get()))
    );

    List<Long> values = ImmutableList.of(56L, 57L, 58L);

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("values", values);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project values list", projected.get("values"));

    Schema elementOnly = writeSchema.select("values.element");
    projected = writeAndRead("element_only", writeSchema, elementOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire list", values, projected.get("values"));

    Schema listOnly = writeSchema.select("values");
    projected = writeAndRead("list_only", writeSchema, listOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire list", values, projected.get("values"));
  }

  // projection inside list elements: struct fields can be narrowed or renamed
  @Test
  @SuppressWarnings("unchecked")
  public void testListOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(22, "points",
            Types.ListType.ofOptional(21, Types.StructType.of(
                Types.NestedField.required(19, "x", Types.IntegerType.get()),
                Types.NestedField.optional(18, "y", Types.IntegerType.get())
            ))
        )
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    Record p1 = new Record(AvroSchemaUtil.fromOption(
        AvroSchemaUtil.fromOption(record.getSchema().getField("points").schema())
            .getElementType()));
    p1.put("x", 1);
    p1.put("y", 2);
    Record p2 = new Record(p1.getSchema());
    p2.put("x", 3);
    p2.put("y", null);
    record.put("points", ImmutableList.of(p1, p2));

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project points list", projected.get("points"));

    projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project points list",
        record.get("points"), projected.get("points"));

    projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project points list", projected.get("points"));
    List<Record> points = (List<Record>) projected.get("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    Record projectedP1 = points.get(0);
    Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x"));
    Assert.assertNull("Should not project y", projectedP1.get("y"));
    Record projectedP2 = points.get(1);
    Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x"));
    Assert.assertNull("Should not project y", projectedP2.get("y"));

    projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project points list", projected.get("points"));
    points = (List<Record>) projected.get("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    projectedP1 = points.get(0);
    Assert.assertNull("Should not project x", projectedP1.get("x"));
    Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y"));
    projectedP2 = points.get(1);
    Assert.assertNull("Should not project x", projectedP2.get("x"));
    Assert.assertEquals("Should project null y", null, projectedP2.get("y"));

    // renamed nested field resolves by field id (18 -> "z"), null preserved
    Schema yRenamed = new Schema(
        Types.NestedField.optional(22, "points",
            Types.ListType.ofOptional(21, Types.StructType.of(
                Types.NestedField.optional(18, "z", Types.IntegerType.get())
            ))
        )
    );

    projected = writeAndRead("y_renamed", writeSchema, yRenamed, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project points list", projected.get("points"));
    points = (List<Record>) projected.get("points");
    Assert.assertEquals("Should read 2 points", 2, points.size());
    projectedP1 = points.get(0);
    Assert.assertNull("Should not project x", projectedP1.get("x"));
    Assert.assertNull("Should not project y", projectedP1.get("y"));
    Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z"));
    projectedP2 = points.get(1);
    Assert.assertNull("Should not project x", projectedP2.get("x"));
    Assert.assertNull("Should not project y", projectedP2.get("y"));
    Assert.assertEquals("Should project null z", null, projectedP2.get("z"));
  }
}
2,153
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestMetricsRowGroupFilter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.io.OutputFile; import com.netflix.iceberg.types.Types.FloatType; import com.netflix.iceberg.types.Types.IntegerType; import com.netflix.iceberg.types.Types.LongType; import com.netflix.iceberg.types.Types.StringType; import org.apache.avro.generic.GenericData.Record; import org.apache.avro.generic.GenericRecordBuilder; import org.apache.parquet.hadoop.ParquetFileReader; import org.apache.parquet.hadoop.metadata.BlockMetaData; import org.apache.parquet.schema.MessageType; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import java.io.File; import java.io.IOException; import java.util.UUID; import static com.netflix.iceberg.avro.AvroSchemaUtil.convert; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.isNull; import static 
com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.notEqual; import static com.netflix.iceberg.expressions.Expressions.notNull; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestMetricsRowGroupFilter { private static final Schema SCHEMA = new Schema( required(1, "id", IntegerType.get()), optional(2, "no_stats", StringType.get()), required(3, "required", StringType.get()), optional(4, "all_nulls", LongType.get()), optional(5, "some_nulls", StringType.get()), optional(6, "no_nulls", StringType.get()), optional(7, "not_in_file", FloatType.get()) ); private static final Schema FILE_SCHEMA = new Schema( required(1, "_id", IntegerType.get()), optional(2, "_no_stats", StringType.get()), required(3, "_required", StringType.get()), optional(4, "_all_nulls", LongType.get()), optional(5, "_some_nulls", StringType.get()), optional(6, "_no_nulls", StringType.get()) ); private static final String TOO_LONG_FOR_STATS; static { StringBuilder sb = new StringBuilder(); for (int i = 0; i < 200; i += 1) { sb.append(UUID.randomUUID().toString()); } TOO_LONG_FOR_STATS = sb.toString(); } private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-test.parquet"); private static MessageType PARQUET_SCHEMA = null; private static BlockMetaData ROW_GROUP_METADATA = null; @BeforeClass public static void createInputFile() throws IOException { if (PARQUET_FILE.exists()) { Assert.assertTrue(PARQUET_FILE.delete()); } OutputFile outFile = Files.localOutput(PARQUET_FILE); try (FileAppender<Record> appender = Parquet.write(outFile) .schema(FILE_SCHEMA) .build()) { GenericRecordBuilder builder = new 
GenericRecordBuilder(convert(FILE_SCHEMA, "table")); // create 50 records for (int i = 0; i < 50; i += 1) { builder.set("_id", 30 + i); // min=30, max=79, num-nulls=0 builder.set("_no_stats", TOO_LONG_FOR_STATS); // value longer than 4k will produce no stats builder.set("_required", "req"); // required, always non-null builder.set("_all_nulls", null); // never non-null builder.set("_some_nulls", (i % 10 == 0) ? null : "some"); // includes some null values builder.set("_no_nulls", ""); // optional, but always non-null appender.add(builder.build()); } } InputFile inFile = Files.localInput(PARQUET_FILE); try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile))) { Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size()); ROW_GROUP_METADATA = reader.getRowGroups().get(0); PARQUET_SCHEMA = reader.getFileMetaData().getSchema(); } PARQUET_FILE.deleteOnExit(); } @Test public void testAllNulls() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("all_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("some_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("no_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead); } @Test public void testNoNulls() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("all_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: at least one null value in all null column", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("some_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); 
Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("no_nulls")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead); } @Test public void testRequiredColumn() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notNull("required")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: required columns are always non-null", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, isNull("required")) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: required columns are always non-null", shouldRead); } @Test public void testMissingColumn() { TestHelpers.assertThrows("Should complain about missing column in expression", ValidationException.class, "Cannot find field 'missing'", () -> new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("missing", 5)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA)); } @Test public void testColumnNotInFile() { Expression[] cannotMatch = new Expression[] { lessThan("not_in_file", 1.0f), lessThanOrEqual("not_in_file", 1.0f), equal("not_in_file", 1.0f), greaterThan("not_in_file", 1.0f), greaterThanOrEqual("not_in_file", 1.0f), notNull("not_in_file") }; for (Expression expr : cannotMatch) { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip when column is not in file (all nulls): " + expr, shouldRead); } Expression[] canMatch = new Expression[] { isNull("not_in_file"), notEqual("not_in_file", 1.0f) }; for (Expression expr : canMatch) { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read when column is not in file (all nulls): " + expr, shouldRead); } } @Test public void testMissingStats() { 
Expression[] exprs = new Expression[] { lessThan("no_stats", "a"), lessThanOrEqual("no_stats", "b"), equal("no_stats", "c"), greaterThan("no_stats", "d"), greaterThanOrEqual("no_stats", "e"), notEqual("no_stats", "f"), isNull("no_stats"), notNull("no_stats") }; for (Expression expr : exprs) { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead); } } @Test public void testZeroRecordFile() { BlockMetaData emptyBlock = new BlockMetaData(); emptyBlock.setRowCount(0); Expression[] exprs = new Expression[] { lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78), greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"), notNull("some_nulls") }; for (Expression expr : exprs) { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, expr) .shouldRead(PARQUET_SCHEMA, emptyBlock); Assert.assertFalse("Should never read 0-record file: " + expr, shouldRead); } } @Test public void testNot() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(lessThan("id", 5))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: not(false)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(greaterThan("id", 5))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: not(true)", shouldRead); } @Test public void testAnd() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, and(lessThan("id", 5), greaterThanOrEqual("id", 0))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: and(false, false)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, and(greaterThan("id", 
5), lessThanOrEqual("id", 30))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: and(true, true)", shouldRead); } @Test public void testOr() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 80))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should skip: or(false, false)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 60))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: or(false, true)", shouldRead); } @Test public void testIntegerLt() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 5)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 30)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 31)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThan("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerLtEq() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 5)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 29)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: 
id range below lower bound (29 < 30)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 30)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, lessThanOrEqual("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: many possible ids", shouldRead); } @Test public void testIntegerGt() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 85)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 78)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThan("id", 75)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerGtEq() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 85)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 80)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: one possible id", shouldRead); 
shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 75)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerEq() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 5)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id below lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 29)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id below lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 30)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 75)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 80)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id above upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal("id", 85)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertFalse("Should not read: id above upper bound", shouldRead); } @Test public void testIntegerNotEq() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 5)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 29)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id below 
lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 30)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 75)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 79)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 80)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, notEqual("id", 85)) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id above upper bound", shouldRead); } @Test public void testIntegerNotEqRewritten() { boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 5))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 29))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 30))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 75))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 79))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should 
read: id equal to upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 80))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, not(equal("id", 85))) .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA); Assert.assertTrue("Should read: id above upper bound", shouldRead); } }
2,154
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestDictionaryRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StringType;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.parquet.column.page.DictionaryPageReadStore;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.UUID;
import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Tests for {@code ParquetDictionaryRowGroupFilter}: a single Parquet file with one
 * dictionary-encoded row group is written once in {@link #createInputFile()}, and each
 * test evaluates a filter expression against that row group's dictionary pages.
 *
 * <p>Writer-side column names carry a leading underscore ({@code _id}) while the table
 * schema uses plain names ({@code id}); the filter is expected to resolve columns by
 * field id, not by name. {@code not_in_file} exists only in the table schema.
 */
public class TestDictionaryRowGroupFilter {

  // Table schema used to bind filter expressions (field ids 1-8).
  private static final Schema SCHEMA = new Schema(
      required(1, "id", IntegerType.get()),
      optional(2, "no_stats", StringType.get()),
      required(3, "required", StringType.get()),
      optional(4, "all_nulls", LongType.get()),
      optional(5, "some_nulls", StringType.get()),
      optional(6, "no_nulls", StringType.get()),
      optional(7, "non_dict", StringType.get()),
      optional(8, "not_in_file", FloatType.get())  // never written; no dictionary exists for it
  );

  // Schema actually written to the file: same field ids, underscore-prefixed names,
  // and no counterpart for field 8 ("not_in_file").
  private static final Schema FILE_SCHEMA = new Schema(
      required(1, "_id", IntegerType.get()),
      optional(2, "_no_stats", StringType.get()),
      required(3, "_required", StringType.get()),
      optional(4, "_all_nulls", LongType.get()),
      optional(5, "_some_nulls", StringType.get()),
      optional(6, "_no_nulls", StringType.get()),
      optional(7, "_non_dict", StringType.get())
  );

  // A single value long enough (200 UUIDs, ~7KB) that Parquet drops min/max stats
  // for the column, so only the dictionary can be used to filter it.
  private static final String TOO_LONG_FOR_STATS;
  static {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < 200; i += 1) {
      sb.append(UUID.randomUUID().toString());
    }
    TOO_LONG_FOR_STATS = sb.toString();
  }

  private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-test.parquet");

  // Populated once by createInputFile() and shared (read-only) by every test.
  private static MessageType PARQUET_SCHEMA = null;
  private static BlockMetaData ROW_GROUP_METADATA = null;
  private static DictionaryPageReadStore DICTIONARY_STORE = null;

  /**
   * Writes the shared test file (1000 records in one row group) and captures its
   * schema, row-group metadata, and dictionary page store for the tests.
   *
   * @throws IOException if writing or reopening the Parquet file fails
   */
  @BeforeClass
  public static void createInputFile() throws IOException {
    if (PARQUET_FILE.exists()) {
      Assert.assertTrue(PARQUET_FILE.delete());
    }

    OutputFile outFile = Files.localOutput(PARQUET_FILE);
    try (FileAppender<Record> appender = Parquet.write(outFile)
        .schema(FILE_SCHEMA)
        .build()) {
      GenericRecordBuilder builder = new GenericRecordBuilder(convert(FILE_SCHEMA, "table"));
      // create 20 copies of each record to ensure dictionary-encoding
      for (int copy = 0; copy < 20; copy += 1) {
        // create 50 records
        for (int i = 0; i < 50; i += 1) {
          builder.set("_id", 30 + i); // min=30, max=79, num-nulls=0
          builder.set("_no_stats", TOO_LONG_FOR_STATS); // value longer than 4k will produce no stats
          builder.set("_required", "req"); // required, always non-null
          builder.set("_all_nulls", null); // never non-null
          builder.set("_some_nulls", (i % 10 == 0) ? null : "some"); // includes some null values
          builder.set("_no_nulls", ""); // optional, but always non-null
          builder.set("_non_dict", UUID.randomUUID().toString()); // not dictionary-encoded
          appender.add(builder.build());
        }
      }
    }

    InputFile inFile = Files.localInput(PARQUET_FILE);

    // NOTE(review): the reader is never closed here (unlike the try-with-resources used in
    // the stats-filter test's setup). Presumably DICTIONARY_STORE reads dictionary pages
    // lazily from the open reader, so closing it would break the tests — confirm before
    // "fixing" this apparent leak.
    ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile));

    Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
    ROW_GROUP_METADATA = reader.getRowGroups().get(0);
    PARQUET_SCHEMA = reader.getFileMetaData().getSchema();
    DICTIONARY_STORE = reader.getNextDictionaryReader();

    PARQUET_FILE.deleteOnExit();
  }

  // notNull predicates: a dictionary holds only non-null values, so it cannot prove
  // a column contains no nulls — every case must read.
  @Test
  public void testAllNulls() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
  }

  // isNull predicates: likewise undecidable from dictionaries on optional columns.
  @Test
  public void testNoNulls() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("all_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("some_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("no_nulls"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: dictionary filter doesn't help", shouldRead);
  }

  // A required column is known non-null from the schema alone, so isNull can be pruned.
  @Test
  public void testRequiredColumn() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: required columns are always non-null", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, isNull("required"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: required columns are always non-null", shouldRead);
  }

  // Referencing a column absent from the table schema is a binding error, not a skip.
  @Test
  public void testMissingColumn() {
    TestHelpers.assertThrows("Should complain about missing column in expression",
        ValidationException.class, "Cannot find field 'missing'",
        () -> new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("missing", 5))
            .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE));
  }

  // Column exists in the table schema but not in the file: no dictionary to consult,
  // so the filter must fall back to reading for every predicate shape.
  @Test
  public void testColumnNotInFile() {
    Expression[] exprs = new Expression[] {
        lessThan("not_in_file", 1.0f), lessThanOrEqual("not_in_file", 1.0f),
        equal("not_in_file", 1.0f), greaterThan("not_in_file", 1.0f),
        greaterThanOrEqual("not_in_file", 1.0f), notNull("not_in_file"),
        isNull("not_in_file"), notEqual("not_in_file", 1.0f)
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
    }
  }

  // "_non_dict" holds distinct UUIDs, so the writer does not dictionary-encode it
  // (or falls back mid-write); with no usable dictionary the filter must read.
  @Test
  public void testColumnFallbackOrNotDictionaryEncoded() {
    Expression[] exprs = new Expression[] {
        lessThan("non_dict", "a"), lessThanOrEqual("non_dict", "a"), equal("non_dict", "a"),
        greaterThan("non_dict", "a"), greaterThanOrEqual("non_dict", "a"), notNull("non_dict"),
        isNull("non_dict"), notEqual("non_dict", "a")
    };

    for (Expression expr : exprs) {
      boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, expr)
          .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
      Assert.assertTrue("Should read: dictionary cannot be found: " + expr, shouldRead);
    }
  }

  // Unlike min/max stats, the dictionary survives oversized values: "no_stats" has no
  // column stats but its dictionary proves "a" never occurs, so the group is skipped.
  @Test
  public void testMissingStats() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("no_stats", "a"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: stats are missing but dictionary is present", shouldRead);
  }

  @Test
  public void testNot() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(lessThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: not(false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(greaterThan("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: not(true)", shouldRead);
  }

  @Test
  public void testAnd() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        and(lessThan("id", 5), greaterThanOrEqual("id", 0)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: and(false, false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        and(greaterThan("id", 5), lessThanOrEqual("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: and(true, true)", shouldRead);
  }

  @Test
  public void testOr() {
    // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: or(false, false)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA,
        or(lessThan("id", 5), greaterThanOrEqual("id", 60)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: or(false, true)", shouldRead);
  }

  // The integer tests below probe the dictionary value range of "id" (30..79 inclusive)
  // against each comparison operator at and around both bounds.
  @Test
  public void testIntegerLt() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 31))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: may possible ids", shouldRead);
  }

  @Test
  public void testIntegerLtEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, lessThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: many possible ids", shouldRead);
  }

  @Test
  public void testIntegerGt() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 78))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThan("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: may possible ids", shouldRead);
  }

  @Test
  public void testIntegerGtEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: one possible id", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, greaterThanOrEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: may possible ids", shouldRead);
  }

  @Test
  public void testIntegerEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, equal("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should not read: id above upper bound", shouldRead);
  }

  // notEqual on "id": the dictionary contains 50 distinct values, so no single value
  // can cover every row — each case must read.
  @Test
  public void testIntegerNotEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 5))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 29))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 30))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 75))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 79))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 80))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("id", 85))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  // Same expectations as testIntegerNotEq, written as not(equal(...)) to exercise
  // the expression rewriter.
  @Test
  public void testIntegerNotEqRewritten() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 5)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 29)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id below lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 30)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to lower bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 75)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 79)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id equal to upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 80)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);

    shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, not(equal("id", 85)))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertTrue("Should read: id above upper bound", shouldRead);
  }

  // Every non-null "_some_nulls" value is "some": the dictionary contains exactly one
  // entry, so notEqual("some_nulls", "some") can prune the row group.
  // NOTE(review): this assumes nulls alone cannot satisfy notEqual — confirm against
  // the filter's null semantics.
  @Test
  public void testStringNotEq() {
    boolean shouldRead = new ParquetDictionaryRowGroupFilter(SCHEMA, notEqual("some_nulls", "some"))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA, DICTIONARY_STORE);
    Assert.assertFalse("Should skip: all values are 'some'", shouldRead);
  }
}
2,155
0
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/test/java/com/netflix/iceberg/parquet/TestMetricsRowGroupFilterTypes.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.BinaryType;
import com.netflix.iceberg.types.Types.BooleanType;
import com.netflix.iceberg.types.Types.DateType;
import com.netflix.iceberg.types.Types.DoubleType;
import com.netflix.iceberg.types.Types.FixedType;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StringType;
import com.netflix.iceberg.types.Types.TimeType;
import com.netflix.iceberg.types.Types.TimestampType;
import com.netflix.iceberg.types.Types.UUIDType;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.generic.GenericRecordBuilder;
import org.apache.commons.io.Charsets;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.schema.MessageType;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.UUID;

import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Type-coverage test for {@link ParquetMetricsRowGroupFilter}: for each supported column type,
 * writes a single-row-group Parquet file and checks that an {@code equal} predicate on a value
 * present in the row group reads the group, while a predicate on an absent value skips it.
 * <p>
 * Runs once per entry of {@link #parameters()} via the JUnit 4 {@code Parameterized} runner.
 */
@RunWith(Parameterized.class)
public class TestMetricsRowGroupFilterTypes {
  // Schema used for the filter expressions (read-side names, no underscore prefix).
  private static final Schema SCHEMA = new Schema(
      optional(1, "boolean", BooleanType.get()),
      optional(2, "int", IntegerType.get()),
      optional(3, "long", LongType.get()),
      optional(4, "float", FloatType.get()),
      optional(5, "double", DoubleType.get()),
      optional(6, "date", DateType.get()),
      optional(7, "time", TimeType.get()),
      optional(8, "timestamp", TimestampType.withoutZone()),
      optional(9, "timestamptz", TimestampType.withZone()),
      optional(10, "string", StringType.get()),
      optional(11, "uuid", UUIDType.get()),
      optional(12, "fixed", FixedType.ofLength(4)),
      optional(13, "binary", BinaryType.get()),
      optional(14, "int_decimal", Types.DecimalType.of(8, 2)),
      optional(15, "long_decimal", Types.DecimalType.of(14, 2)),
      optional(16, "fixed_decimal", Types.DecimalType.of(31, 2))
  );

  // Schema used to write the data file; field names are prefixed with '_' so the filter must
  // match columns by id, not by name.
  private static final Schema FILE_SCHEMA = new Schema(
      optional(1, "_boolean", BooleanType.get()),
      optional(2, "_int", IntegerType.get()),
      optional(3, "_long", LongType.get()),
      optional(4, "_float", FloatType.get()),
      optional(5, "_double", DoubleType.get()),
      optional(6, "_date", DateType.get()),
      optional(7, "_time", TimeType.get()),
      optional(8, "_timestamp", TimestampType.withoutZone()),
      optional(9, "_timestamptz", TimestampType.withZone()),
      optional(10, "_string", StringType.get()),
      optional(11, "_uuid", UUIDType.get()),
      optional(12, "_fixed", FixedType.ofLength(4)),
      optional(13, "_binary", BinaryType.get()),
      optional(14, "_int_decimal", Types.DecimalType.of(8, 2)),
      optional(15, "_long_decimal", Types.DecimalType.of(14, 2)),
      optional(16, "_fixed_decimal", Types.DecimalType.of(31, 2))
  );

  private static final File PARQUET_FILE = new File("/tmp/stats-row-group-filter-types-test.parquet");
  // Captured from the written file in createInputFile() and shared by every parameterized run.
  private static MessageType PARQUET_SCHEMA = null;
  private static BlockMetaData ROW_GROUP_METADATA = null;

  // Fixed values written into every record; the same instances are used to build predicates.
  private static final UUID uuid = UUID.randomUUID();
  private static final Integer date = (Integer) Literal.of("2018-06-29").to(DateType.get()).value();
  private static final Long time = (Long) Literal.of("10:02:34.000000").to(TimeType.get()).value();
  private static final Long timestamp = (Long) Literal.of("2018-06-29T10:02:34.000000")
      .to(TimestampType.withoutZone()).value();
  private static final GenericFixed fixed = new GenericData.Fixed(
      org.apache.avro.Schema.createFixed("_fixed", null, null, 4),
      "abcd".getBytes(Charsets.UTF_8));

  /**
   * Writes 50 records to a local Parquet file, then reopens it to capture the file's message
   * type and the single row group's metadata for use by the tests.
   *
   * @throws IOException if writing or re-reading the Parquet file fails
   */
  @BeforeClass
  public static void createInputFile() throws IOException {
    if (PARQUET_FILE.exists()) {
      Assert.assertTrue(PARQUET_FILE.delete());
    }

    OutputFile outFile = Files.localOutput(PARQUET_FILE);
    try (FileAppender<GenericData.Record> appender = Parquet.write(outFile)
        .schema(FILE_SCHEMA)
        .build()) {
      GenericRecordBuilder builder = new GenericRecordBuilder(convert(FILE_SCHEMA, "table"));
      // create 50 records
      for (int i = 0; i < 50; i += 1) {
        builder.set("_boolean", false);
        builder.set("_int", i);
        builder.set("_long", 5_000_000_000L + i);
        builder.set("_float", ((float) (100 - i)) / 100F + 1.0F); // 2.0f, 1.99f, 1.98f, ...
        builder.set("_double", ((double) i) / 100.0D + 2.0D); // 2.0d, 2.01d, 2.02d, ...
        builder.set("_date", date);
        builder.set("_time", time);
        builder.set("_timestamp", timestamp);
        builder.set("_timestamptz", timestamp);
        builder.set("_string", "tapir");
        builder.set("_uuid", uuid);
        builder.set("_fixed", fixed);
        builder.set("_binary", ByteBuffer.wrap("xyz".getBytes(Charsets.UTF_8)));
        builder.set("_int_decimal", new BigDecimal("77.77"));
        builder.set("_long_decimal", new BigDecimal("88.88"));
        builder.set("_fixed_decimal", new BigDecimal("99.99"));
        appender.add(builder.build());
      }
    }

    // Reopen the file to capture its schema and row-group stats; the test assumes all 50
    // records landed in one row group.
    InputFile inFile = Files.localInput(PARQUET_FILE);
    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(inFile))) {
      Assert.assertEquals("Should create only one row group", 1, reader.getRowGroups().size());
      ROW_GROUP_METADATA = reader.getRowGroups().get(0);
      PARQUET_SCHEMA = reader.getFileMetaData().getSchema();
    }

    PARQUET_FILE.deleteOnExit();
  }

  // Per-run parameters: column under test, a value present in the row group, and one that is not.
  private final String column;
  private final Object readValue;
  private final Object skipValue;

  /**
   * One row per column type: { column name, value contained in the file, value not contained }.
   * Date/time/timestamp/decimal values are given as strings and converted by the filter's
   * literal handling.
   */
  @Parameterized.Parameters
  public static Object[][] parameters() {
    return new Object[][] {
        new Object[] { "boolean", false, true },
        new Object[] { "int", 5, 55 },
        new Object[] { "long", 5_000_000_049L, 5_000L },
        new Object[] { "float", 1.97f, 2.11f },
        new Object[] { "double", 2.11d, 1.97d },
        new Object[] { "date", "2018-06-29", "2018-05-03" },
        new Object[] { "time", "10:02:34.000000", "10:02:34.000001" },
        new Object[] { "timestamp",
            "2018-06-29T10:02:34.000000",
            "2018-06-29T15:02:34.000000" },
        new Object[] { "timestamptz",
            "2018-06-29T10:02:34.000000+00:00",
            "2018-06-29T10:02:34.000000-07:00" },
        new Object[] { "string", "tapir", "monthly" },
        // new Object[] { "uuid", uuid, UUID.randomUUID() }, // not supported yet
        new Object[] { "fixed", "abcd".getBytes(Charsets.UTF_8), new byte[] { 0, 1, 2, 3 } },
        new Object[] { "binary", "xyz".getBytes(Charsets.UTF_8), new byte[] { 0, 1, 2, 3, 4, 5 } },
        new Object[] { "int_decimal", "77.77", "12.34" },
        new Object[] { "long_decimal", "88.88", "12.34" },
        new Object[] { "fixed_decimal", "99.99", "12.34" },
    };
  }

  public TestMetricsRowGroupFilterTypes(String column, Object readValue, Object skipValue) {
    this.column = column;
    this.readValue = readValue;
    this.skipValue = skipValue;
  }

  /**
   * Verifies that equality on a present value keeps the row group and equality on an absent
   * value (outside the column's min/max stats) skips it.
   */
  @Test
  public void testEq() {
    boolean shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal(column, readValue))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertTrue("Should read: value is in the row group: " + readValue, shouldRead);

    shouldRead = new ParquetMetricsRowGroupFilter(SCHEMA, equal(column, skipValue))
        .shouldRead(PARQUET_SCHEMA, ROW_GROUP_METADATA);
    Assert.assertFalse("Should skip: value is not in the row group: " + skipValue, shouldRead);
  }
}
2,156
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ColumnWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.ColumnWriteStore; import org.apache.parquet.io.api.Binary; public abstract class ColumnWriter<T> implements TripleWriter<T> { @SuppressWarnings("unchecked") static <T> ColumnWriter<T> newWriter(ColumnDescriptor desc) { switch (desc.getType()) { case BOOLEAN: return (ColumnWriter<T>) new ColumnWriter<Boolean>(desc) { @Override public void write(int rl, Boolean value) { writeBoolean(rl, value); } }; case INT32: return (ColumnWriter<T>) new ColumnWriter<Integer>(desc) { @Override public void write(int rl, Integer value) { writeInteger(rl, value); } }; case INT64: return (ColumnWriter<T>) new ColumnWriter<Long>(desc) { @Override public void write(int rl, Long value) { writeLong(rl, value); } }; case FLOAT: return (ColumnWriter<T>) new ColumnWriter<Float>(desc) { @Override public void write(int rl, Float value) { writeFloat(rl, value); } }; case DOUBLE: return (ColumnWriter<T>) new ColumnWriter<Double>(desc) { @Override public void write(int rl, Double value) { writeDouble(rl, value); } }; case FIXED_LEN_BYTE_ARRAY: case BINARY: return (ColumnWriter<T>) new ColumnWriter<Binary>(desc) { @Override public void write(int rl, Binary value) { writeBinary(rl, value); } }; default: throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType()); } } private final 
ColumnDescriptor desc; private final int maxDefinitionLevel; private long triplesCount = 0L; private org.apache.parquet.column.ColumnWriter columnWriter = null; private ColumnWriter(ColumnDescriptor desc) { this.desc = desc; this.maxDefinitionLevel = desc.getMaxDefinitionLevel(); } public void setColumnStore(ColumnWriteStore columnStore) { this.columnWriter = columnStore.getColumnWriter(desc); } @Override public void writeBoolean(int rl, boolean value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeInteger(int rl, int value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeLong(int rl, long value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeFloat(int rl, float value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeDouble(int rl, double value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeBinary(int rl, Binary value) { this.triplesCount += 1; columnWriter.write(value, rl, maxDefinitionLevel); } @Override public void writeNull(int rl, int dl) { this.triplesCount += 1; columnWriter.writeNull(rl, dl); } }
2,157
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetSchemaUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types.MessageTypeBuilder;

import java.util.Set;

/**
 * Static utilities for converting between Iceberg {@link Schema} and Parquet
 * {@link MessageType}, and for pruning Parquet file schemas to a projection.
 */
public class ParquetSchemaUtil {
  // utility class: not meant to be instantiated
  private ParquetSchemaUtil() {
  }

  /**
   * Converts an Iceberg schema to an equivalent Parquet message type.
   *
   * @param schema an Iceberg schema
   * @param name the name for the resulting Parquet message type
   * @return the Parquet message type
   */
  public static MessageType convert(Schema schema, String name) {
    return new TypeToMessageType().convert(schema, name);
  }

  /**
   * Converts a Parquet message type to an Iceberg schema, preserving name aliases collected
   * during conversion.
   *
   * @param parquetSchema a Parquet message type with field ids
   * @return the equivalent Iceberg schema
   */
  public static Schema convert(MessageType parquetSchema) {
    MessageTypeToType converter = new MessageTypeToType(parquetSchema);
    return new Schema(
        ParquetTypeVisitor.visit(parquetSchema, converter).asNestedType().fields(),
        converter.getAliases());
  }

  /**
   * Prunes a Parquet file schema (with field ids) down to the columns selected by the
   * expected schema.
   *
   * @param fileSchema the Parquet file schema
   * @param expectedSchema the projected Iceberg schema
   * @return a Parquet schema containing only the projected columns, in file order
   */
  public static MessageType pruneColumns(MessageType fileSchema, Schema expectedSchema) {
    // column order must match the incoming type, so it doesn't matter that the ids are unordered
    Set<Integer> selectedIds = TypeUtil.getProjectedIds(expectedSchema);
    return (MessageType) ParquetTypeVisitor.visit(fileSchema, new PruneColumns(selectedIds));
  }

  /**
   * Prunes columns from a Parquet file schema that was written without field ids.
   * <p>
   * Files that were written without field ids are read assuming that schema evolution preserved
   * column order. Deleting columns was not allowed.
   * <p>
   * The order of columns in the resulting Parquet schema matches the Parquet file.
   *
   * @param fileSchema schema from a Parquet file that does not have field ids.
   * @param expectedSchema expected schema
   * @return a parquet schema pruned using the expected schema
   */
  public static MessageType pruneColumnsFallback(MessageType fileSchema, Schema expectedSchema) {
    // only top-level field ids are considered; fallback ids are ordinals starting at 1,
    // matching the assignment made by addFallbackIds
    Set<Integer> selectedIds = Sets.newHashSet();

    for (Types.NestedField field : expectedSchema.columns()) {
      selectedIds.add(field.fieldId());
    }

    MessageTypeBuilder builder = org.apache.parquet.schema.Types.buildMessage();

    int ordinal = 1;
    for (Type type : fileSchema.getFields()) {
      if (selectedIds.contains(ordinal)) {
        builder.addField(type.withId(ordinal));
      }
      ordinal += 1;
    }

    return builder.named(fileSchema.getName());
  }

  /**
   * Returns whether the file schema carries field ids for every field.
   *
   * @param fileSchema a Parquet message type
   * @return true if no field needed an id assignment during conversion
   */
  public static boolean hasIds(MessageType fileSchema) {
    try {
      // Try to convert the type to Iceberg. If an ID assignment is needed, return false.
      ParquetTypeVisitor.visit(fileSchema, new MessageTypeToType(fileSchema) {
        @Override
        protected int nextId() {
          throw new IllegalStateException("Needed to assign ID");
        }
      });
      // no assignment was needed
      return true;
    } catch (IllegalStateException e) {
      // at least one field was missing an id.
      return false;
    }
  }

  /**
   * Assigns ordinal-based fallback field ids (starting at 1) to the top-level fields of a
   * Parquet schema that was written without ids.
   *
   * @param fileSchema a Parquet message type without field ids
   * @return an equivalent message type with ordinal ids on its top-level fields
   */
  public static MessageType addFallbackIds(MessageType fileSchema) {
    MessageTypeBuilder builder = org.apache.parquet.schema.Types.buildMessage();

    int ordinal = 1; // ids are assigned starting at 1
    for (Type type : fileSchema.getFields()) {
      builder.addField(type.withId(ordinal));
      ordinal += 1;
    }

    return builder.named(fileSchema.getName());
  }
}
2,158
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueWriters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.util.Utf8;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.ColumnWriteStore;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.Type;

import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * Factories and base classes for {@link ParquetValueWriter} implementations.
 * <p>
 * Values flow down a tree of writers (struct, list, map, option) to leaf
 * {@link PrimitiveWriter}s; each write carries a repetition level, and nulls/empties are
 * written to every underlying column at the appropriate definition level.
 */
public class ParquetValueWriters {
  private ParquetValueWriters() {
  }

  /**
   * Wraps a writer in an {@link OptionWriter} when the Parquet type is OPTIONAL; REQUIRED
   * types are returned unchanged.
   *
   * @param type the Parquet type of the field
   * @param definitionLevel the definition level of the field when present
   * @param writer the writer for present values
   * @return the (possibly wrapped) writer
   */
  public static <T> ParquetValueWriter<T> option(Type type,
                                                 int definitionLevel,
                                                 ParquetValueWriter<T> writer) {
    if (type.isRepetition(Type.Repetition.OPTIONAL)) {
      return new OptionWriter<>(definitionLevel, writer);
    }

    return writer;
  }

  /** Returns a writer for primitive values with unboxed write methods. */
  public static <T> UnboxedWriter<T> unboxed(ColumnDescriptor desc) {
    return new UnboxedWriter<>(desc);
  }

  /** Returns a writer that encodes CharSequence values as UTF-8 binary. */
  public static PrimitiveWriter<CharSequence> strings(ColumnDescriptor desc) {
    return new StringWriter(desc);
  }

  /** Returns a decimal writer backed by an int32 column (precision &lt;= 9). */
  public static PrimitiveWriter<BigDecimal> decimalAsInteger(ColumnDescriptor desc,
                                                             int precision, int scale) {
    return new IntegerDecimalWriter(desc, precision, scale);
  }

  /** Returns a decimal writer backed by an int64 column (precision &lt;= 18). */
  public static PrimitiveWriter<BigDecimal> decimalAsLong(ColumnDescriptor desc,
                                                          int precision, int scale) {
    return new LongDecimalWriter(desc, precision, scale);
  }

  /** Returns a decimal writer backed by a fixed-length binary column. */
  public static PrimitiveWriter<BigDecimal> decimalAsFixed(ColumnDescriptor desc,
                                                           int precision, int scale) {
    return new FixedDecimalWriter(desc, precision, scale);
  }

  /** Returns a writer for ByteBuffer values written as binary. */
  public static PrimitiveWriter<ByteBuffer> byteBuffers(ColumnDescriptor desc) {
    return new BytesWriter(desc);
  }

  /** Returns a list writer over an element writer for the given levels. */
  public static <E> CollectionWriter<E> collections(int dl, int rl,
                                                    ParquetValueWriter<E> writer) {
    return new CollectionWriter<>(dl, rl, writer);
  }

  /** Returns a map writer over key and value writers for the given levels. */
  public static <K, V> MapWriter<K, V> maps(int dl, int rl,
                                            ParquetValueWriter<K> keyWriter,
                                            ParquetValueWriter<V> valueWriter) {
    return new MapWriter<>(dl, rl, keyWriter, valueWriter);
  }

  /**
   * Base class for leaf writers: owns a single {@link ColumnWriter} and forwards values to it.
   */
  public abstract static class PrimitiveWriter<T> implements ParquetValueWriter<T> {
    private final ColumnDescriptor desc;
    protected final ColumnWriter<T> column;
    private final List<TripleWriter<?>> children;

    protected PrimitiveWriter(ColumnDescriptor desc) {
      this.desc = desc;
      this.column = ColumnWriter.newWriter(desc);
      this.children = ImmutableList.of(column);
    }

    @Override
    public void write(int repetitionLevel, T value) {
      column.write(repetitionLevel, value);
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      this.column.setColumnStore(columnStore);
    }
  }

  /** Leaf writer exposing primitive-typed write methods to avoid boxing. */
  private static class UnboxedWriter<T> extends PrimitiveWriter<T> {
    private UnboxedWriter(ColumnDescriptor desc) {
      super(desc);
    }

    public void writeBoolean(int repetitionLevel, boolean value) {
      column.writeBoolean(repetitionLevel, value);
    }

    public void writeInteger(int repetitionLevel, int value) {
      column.writeInteger(repetitionLevel, value);
    }

    public void writeLong(int repetitionLevel, long value) {
      column.writeLong(repetitionLevel, value);
    }

    public void writeFloat(int repetitionLevel, float value) {
      column.writeFloat(repetitionLevel, value);
    }

    public void writeDouble(int repetitionLevel, double value) {
      column.writeDouble(repetitionLevel, value);
    }
  }

  /** Writes decimals as their unscaled int value; validates scale and precision per write. */
  private static class IntegerDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;

    private IntegerDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      column.writeInteger(repetitionLevel, decimal.unscaledValue().intValue());
    }
  }

  /** Writes decimals as their unscaled long value; validates scale and precision per write. */
  private static class LongDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;

    private LongDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      column.writeLong(repetitionLevel, decimal.unscaledValue().longValue());
    }
  }

  /**
   * Writes decimals as sign-extended big-endian fixed-length binary sized for the precision.
   * Uses a thread-local buffer so the byte array can be reused across writes.
   */
  private static class FixedDecimalWriter extends PrimitiveWriter<BigDecimal> {
    private final int precision;
    private final int scale;
    private final int length;
    private final ThreadLocal<byte[]> bytes;

    private FixedDecimalWriter(ColumnDescriptor desc, int precision, int scale) {
      super(desc);
      this.precision = precision;
      this.scale = scale;
      this.length = TypeUtil.decimalRequriedBytes(precision);
      this.bytes = ThreadLocal.withInitial(() -> new byte[length]);
    }

    @Override
    public void write(int repetitionLevel, BigDecimal decimal) {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      // pad with the sign byte (0xFF for negative, 0x00 otherwise) to sign-extend the
      // unscaled two's-complement bytes to the fixed length
      byte fillByte = (byte) (decimal.signum() < 0 ? 0xFF : 0x00);
      byte[] unscaled = decimal.unscaledValue().toByteArray();
      byte[] buf = bytes.get();
      int offset = length - unscaled.length;

      for (int i = 0; i < length; i += 1) {
        if (i < offset) {
          buf[i] = fillByte;
        } else {
          buf[i] = unscaled[i - offset];
        }
      }

      column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(buf));
    }
  }

  /** Writes ByteBuffer values as binary, reusing the buffer's backing storage. */
  private static class BytesWriter extends PrimitiveWriter<ByteBuffer> {
    private BytesWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, ByteBuffer buffer) {
      column.writeBinary(repetitionLevel, Binary.fromReusedByteBuffer(buffer));
    }
  }

  /**
   * Writes CharSequence values as UTF-8 binary; Avro {@link Utf8} values reuse their
   * backing byte array instead of re-encoding.
   */
  private static class StringWriter extends PrimitiveWriter<CharSequence> {
    private StringWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, CharSequence value) {
      if (value instanceof Utf8) {
        Utf8 utf8 = (Utf8) value;
        column.writeBinary(repetitionLevel,
            Binary.fromReusedByteArray(utf8.getBytes(), 0, utf8.getByteLength()));
      } else {
        column.writeBinary(repetitionLevel, Binary.fromString(value.toString()));
      }
    }
  }

  /**
   * Wrapper for OPTIONAL fields: delegates non-null values and writes null to every leaf
   * column at definitionLevel - 1 (the level of the missing value).
   */
  static class OptionWriter<T> implements ParquetValueWriter<T> {
    private final int definitionLevel;
    private final ParquetValueWriter<T> writer;
    private final List<TripleWriter<?>> children;

    OptionWriter(int definitionLevel, ParquetValueWriter<T> writer) {
      this.definitionLevel = definitionLevel;
      this.writer = writer;
      this.children = writer.columns();
    }

    @Override
    public void write(int repetitionLevel, T value) {
      if (value != null) {
        writer.write(repetitionLevel, value);

      } else {
        // a missing optional value must still be recorded in every underlying column
        for (TripleWriter<?> column : children) {
          column.writeNull(repetitionLevel, definitionLevel - 1);
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      writer.setColumnStore(columnStore);
    }
  }

  /**
   * Base class for repeated (list-like) writers. The first element of a list is written with
   * the parent's repetition level; subsequent elements use this writer's repetition level.
   *
   * @param <L> the list-like container type
   * @param <E> the element type
   */
  public abstract static class RepeatedWriter<L, E> implements ParquetValueWriter<L> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueWriter<E> writer;
    private final List<TripleWriter<?>> children;

    protected RepeatedWriter(int definitionLevel, int repetitionLevel,
                             ParquetValueWriter<E> writer) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.writer = writer;
      this.children = writer.columns();
    }

    @Override
    public void write(int parentRepetition, L value) {
      Iterator<E> elements = elements(value);

      if (!elements.hasNext()) {
        // write the empty list to each column
        // TODO: make sure this definition level is correct
        for (TripleWriter<?> column : children) {
          column.writeNull(parentRepetition, definitionLevel - 1);
        }

      } else {
        boolean first = true;
        while (elements.hasNext()) {
          E element = elements.next();

          // the first element starts the record/outer group, so it carries the parent's
          // repetition level; later elements repeat at this writer's level
          int rl = repetitionLevel;
          if (first) {
            rl = parentRepetition;
            first = false;
          }

          writer.write(rl, element);
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      writer.setColumnStore(columnStore);
    }

    // returns an iterator over the container's elements
    protected abstract Iterator<E> elements(L value);
  }

  /** List writer for java.util.Collection values. */
  private static class CollectionWriter<E> extends RepeatedWriter<Collection<E>, E> {
    private CollectionWriter(int definitionLevel, int repetitionLevel,
                             ParquetValueWriter<E> writer) {
      super(definitionLevel, repetitionLevel, writer);
    }

    @Override
    protected Iterator<E> elements(Collection<E> list) {
      return list.iterator();
    }
  }

  /**
   * Base class for repeated key/value (map-like) writers; mirrors {@link RepeatedWriter} but
   * writes each pair through separate key and value writers.
   *
   * @param <M> the map-like container type
   * @param <K> the key type
   * @param <V> the value type
   */
  public abstract static class RepeatedKeyValueWriter<M, K, V> implements ParquetValueWriter<M> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueWriter<K> keyWriter;
    private final ParquetValueWriter<V> valueWriter;
    private final List<TripleWriter<?>> children;

    protected RepeatedKeyValueWriter(int definitionLevel, int repetitionLevel,
                                     ParquetValueWriter<K> keyWriter,
                                     ParquetValueWriter<V> valueWriter) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
      this.children = ImmutableList.<TripleWriter<?>>builder()
          .addAll(keyWriter.columns())
          .addAll(valueWriter.columns())
          .build();
    }

    @Override
    public void write(int parentRepetition, M value) {
      Iterator<Map.Entry<K, V>> pairs = pairs(value);

      if (!pairs.hasNext()) {
        // write the empty map to each column
        for (TripleWriter<?> column : children) {
          column.writeNull(parentRepetition, definitionLevel - 1);
        }

      } else {
        boolean first = true;
        while (pairs.hasNext()) {
          Map.Entry<K, V> pair = pairs.next();

          // first pair uses the parent's repetition level, like RepeatedWriter
          int rl = repetitionLevel;
          if (first) {
            rl = parentRepetition;
            first = false;
          }

          keyWriter.write(rl, pair.getKey());
          valueWriter.write(rl, pair.getValue());
        }
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      keyWriter.setColumnStore(columnStore);
      valueWriter.setColumnStore(columnStore);
    }

    // returns an iterator over the container's key/value pairs
    protected abstract Iterator<Map.Entry<K, V>> pairs(M value);
  }

  /** Map writer for java.util.Map values. */
  private static class MapWriter<K, V> extends RepeatedKeyValueWriter<Map<K, V>, K, V> {
    private MapWriter(int definitionLevel, int repetitionLevel,
                      ParquetValueWriter<K> keyWriter,
                      ParquetValueWriter<V> valueWriter) {
      super(definitionLevel, repetitionLevel, keyWriter, valueWriter);
    }

    @Override
    protected Iterator<Map.Entry<K, V>> pairs(Map<K, V> map) {
      return map.entrySet().iterator();
    }
  }

  /**
   * Base class for struct writers: fans each field value out to the field's writer at the
   * same repetition level. Subclasses supply field access via {@link #get(Object, int)}.
   *
   * @param <S> the struct type
   */
  public abstract static class StructWriter<S> implements ParquetValueWriter<S> {
    private final ParquetValueWriter<Object>[] writers;
    private final List<TripleWriter<?>> children;

    @SuppressWarnings("unchecked")
    protected StructWriter(List<ParquetValueWriter<?>> writers) {
      // generic array creation via reflection; safe because only covariant reads occur
      this.writers = (ParquetValueWriter<Object>[]) Array.newInstance(
          ParquetValueWriter.class, writers.size());

      ImmutableList.Builder<TripleWriter<?>> columnsBuilder = ImmutableList.builder();
      for (int i = 0; i < writers.size(); i += 1) {
        ParquetValueWriter<?> writer = writers.get(i);
        this.writers[i] = (ParquetValueWriter<Object>) writer;
        columnsBuilder.addAll(writer.columns());
      }

      this.children = columnsBuilder.build();
    }

    @Override
    public void write(int repetitionLevel, S value) {
      for (int i = 0; i < writers.length; i += 1) {
        Object fieldValue = get(value, i);
        writers[i].write(repetitionLevel, fieldValue);
      }
    }

    @Override
    public List<TripleWriter<?>> columns() {
      return children;
    }

    @Override
    public void setColumnStore(ColumnWriteStore columnStore) {
      for (ParquetValueWriter<?> writer : writers) {
        writer.setColumnStore(columnStore);
      }
    }

    // returns the value of the struct field at the given position
    protected abstract Object get(S struct, int index);
  }
}
2,159
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetIterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import org.apache.parquet.hadoop.ParquetReader;

import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * A {@link CloseableIterable} over the records of a Parquet file. Each call to
 * {@link #iterator()} builds a new {@link ParquetReader} from the configured builder and
 * registers it with this group so it is closed with the iterable.
 *
 * @param <T> the record type produced by the reader
 */
public class ParquetIterable<T> extends CloseableGroup implements CloseableIterable<T> {
  private final ParquetReader.Builder<T> builder;

  ParquetIterable(ParquetReader.Builder<T> builder) {
    this.builder = builder;
  }

  @Override
  public Iterator<T> iterator() {
    ParquetReader<T> reader;
    try {
      reader = builder.build();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create Parquet reader");
    }
    // track the reader so closing this iterable closes it
    addCloseable(reader);
    return new ParquetIterator<>(reader);
  }

  /**
   * Look-ahead iterator over a {@link ParquetReader}. Reading is deferred until
   * {@link #hasNext()} because the reader may reuse the object it returns.
   */
  private static class ParquetIterator<T> implements Iterator<T>, Closeable {
    private final ParquetReader<T> parquet;
    private boolean consumed = false; // true once current was handed out by next()
    private boolean hasMore = false;  // whether current holds a valid record
    private T current = null;

    ParquetIterator(ParquetReader<T> parquet) {
      this.parquet = parquet;
      this.current = advance();
    }

    @Override
    public boolean hasNext() {
      if (consumed) {
        this.current = advance();
      }
      return hasMore;
    }

    @Override
    public T next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }

      this.consumed = true;
      return current;
    }

    // reads the next record; must run from hasNext because the reader reuses returned objects
    private T advance() {
      try {
        T value = parquet.read();
        this.consumed = false;
        this.hasMore = (value != null);
        return value;
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }
    }

    @Override
    public void remove() {
      throw new UnsupportedOperationException("Remove is not supported");
    }

    @Override
    public void close() throws IOException {
      parquet.close();
    }
  }
}
2,160
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvro.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.UUIDConversion;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.Conversion;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import org.apache.avro.specific.SpecificData;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;

/**
 * Helpers for reading/writing Parquet files through Avro with Iceberg's decimal and UUID
 * handling.
 * <p>
 * Standard Avro {@code decimal} logical types are rewritten to a custom {@code parquet-decimal}
 * logical type so that precision-based physical storage (int/long/fixed) and the matching
 * {@link Conversion} implementations are used instead of Avro's defaults.
 */
class ParquetAvro {

  /**
   * Rewrites {@code avroSchema}, replacing any {@code decimal} logical types with
   * {@code parquet-decimal} types backed by int, long, or fixed depending on precision.
   */
  static Schema parquetAvroSchema(Schema avroSchema) {
    return AvroSchemaVisitor.visit(avroSchema, new ParquetDecimalSchemaConverter());
  }

  /**
   * Custom logical type that carries decimal precision and scale as schema properties.
   * Valid base schema types are INT (precision &lt;= 9), LONG (precision &lt;= 18), and FIXED.
   */
  static class ParquetDecimal extends LogicalType {
    private static final String NAME = "parquet-decimal";
    private int precision;
    private int scale;

    ParquetDecimal(int precision, int scale) {
      super(NAME);
      this.precision = precision;
      this.scale = scale;
    }

    @Override
    public String getName() {
      return NAME;
    }

    int precision() {
      return precision;
    }

    int scale() {
      return scale;
    }

    @Override
    public Schema addToSchema(Schema schema) {
      super.addToSchema(schema);
      // precision/scale are serialized as string props so the registered factory below
      // can reconstruct this type when the schema is parsed back.
      schema.addProp("precision", String.valueOf(precision));
      schema.addProp("scale", String.valueOf(scale));
      return schema;
    }

    @Override
    public void validate(Schema schema) {
      super.validate(schema);
      switch (schema.getType()) {
        case INT:
          Preconditions.checkArgument(precision <= 9,
              "Int cannot hold decimal precision: %s", precision);
          break;
        case LONG:
          Preconditions.checkArgument(precision <= 18,
              "Long cannot hold decimal precision: %s", precision);
          // NOTE(review): no break here — falls through to FIXED, which is an empty case,
          // so behavior is the same as a break. A break would make the intent explicit.
        case FIXED:
          break;
        default:
          throw new IllegalArgumentException("Invalid base type for decimal: " + schema);
      }
      Preconditions.checkArgument(scale >= 0,
          "Scale %s cannot be negative", scale);
      Preconditions.checkArgument(scale <= precision,
          "Scale %s cannot be less than precision %s", scale, precision);
    }
  }

  static {
    // Register a factory so Avro can rebuild ParquetDecimal from the precision/scale
    // props written by addToSchema when parsing schemas.
    LogicalTypes.register(ParquetDecimal.NAME, schema -> {
      int precision = Integer.parseInt(schema.getProp("precision"));
      int scale = Integer.parseInt(schema.getProp("scale"));
      return new ParquetDecimal(precision, scale);
    });
  }

  /** Converts between {@link BigDecimal} and its unscaled value stored as an int. */
  private static class IntDecimalConversion extends Conversion<BigDecimal> {
    @Override
    public Class<BigDecimal> getConvertedType() {
      return BigDecimal.class;
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromInt(Integer value, org.apache.avro.Schema schema, LogicalType type) {
      // the stored int is the unscaled value; scale comes from the logical type
      return BigDecimal.valueOf(value, ((ParquetDecimal) type).scale());
    }

    @Override
    public Integer toInt(BigDecimal value, org.apache.avro.Schema schema, LogicalType type) {
      return value.unscaledValue().intValue();
    }
  }

  /** Converts between {@link BigDecimal} and its unscaled value stored as a long. */
  private static class LongDecimalConversion extends Conversion<BigDecimal> {
    @Override
    public Class<BigDecimal> getConvertedType() {
      return BigDecimal.class;
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromLong(Long value, org.apache.avro.Schema schema, LogicalType type) {
      return BigDecimal.valueOf(value, ((ParquetDecimal) type).scale());
    }

    @Override
    public Long toLong(BigDecimal value, org.apache.avro.Schema schema, LogicalType type) {
      return value.unscaledValue().longValue();
    }
  }

  /**
   * Fixed-length decimal conversion that answers to the {@code parquet-decimal} type name but
   * delegates to Avro's built-in {@link Conversions.DecimalConversion} using a cached standard
   * decimal type looked up by scale.
   * <p>
   * NOTE(review): the cache uses {@code decimal(i, i)} — the index serves as both precision and
   * scale. This presumably works because the fixed conversion only consults the scale; confirm
   * against the Avro DecimalConversion implementation.
   */
  private static class FixedDecimalConversion extends Conversions.DecimalConversion {
    private final LogicalType[] decimalsByScale = new LogicalType[39];

    private FixedDecimalConversion() {
      for (int i = 0; i < decimalsByScale.length; i += 1) {
        decimalsByScale[i] = LogicalTypes.decimal(i, i);
      }
    }

    @Override
    public String getLogicalTypeName() {
      return ParquetDecimal.NAME;
    }

    @Override
    public BigDecimal fromFixed(GenericFixed value, Schema schema, LogicalType type) {
      return super.fromFixed(value, schema, decimalsByScale[((ParquetDecimal) type).scale()]);
    }

    @Override
    public GenericFixed toFixed(BigDecimal value, Schema schema, LogicalType type) {
      return super.toFixed(value, schema, decimalsByScale[((ParquetDecimal) type).scale()]);
    }
  }

  /**
   * Avro data model that routes {@code parquet-decimal} types to the int/long/fixed decimal
   * conversions by precision and handles {@code uuid} via {@link UUIDConversion}.
   * <p>
   * Only the fixed decimal and UUID conversions are registered with
   * {@code addLogicalTypeConversion}; the int/long decimal conversions are returned directly
   * from the overridden lookup methods below.
   */
  static GenericData DEFAULT_MODEL = new SpecificData() {
    private final Conversion<?> fixedDecimalConversion = new FixedDecimalConversion();
    private final Conversion<?> intDecimalConversion = new IntDecimalConversion();
    private final Conversion<?> longDecimalConversion = new LongDecimalConversion();
    private final Conversion<?> uuidConversion = new UUIDConversion();

    {
      addLogicalTypeConversion(fixedDecimalConversion);
      addLogicalTypeConversion(uuidConversion);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Conversion<T> getConversionByClass(Class<T> datumClass, LogicalType logicalType) {
      if (logicalType == null) {
        return null;
      }

      if (logicalType instanceof ParquetDecimal) {
        // pick the conversion matching the physical storage chosen by precision
        ParquetDecimal decimal = (ParquetDecimal) logicalType;
        if (decimal.precision() <= 9) {
          return (Conversion<T>) intDecimalConversion;
        } else if (decimal.precision() <= 18) {
          return (Conversion<T>) longDecimalConversion;
        } else {
          return (Conversion<T>) fixedDecimalConversion;
        }
      } else if ("uuid".equals(logicalType.getName())) {
        return (Conversion<T>) uuidConversion;
      }
      return super.getConversionByClass(datumClass, logicalType);
    }

    @Override
    @SuppressWarnings("unchecked")
    public Conversion<Object> getConversionFor(LogicalType logicalType) {
      if (logicalType == null) {
        return null;
      }

      // also handle standard Avro decimal types, not just ParquetDecimal
      if (logicalType instanceof LogicalTypes.Decimal) {
        LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
        if (decimal.getPrecision() <= 9) {
          return (Conversion<Object>) intDecimalConversion;
        } else if (decimal.getPrecision() <= 18) {
          return (Conversion<Object>) longDecimalConversion;
        } else {
          return (Conversion<Object>) fixedDecimalConversion;
        }
      } else if ("uuid".equals(logicalType.getName())) {
        return (Conversion<Object>) uuidConversion;
      }
      return super.getConversionFor(logicalType);
    }
  };

  /**
   * Schema visitor that replaces standard decimal logical types with {@link ParquetDecimal},
   * choosing INT/LONG/FIXED storage by precision. Unchanged sub-schemas are returned as-is
   * (copy-on-change) so identity comparisons elsewhere stay valid.
   */
  private static class ParquetDecimalSchemaConverter extends AvroSchemaVisitor<Schema> {
    @Override
    public Schema record(Schema record, List<String> names, List<Schema> types) {
      List<Schema.Field> fields = record.getFields();
      int length = fields.size();

      boolean hasChange = false;
      if (length != types.size()) {
        hasChange = true;
      }

      List<Schema.Field> newFields = Lists.newArrayListWithExpectedSize(length);
      for (int i = 0; i < length; i += 1) {
        Schema.Field field = fields.get(i);
        Schema type = types.get(i);
        newFields.add(copyField(field, type));
        // a field whose visited type differs from the original requires a new record schema
        if (field.schema() != type) {
          hasChange = true;
        }
      }

      if (hasChange) {
        return copyRecord(record, newFields);
      }

      return record;
    }

    @Override
    public Schema union(Schema union, List<Schema> options) {
      if (!isIdentical(union.getTypes(), options)) {
        return Schema.createUnion(options);
      }
      return union;
    }

    @Override
    public Schema array(Schema array, Schema element) {
      if (array.getElementType() != element) {
        return Schema.createArray(element);
      }
      return array;
    }

    @Override
    public Schema map(Schema map, Schema value) {
      if (map.getValueType() != value) {
        return Schema.createMap(value);
      }
      return map;
    }

    @Override
    public Schema primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null && logicalType instanceof LogicalTypes.Decimal) {
        LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
        if (decimal.getPrecision() <= 9) {
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.create(Schema.Type.INT));
        } else if (decimal.getPrecision() <= 18) {
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.create(Schema.Type.LONG));
        } else {
          // larger decimals are stored in a fixed sized by the precision
          return new ParquetDecimal(decimal.getPrecision(), decimal.getScale())
              .addToSchema(Schema.createFixed(primitive.getName(), null, null,
                  TypeUtil.decimalRequriedBytes(decimal.getPrecision())));
        }
      }
      return primitive;
    }

    /** Returns true when both lists hold the same schema instances in the same order. */
    private boolean isIdentical(List<Schema> types, List<Schema> replacements) {
      if (types.size() != replacements.size()) {
        return false;
      }
      int length = types.size();
      for (int i = 0; i < length; i += 1) {
        if (types.get(i) != replacements.get(i)) {
          return false;
        }
      }
      return true;
    }

    /** Copies a record schema with new fields, preserving name, doc, namespace, and props. */
    private static Schema copyRecord(Schema record, List<Schema.Field> newFields) {
      Schema copy = Schema.createRecord(record.getName(),
          record.getDoc(), record.getNamespace(), record.isError(), newFields);

      for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
        copy.addProp(prop.getKey(), prop.getValue());
      }

      return copy;
    }

    /** Copies a field with a new schema, preserving doc, default, order, and props. */
    private static Schema.Field copyField(Schema.Field field, Schema newSchema) {
      Schema.Field copy = new Schema.Field(field.name(),
          newSchema, field.doc(), field.defaultVal(), field.order());

      for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
        copy.addProp(prop.getKey(), prop.getValue());
      }

      return copy;
    }
  }
}
2,161
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ColumnIterator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.Dictionary;
import org.apache.parquet.column.page.DataPage;
import org.apache.parquet.column.page.DictionaryPage;
import org.apache.parquet.column.page.PageReader;
import org.apache.parquet.io.ParquetDecodingException;
import org.apache.parquet.io.api.Binary;
import java.io.IOException;

/**
 * Iterates over the triples (definition level, repetition level, value) of a single column
 * within one row group, transparently advancing from data page to data page via an internal
 * {@link PageIterator}.
 *
 * @param <T> the boxed Java type of the column's values
 */
public abstract class ColumnIterator<T> implements TripleIterator<T> {
  /**
   * Creates a column iterator whose {@code next()} returns the boxed value matching the
   * column's Parquet primitive type.
   *
   * @param desc the column to iterate
   * @param writerVersion the "created by" string of the writer, used for page decoding quirks
   */
  @SuppressWarnings("unchecked")
  static <T> ColumnIterator<T> newIterator(ColumnDescriptor desc, String writerVersion) {
    switch (desc.getType()) {
      case BOOLEAN:
        return (ColumnIterator<T>) new ColumnIterator<Boolean>(desc, writerVersion) {
          @Override
          public Boolean next() {
            return nextBoolean();
          }
        };
      case INT32:
        return (ColumnIterator<T>) new ColumnIterator<Integer>(desc, writerVersion) {
          @Override
          public Integer next() {
            return nextInteger();
          }
        };
      case INT64:
        return (ColumnIterator<T>) new ColumnIterator<Long>(desc, writerVersion) {
          @Override
          public Long next() {
            return nextLong();
          }
        };
      case FLOAT:
        return (ColumnIterator<T>) new ColumnIterator<Float>(desc, writerVersion) {
          @Override
          public Float next() {
            return nextFloat();
          }
        };
      case DOUBLE:
        return (ColumnIterator<T>) new ColumnIterator<Double>(desc, writerVersion) {
          @Override
          public Double next() {
            return nextDouble();
          }
        };
      case FIXED_LEN_BYTE_ARRAY:
      case BINARY:
        // both fixed and variable-length byte arrays surface as Binary
        return (ColumnIterator<T>) new ColumnIterator<Binary>(desc, writerVersion) {
          @Override
          public Binary next() {
            return nextBinary();
          }
        };
      default:
        throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType());
    }
  }

  private final ColumnDescriptor desc;
  private final PageIterator<T> pageIterator;

  // state reset for each row group
  private PageReader pageSource = null;    // pages of the current row group
  private long triplesCount = 0L;          // total triples in the row group
  private long triplesRead = 0L;           // triples consumed so far
  private long advanceNextPageCount = 0L;  // cumulative triple count covered by loaded pages

  private ColumnIterator(ColumnDescriptor desc, String writerVersion) {
    this.desc = desc;
    this.pageIterator = PageIterator.newIterator(desc, writerVersion);
  }

  /**
   * Binds this iterator to a new row group's pages, resetting all read state and loading the
   * column's dictionary (if any) before positioning on the first page.
   */
  public void setPageSource(PageReader source) {
    this.pageSource = source;
    this.triplesCount = source.getTotalValueCount();
    this.triplesRead = 0L;
    this.advanceNextPageCount = 0L;
    this.pageIterator.reset();
    this.pageIterator.setDictionary(readDictionary(desc, pageSource));
    advance();
  }

  /**
   * Loads the next data page into the page iterator once all triples of the loaded pages have
   * been consumed. No-op while the current page still has unread triples.
   */
  private void advance() {
    if (triplesRead >= advanceNextPageCount) {
      // keep reading pages until one yields values, or the source is exhausted
      while (!pageIterator.hasNext()) {
        DataPage page = pageSource.readPage();
        if (page != null) {
          pageIterator.setPage(page);
          this.advanceNextPageCount += pageIterator.currentPageCount();
        } else {
          return;
        }
      }
    }
  }

  @Override
  public boolean hasNext() {
    return triplesRead < triplesCount;
  }

  @Override
  public int currentDefinitionLevel() {
    advance();
    return pageIterator.currentDefinitionLevel();
  }

  @Override
  public int currentRepetitionLevel() {
    advance();
    return pageIterator.currentRepetitionLevel();
  }

  // Each next* method counts the triple as read, ensures a page is loaded, then reads the
  // value from the page iterator.

  @Override
  public boolean nextBoolean() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextBoolean();
  }

  @Override
  public int nextInteger() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextInteger();
  }

  @Override
  public long nextLong() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextLong();
  }

  @Override
  public float nextFloat() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextFloat();
  }

  @Override
  public double nextDouble() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextDouble();
  }

  @Override
  public Binary nextBinary() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextBinary();
  }

  @Override
  public <N> N nextNull() {
    this.triplesRead += 1;
    advance();
    return pageIterator.nextNull();
  }

  /**
   * Reads and decodes the column's dictionary page, or returns null when the column is not
   * dictionary-encoded.
   *
   * @throws ParquetDecodingException if the dictionary page cannot be decoded
   */
  private static Dictionary readDictionary(ColumnDescriptor desc, PageReader pageSource) {
    DictionaryPage dictionaryPage = pageSource.readDictionaryPage();
    if (dictionaryPage != null) {
      try {
        return dictionaryPage.getEncoding().initDictionary(desc, dictionaryPage);
      } catch (IOException e) {
        throw new ParquetDecodingException("could not decode the dictionary for " + desc, e);
      }
    }
    return null;
  }
}
2,162
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.column.ColumnWriteStore; import java.util.List; public interface ParquetValueWriter<T> { void write(int repetitionLevel, T value); List<TripleWriter<?>> columns(); void setColumnStore(ColumnWriteStore columnStore); }
2,163
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriteSupport.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.conf.Configuration; import org.apache.parquet.hadoop.api.WriteSupport; import org.apache.parquet.io.api.RecordConsumer; import org.apache.parquet.schema.MessageType; import java.util.Map; class ParquetWriteSupport<T> extends WriteSupport<T> { private final MessageType type; private final Map<String, String> keyValueMetadata; private final WriteSupport<T> wrapped; ParquetWriteSupport(MessageType type, Map<String, String> keyValueMetadata, WriteSupport<T> writeSupport) { this.type = type; this.keyValueMetadata = keyValueMetadata; this.wrapped = writeSupport; } @Override public WriteContext init(Configuration configuration) { WriteContext wrappedContext = wrapped.init(configuration); Map<String, String> metadata = ImmutableMap.<String, String>builder() .putAll(keyValueMetadata) .putAll(wrappedContext.getExtraMetaData()) .build(); return new WriteContext(type, metadata); } @Override public String getName() { return "Iceberg/" + wrapped.getName(); } @Override public void prepareForWrite(RecordConsumer recordConsumer) { wrapped.prepareForWrite(recordConsumer); } @Override public void write(T t) { wrapped.write(t); } @Override public FinalizedWriteContext finalizeWrite() { return wrapped.finalizeWrite(); } }
2,164
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetMetrics.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.column.statistics.Statistics;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;
import org.apache.parquet.schema.MessageType;
import java.io.IOException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.netflix.iceberg.parquet.ParquetConversions.fromParquetPrimitive;

/**
 * Extracts Iceberg {@link Metrics} (row count, per-column sizes, value/null counts, and
 * lower/upper bounds) from a Parquet file footer.
 * <p>
 * NOTE(review): this class has only static members and a private constructor; implementing
 * {@link Serializable} appears unnecessary — confirm before removing.
 */
public class ParquetMetrics implements Serializable {
  // utility class; not meant to be instantiated
  private ParquetMetrics() {
  }

  /**
   * Reads the footer of {@code file} and converts it to {@link Metrics}.
   *
   * @throws RuntimeIOException if the footer cannot be read
   */
  public static Metrics fromInputFile(InputFile file) {
    try (ParquetFileReader reader = ParquetFileReader.open(ParquetIO.file(file))) {
      return fromMetadata(reader.getFooter());
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read footer of file: %s", file);
    }
  }

  /**
   * Aggregates footer metadata across all row groups into per-field metrics. Fields whose
   * statistics are missing in any row group have their null counts and bounds discarded so
   * that partial stats are never reported as complete.
   */
  public static Metrics fromMetadata(ParquetMetadata metadata) {
    long rowCount = 0;
    Map<Integer, Long> columnSizes = Maps.newHashMap();
    Map<Integer, Long> valueCounts = Maps.newHashMap();
    Map<Integer, Long> nullValueCounts = Maps.newHashMap();
    Map<Integer, Literal<?>> lowerBounds = Maps.newHashMap();
    Map<Integer, Literal<?>> upperBounds = Maps.newHashMap();
    Set<Integer> missingStats = Sets.newHashSet();

    MessageType parquetType = metadata.getFileMetaData().getSchema();
    Schema fileSchema = ParquetSchemaUtil.convert(parquetType);

    List<BlockMetaData> blocks = metadata.getBlocks();
    for (BlockMetaData block : blocks) {
      rowCount += block.getRowCount();
      for (ColumnChunkMetaData column : block.getColumns()) {
        // map the column path back to the Iceberg field id via the converted schema
        int fieldId = fileSchema.aliasToId(column.getPath().toDotString());
        increment(columnSizes, fieldId, column.getTotalSize());
        increment(valueCounts, fieldId, column.getValueCount());

        Statistics stats = column.getStatistics();
        if (stats == null) {
          missingStats.add(fieldId);
        } else if (!stats.isEmpty()) {
          increment(nullValueCounts, fieldId, stats.getNumNulls());

          // only add min/max stats for top-level fields
          // TODO: allow struct nesting, but not maps or arrays
          Types.NestedField field = fileSchema.asStruct().field(fieldId);
          if (field != null && stats.hasNonNullValue()) {
            updateMin(lowerBounds, fieldId,
                fromParquetPrimitive(field.type(), stats.genericGetMin()));
            updateMax(upperBounds, fieldId,
                fromParquetPrimitive(field.type(), stats.genericGetMax()));
          }
        }
      }
    }

    // discard accumulated values if any stats were missing
    for (Integer fieldId : missingStats) {
      nullValueCounts.remove(fieldId);
      lowerBounds.remove(fieldId);
      upperBounds.remove(fieldId);
    }

    return new Metrics(rowCount, columnSizes, valueCounts, nullValueCounts,
        toBufferMap(fileSchema, lowerBounds), toBufferMap(fileSchema, upperBounds));
  }

  /** Adds {@code amount} to the running total for {@code fieldId}; null maps are ignored. */
  private static void increment(Map<Integer, Long> columns, int fieldId, long amount) {
    if (columns != null) {
      if (columns.containsKey(fieldId)) {
        columns.put(fieldId, columns.get(fieldId) + amount);
      } else {
        columns.put(fieldId, amount);
      }
    }
  }

  /** Keeps the smaller of the current and new lower bound for {@code id}. */
  @SuppressWarnings("unchecked")
  private static <T> void updateMin(Map<Integer, Literal<?>> lowerBounds, int id,
                                    Literal<T> min) {
    Literal<T> currentMin = (Literal<T>) lowerBounds.get(id);
    if (currentMin == null || min.comparator().compare(min.value(), currentMin.value()) < 0) {
      lowerBounds.put(id, min);
    }
  }

  /** Keeps the larger of the current and new upper bound for {@code id}. */
  @SuppressWarnings("unchecked")
  private static <T> void updateMax(Map<Integer, Literal<?>> upperBounds, int id,
                                    Literal<T> max) {
    Literal<T> currentMax = (Literal<T>) upperBounds.get(id);
    if (currentMax == null || max.comparator().compare(max.value(), currentMax.value()) > 0) {
      upperBounds.put(id, max);
    }
  }

  /** Serializes each bound literal to the single-value byte representation of its type. */
  private static Map<Integer, ByteBuffer> toBufferMap(Schema schema,
                                                      Map<Integer, Literal<?>> map) {
    Map<Integer, ByteBuffer> bufferMap = Maps.newHashMap();
    for (Map.Entry<Integer, Literal<?>> entry : map.entrySet()) {
      bufferMap.put(entry.getKey(),
          Conversions.toByteBuffer(schema.findType(entry.getKey()), entry.getValue().value()));
    }
    return bufferMap;
  }
}
2,165
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.column.page.PageReadStore; import java.util.List; public interface ParquetValueReader<T> { T read(T reuse); TripleIterator<?> column(); List<TripleIterator<?>> columns(); void setPageSource(PageReadStore pageStore); }
2,166
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetReadSupport.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.hadoop.api.InitContext;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.io.api.RecordMaterializer;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumns;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.pruneColumnsFallback;

/**
 * Parquet {@link ReadSupport} that handles column projection based on {@link Schema} column IDs,
 * wrapping another read support that materializes the records.
 *
 * @param <T> Java type produced by this read support instance
 */
class ParquetReadSupport<T> extends ReadSupport<T> {
  private final Schema expectedSchema;
  private final ReadSupport<T> wrapped;
  // whether to call the wrapped read support's init; some implementations require it
  private final boolean callInit;

  ParquetReadSupport(Schema expectedSchema, ReadSupport<T> readSupport, boolean callInit) {
    this.expectedSchema = expectedSchema;
    this.wrapped = readSupport;
    this.callInit = callInit;
  }

  /**
   * Builds the read context: projects the file schema down to the expected schema (by field ID
   * when the file has IDs, by name-based fallback otherwise), configures Avro read options, and
   * optionally initializes the wrapped read support.
   */
  @Override
  @SuppressWarnings("deprecation")
  public ReadContext init(Configuration configuration, Map<String, String> keyValueMetaData,
                          MessageType fileSchema) {
    // Columns are selected from the Parquet file by taking the read context's message type and
    // matching to the file's columns by full path, so this must select columns by using the path
    // in the file's schema.
    MessageType projection = hasIds(fileSchema) ?
        pruneColumns(fileSchema, expectedSchema) :
        pruneColumnsFallback(fileSchema, expectedSchema);

    // override some known backward-compatibility options
    configuration.set("parquet.strict.typing", "false");
    configuration.set("parquet.avro.add-list-element-records", "false");
    configuration.set("parquet.avro.write-old-list-structure", "false");

    // set Avro schemas in case the reader is Avro
    AvroReadSupport.setRequestedProjection(configuration,
        AvroSchemaUtil.convert(expectedSchema, projection.getName()));
    org.apache.avro.Schema avroReadSchema = AvroSchemaUtil.buildAvroProjection(
        AvroSchemaUtil.convert(ParquetSchemaUtil.convert(projection), projection.getName()),
        expectedSchema, ImmutableMap.of());
    AvroReadSupport.setAvroReadSchema(configuration, ParquetAvro.parquetAvroSchema(avroReadSchema));

    // let the context set up read support metadata, but always use the correct projection
    ReadContext context = null;
    if (callInit) {
      try {
        context = wrapped.init(configuration, keyValueMetaData, projection);
      } catch (UnsupportedOperationException e) {
        // try the InitContext version; some read supports only implement the newer signature
        context = wrapped.init(new InitContext(
            configuration, makeMultimap(keyValueMetaData), projection));
      }
    }

    return new ReadContext(projection,
        context != null ? context.getReadSupportMetadata() : ImmutableMap.of());
  }

  /**
   * Delegates record materialization to the wrapped read support, substituting a read schema
   * converted from the expected schema so field names match what the wrapped support expects.
   */
  @Override
  public RecordMaterializer<T> prepareForRead(Configuration configuration,
                                              Map<String, String> fileMetadata,
                                              MessageType fileMessageType,
                                              ReadContext readContext) {
    // This is the type created in init that was based on the file's schema. The schema that this
    // will pass to the wrapped ReadSupport needs to match the expected schema's names. Rather than
    // renaming the file's schema, convert the expected schema to Parquet. This relies on writing
    // files with the correct schema.
    // TODO: this breaks when columns are reordered.
    MessageType readSchema = ParquetSchemaUtil.convert(expectedSchema, fileMessageType.getName());
    return wrapped.prepareForRead(configuration, fileMetadata, readSchema, readContext);
  }

  /** Wraps each metadata value in a singleton set, as required by {@link InitContext}. */
  private Map<String, Set<String>> makeMultimap(Map<String, String> map) {
    ImmutableMap.Builder<String, Set<String>> builder = ImmutableMap.builder();
    for (Map.Entry<String, String> entry : map.entrySet()) {
      builder.put(entry.getKey(), Sets.newHashSet(entry.getValue()));
    }
    return builder.build();
  }
}
2,167
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TypeWithSchemaVisitor.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import com.netflix.iceberg.types.Types; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.OriginalType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import java.util.LinkedList; import java.util.List; import static org.apache.parquet.schema.Type.Repetition.REPEATED; /** * Visitor for traversing a Parquet type with a companion Iceberg type. * * @param <T> the Java class returned by the visitor */ public class TypeWithSchemaVisitor<T> { protected LinkedList<String> fieldNames = Lists.newLinkedList(); public static <T> T visit(com.netflix.iceberg.types.Type iType, Type type, TypeWithSchemaVisitor<T> visitor) { if (type instanceof MessageType) { Types.StructType struct = iType != null ? iType.asStructType() : null; return visitor.message(struct, (MessageType) type, visitFields(struct, type.asGroupType(), visitor)); } else if (type.isPrimitive()) { com.netflix.iceberg.types.Type.PrimitiveType iPrimitive = iType != null ? 
iType.asPrimitiveType() : null; return visitor.primitive(iPrimitive, type.asPrimitiveType()); } else { // if not a primitive, the typeId must be a group GroupType group = type.asGroupType(); OriginalType annotation = group.getOriginalType(); if (annotation != null) { switch (annotation) { case LIST: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid list: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid list: does not contain single repeated field: " + group); GroupType repeatedElement = group.getFields().get(0).asGroupType(); Preconditions.checkArgument(repeatedElement.isRepetition(REPEATED), "Invalid list: inner group is not repeated"); Preconditions.checkArgument(repeatedElement.getFieldCount() <= 1, "Invalid list: repeated group is not a single field: " + group); Types.ListType list = null; Types.NestedField element = null; if (iType != null) { list = iType.asListType(); element = list.fields().get(0); } visitor.fieldNames.push(repeatedElement.getName()); try { T elementResult = null; if (repeatedElement.getFieldCount() > 0) { elementResult = visitField(element, repeatedElement.getType(0), visitor); } return visitor.list(list, group, elementResult); } finally { visitor.fieldNames.pop(); } case MAP: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid map: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid map: does not contain single repeated field: " + group); GroupType repeatedKeyValue = group.getType(0).asGroupType(); Preconditions.checkArgument(repeatedKeyValue.isRepetition(REPEATED), "Invalid map: inner group is not repeated"); Preconditions.checkArgument(repeatedKeyValue.getFieldCount() <= 2, "Invalid map: repeated group does not have 2 fields"); Types.MapType map = null; Types.NestedField keyField = null; Types.NestedField valueField = null; if (iType != null) { map = iType.asMapType(); keyField = 
map.fields().get(0); valueField = map.fields().get(1); } visitor.fieldNames.push(repeatedKeyValue.getName()); try { T keyResult = null; T valueResult = null; switch (repeatedKeyValue.getFieldCount()) { case 2: // if there are 2 fields, both key and value are projected keyResult = visitField(keyField, repeatedKeyValue.getType(0), visitor); valueResult = visitField(valueField, repeatedKeyValue.getType(1), visitor); case 1: // if there is just one, use the name to determine what it is Type keyOrValue = repeatedKeyValue.getType(0); if (keyOrValue.getName().equalsIgnoreCase("key")) { keyResult = visitField(keyField, keyOrValue, visitor); // value result remains null } else { valueResult = visitField(valueField, keyOrValue, visitor); // key result remains null } default: // both results will remain null } return visitor.map(map, group, keyResult, valueResult); } finally { visitor.fieldNames.pop(); } default: } } Types.StructType struct = iType != null ? iType.asStructType() : null; return visitor.struct(struct, group, visitFields(struct, group, visitor)); } } private static <T> T visitField(Types.NestedField iField, Type field, TypeWithSchemaVisitor<T> visitor) { visitor.fieldNames.push(field.getName()); try { return visit(iField != null ? iField.type() : null, field, visitor); } finally { visitor.fieldNames.pop(); } } private static <T> List<T> visitFields(Types.StructType struct, GroupType group, TypeWithSchemaVisitor<T> visitor) { List<T> results = Lists.newArrayListWithExpectedSize(group.getFieldCount()); for (Type field : group.getFields()) { int id = -1; if (field.getId() != null) { id = field.getId().intValue(); } Types.NestedField iField = (struct != null && id >= 0) ? 
struct.field(id) : null; results.add(visitField(iField, field, visitor)); } return results; } public T message(Types.StructType iStruct, MessageType message, List<T> fields) { return null; } public T struct(Types.StructType iStruct, GroupType struct, List<T> fields) { return null; } public T list(Types.ListType iList, GroupType array, T element) { return null; } public T map(Types.MapType iMap, GroupType map, T key, T value) { return null; } public T primitive(com.netflix.iceberg.types.Type.PrimitiveType iPrimitive, PrimitiveType primitive) { return null; } }
2,168
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetTypeVisitor.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.OriginalType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import java.util.LinkedList; import java.util.List; import static org.apache.parquet.schema.Type.Repetition.REPEATED; public class ParquetTypeVisitor<T> { protected LinkedList<String> fieldNames = Lists.newLinkedList(); public static <T> T visit(Type type, ParquetTypeVisitor<T> visitor) { if (type instanceof MessageType) { return visitor.message((MessageType) type, visitFields(type.asGroupType(), visitor)); } else if (type.isPrimitive()) { return visitor.primitive(type.asPrimitiveType()); } else { // if not a primitive, the typeId must be a group GroupType group = type.asGroupType(); OriginalType annotation = group.getOriginalType(); if (annotation != null) { switch (annotation) { case LIST: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid list: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid list: does not contain single repeated field: " + group); GroupType repeatedElement = group.getFields().get(0).asGroupType(); Preconditions.checkArgument(repeatedElement.isRepetition(REPEATED), 
"Invalid list: inner group is not repeated"); Preconditions.checkArgument(repeatedElement.getFieldCount() <= 1, "Invalid list: repeated group is not a single field: " + group); visitor.fieldNames.push(repeatedElement.getName()); try { T elementResult = null; if (repeatedElement.getFieldCount() > 0) { elementResult = visitField(repeatedElement.getType(0), visitor); } return visitor.list(group, elementResult); } finally { visitor.fieldNames.pop(); } case MAP: Preconditions.checkArgument(!group.isRepetition(REPEATED), "Invalid map: top-level group is repeated: " + group); Preconditions.checkArgument(group.getFieldCount() == 1, "Invalid map: does not contain single repeated field: " + group); GroupType repeatedKeyValue = group.getType(0).asGroupType(); Preconditions.checkArgument(repeatedKeyValue.isRepetition(REPEATED), "Invalid map: inner group is not repeated"); Preconditions.checkArgument(repeatedKeyValue.getFieldCount() <= 2, "Invalid map: repeated group does not have 2 fields"); visitor.fieldNames.push(repeatedKeyValue.getName()); try { T keyResult = null; T valueResult = null; switch (repeatedKeyValue.getFieldCount()) { case 2: // if there are 2 fields, both key and value are projected keyResult = visitField(repeatedKeyValue.getType(0), visitor); valueResult = visitField(repeatedKeyValue.getType(1), visitor); case 1: // if there is just one, use the name to determine what it is Type keyOrValue = repeatedKeyValue.getType(0); if (keyOrValue.getName().equalsIgnoreCase("key")) { keyResult = visitField(keyOrValue, visitor); // value result remains null } else { valueResult = visitField(keyOrValue, visitor); // key result remains null } default: // both results will remain null } return visitor.map(group, keyResult, valueResult); } finally { visitor.fieldNames.pop(); } default: } } return visitor.struct(group, visitFields(group, visitor)); } } private static <T> T visitField(Type field, ParquetTypeVisitor<T> visitor) { visitor.fieldNames.push(field.getName()); try { 
return visit(field, visitor); } finally { visitor.fieldNames.pop(); } } private static <T> List<T> visitFields(GroupType group, ParquetTypeVisitor<T> visitor) { List<T> results = Lists.newArrayListWithExpectedSize(group.getFieldCount()); for (Type field : group.getFields()) { results.add(visitField(field, visitor)); } return results; } public T message(MessageType message, List<T> fields) { return null; } public T struct(GroupType struct, List<T> fields) { return null; } public T list(GroupType array, T element) { return null; } public T map(GroupType map, T key, T value) { return null; } public T primitive(PrimitiveType primitive) { return null; } }
2,169
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TripleIterator.java
/**
 * An iterator over Parquet triples (repetition level, definition level, value).
 * <p>
 * {@link #currentDefinitionLevel()} and {@link #currentRepetitionLevel()} peek at the current
 * triple without advancing; {@link java.util.Iterator#next()} and the typed {@code nextXxx}
 * variants consume it. The typed variants avoid boxing and throw
 * {@link UnsupportedOperationException} when the column's physical type does not match.
 */
interface TripleIterator<T> extends Iterator<T> {
  /**
   * Returns the definition level from the current triple.
   * <p>
   * This method does not advance this iterator.
   *
   * @return the definition level of the current triple.
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  int currentDefinitionLevel();

  /**
   * Returns the repetition level from the current triple or 0 if there are no more elements.
   * <p>
   * This method does not advance this iterator.
   *
   * @return the repetition level of the current triple, or 0 if there is no current triple.
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  int currentRepetitionLevel();

  /**
   * Returns the next value as an un-boxed boolean.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed boolean
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not booleans
   */
  default boolean nextBoolean() {
    throw new UnsupportedOperationException("Not a boolean column");
  }

  /**
   * Returns the next value as an un-boxed int.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed int
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not ints
   */
  default int nextInteger() {
    throw new UnsupportedOperationException("Not an integer column");
  }

  /**
   * Returns the next value as an un-boxed long.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed long
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not longs
   */
  default long nextLong() {
    throw new UnsupportedOperationException("Not a long column");
  }

  /**
   * Returns the next value as an un-boxed float.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed float
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not floats
   */
  default float nextFloat() {
    throw new UnsupportedOperationException("Not a float column");
  }

  /**
   * Returns the next value as an un-boxed double.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as an un-boxed double
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not doubles
   */
  default double nextDouble() {
    throw new UnsupportedOperationException("Not a double column");
  }

  /**
   * Returns the next value as a Binary.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return the next value as a Binary
   * @throws java.util.NoSuchElementException if there are no more elements
   * @throws UnsupportedOperationException if the underlying data values are not binary
   */
  default Binary nextBinary() {
    throw new UnsupportedOperationException("Not a binary column");
  }

  /**
   * Returns null and advances the iterator.
   * <p>
   * This method has the same behavior as {@link #next()} and will advance this iterator.
   *
   * @return null
   * @throws java.util.NoSuchElementException if there are no more elements
   */
  <N> N nextNull();
}
2,170
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.collect.Lists; import com.netflix.iceberg.parquet.ParquetValueWriters.PrimitiveWriter; import com.netflix.iceberg.parquet.ParquetValueWriters.StructWriter; import org.apache.avro.generic.GenericData.Fixed; import org.apache.avro.generic.IndexedRecord; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.io.api.Binary; import org.apache.parquet.schema.DecimalMetadata; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import java.util.Iterator; import java.util.List; import static com.netflix.iceberg.parquet.ParquetValueWriters.collections; import static com.netflix.iceberg.parquet.ParquetValueWriters.maps; import static com.netflix.iceberg.parquet.ParquetValueWriters.option; public class ParquetAvroWriter { private ParquetAvroWriter() { } @SuppressWarnings("unchecked") public static <T> ParquetValueWriter<T> buildWriter(MessageType type) { return (ParquetValueWriter<T>) ParquetTypeVisitor.visit(type, new WriteBuilder(type)); } private static class WriteBuilder extends ParquetTypeVisitor<ParquetValueWriter<?>> { private final MessageType type; WriteBuilder(MessageType type) { this.type = type; } @Override public ParquetValueWriter<?> message(MessageType message, List<ParquetValueWriter<?>> 
fieldWriters) { return struct(message.asGroupType(), fieldWriters); } @Override public ParquetValueWriter<?> struct(GroupType struct, List<ParquetValueWriter<?>> fieldWriters) { List<Type> fields = struct.getFields(); List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size()); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = struct.getType(i); int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())); writers.add(option(fieldType, fieldD, fieldWriters.get(i))); } return new RecordWriter(writers); } @Override public ParquetValueWriter<?> list(GroupType array, ParquetValueWriter<?> elementWriter) { GroupType repeated = array.getFields().get(0).asGroupType(); String[] repeatedPath = currentPath(); int repeatedD = type.getMaxDefinitionLevel(repeatedPath); int repeatedR = type.getMaxRepetitionLevel(repeatedPath); org.apache.parquet.schema.Type elementType = repeated.getType(0); int elementD = type.getMaxDefinitionLevel(path(elementType.getName())); return collections(repeatedD, repeatedR, option(elementType, elementD, elementWriter)); } @Override public ParquetValueWriter<?> map(GroupType map, ParquetValueWriter<?> keyWriter, ParquetValueWriter<?> valueWriter) { GroupType repeatedKeyValue = map.getFields().get(0).asGroupType(); String[] repeatedPath = currentPath(); int repeatedD = type.getMaxDefinitionLevel(repeatedPath); int repeatedR = type.getMaxRepetitionLevel(repeatedPath); org.apache.parquet.schema.Type keyType = repeatedKeyValue.getType(0); int keyD = type.getMaxDefinitionLevel(path(keyType.getName())); org.apache.parquet.schema.Type valueType = repeatedKeyValue.getType(1); int valueD = type.getMaxDefinitionLevel(path(valueType.getName())); return maps(repeatedD, repeatedR, option(keyType, keyD, keyWriter), option(valueType, valueD, valueWriter)); } @Override public ParquetValueWriter<?> primitive(PrimitiveType primitive) { ColumnDescriptor desc = type.getColumnDescription(currentPath()); if 
(primitive.getOriginalType() != null) { switch (primitive.getOriginalType()) { case ENUM: case JSON: case UTF8: return ParquetValueWriters.strings(desc); case DATE: case INT_8: case INT_16: case INT_32: case INT_64: case TIME_MICROS: case TIMESTAMP_MICROS: return ParquetValueWriters.unboxed(desc); case DECIMAL: DecimalMetadata decimal = primitive.getDecimalMetadata(); switch (primitive.getPrimitiveTypeName()) { case INT32: return ParquetValueWriters.decimalAsInteger( desc, decimal.getPrecision(), decimal.getScale()); case INT64: return ParquetValueWriters.decimalAsLong( desc, decimal.getPrecision(), decimal.getScale()); case BINARY: case FIXED_LEN_BYTE_ARRAY: return ParquetValueWriters.decimalAsFixed( desc, decimal.getPrecision(), decimal.getScale()); default: throw new UnsupportedOperationException( "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName()); } case BSON: return ParquetValueWriters.byteBuffers(desc); default: throw new UnsupportedOperationException( "Unsupported logical type: " + primitive.getOriginalType()); } } switch (primitive.getPrimitiveTypeName()) { case FIXED_LEN_BYTE_ARRAY: return new FixedWriter(desc); case BINARY: return ParquetValueWriters.byteBuffers(desc); case BOOLEAN: case INT32: case INT64: case FLOAT: case DOUBLE: return ParquetValueWriters.unboxed(desc); default: throw new UnsupportedOperationException("Unsupported type: " + primitive); } } private String[] currentPath() { String[] path = new String[fieldNames.size()]; if (!fieldNames.isEmpty()) { Iterator<String> iter = fieldNames.descendingIterator(); for (int i = 0; iter.hasNext(); i += 1) { path[i] = iter.next(); } } return path; } private String[] path(String name) { String[] path = new String[fieldNames.size() + 1]; path[fieldNames.size()] = name; if (!fieldNames.isEmpty()) { Iterator<String> iter = fieldNames.descendingIterator(); for (int i = 0; iter.hasNext(); i += 1) { path[i] = iter.next(); } } return path; } } private static class FixedWriter extends 
PrimitiveWriter<Fixed> { private FixedWriter(ColumnDescriptor desc) { super(desc); } @Override public void write(int repetitionLevel, Fixed buffer) { column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(buffer.bytes())); } } private static class RecordWriter extends StructWriter<IndexedRecord> { private RecordWriter(List<ParquetValueWriter<?>> writers) { super(writers); } @Override protected Object get(IndexedRecord struct, int index) { return struct.get(index); } } }
2,171
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriter.java
/**
 * A {@link FileAppender} that writes records to a Parquet file in size-targeted row groups.
 * <p>
 * Buffers records through a {@link ColumnWriteStore} backed by a page store, and flushes a
 * row group when the buffered size approaches the target. Uses reflection
 * ({@code DynConstructors}/{@code DynMethods}) to reach the package-private
 * {@code ColumnChunkPageWriteStore} class and its {@code flushToFileWriter} method.
 * <p>
 * Not thread-safe.
 */
class ParquetWriter<T> implements FileAppender<T>, Closeable {

  // reflective constructor for the package-private ColumnChunkPageWriteStore
  private static final DynConstructors.Ctor<PageWriteStore> pageStoreCtor = DynConstructors
      .builder(PageWriteStore.class)
      .hiddenImpl("org.apache.parquet.hadoop.ColumnChunkPageWriteStore",
          CodecFactory.BytesCompressor.class,
          MessageType.class,
          ByteBufferAllocator.class)
      .build();

  // reflective handle to ColumnChunkPageWriteStore#flushToFileWriter(ParquetFileWriter)
  private static final DynMethods.UnboundMethod flushToWriter = DynMethods
      .builder("flushToFileWriter")
      .hiddenImpl("org.apache.parquet.hadoop.ColumnChunkPageWriteStore", ParquetFileWriter.class)
      .build();

  private final OutputFile output;
  private final long targetRowGroupSize;
  private final Map<String, String> metadata;
  private final ParquetProperties props = ParquetProperties.builder()
      .withWriterVersion(PARQUET_1_0)
      .build();
  private final CodecFactory.BytesCompressor compressor;
  private final MessageType parquetSchema;
  private final ParquetValueWriter<T> model;
  private final ParquetFileWriter writer;

  // re-created for each row group by startRowGroup()
  private DynMethods.BoundMethod flushPageStoreToWriter;
  private ColumnWriteStore writeStore;
  private long nextRowGroupSize = 0;
  // records written into the current (unflushed) row group
  private long recordCount = 0;
  // next record count at which checkSize() re-estimates buffered size
  private long nextCheckRecordCount = 10;

  /**
   * Creates a writer and immediately starts the file and the first row group.
   *
   * @param conf Hadoop configuration used for codec and file IO
   * @param output destination file
   * @param schema Iceberg schema, converted to a Parquet message type named "table"
   * @param rowGroupSize target row group size in bytes
   * @param metadata key/value metadata written into the file footer on close
   * @param createWriterFunc builds the value-writer tree for the Parquet schema
   * @param codec compression codec for pages
   * @throws RuntimeIOException if the Parquet file cannot be created or started
   */
  @SuppressWarnings("unchecked")
  ParquetWriter(Configuration conf, OutputFile output, Schema schema, long rowGroupSize,
                Map<String, String> metadata,
                Function<MessageType, ParquetValueWriter<?>> createWriterFunc,
                CompressionCodecName codec) {
    this.output = output;
    this.targetRowGroupSize = rowGroupSize;
    this.metadata = ImmutableMap.copyOf(metadata);
    this.compressor = new CodecFactory(conf, props.getPageSizeThreshold()).getCompressor(codec);
    this.parquetSchema = convert(schema, "table");
    this.model = (ParquetValueWriter<T>) createWriterFunc.apply(parquetSchema);

    try {
      this.writer = new ParquetFileWriter(ParquetIO.file(output, conf), parquetSchema,
          ParquetFileWriter.Mode.OVERWRITE, rowGroupSize, 0);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create Parquet file");
    }

    try {
      writer.start();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to start Parquet file writer");
    }

    startRowGroup();
  }

  @Override
  public void add(T value) {
    recordCount += 1;
    model.write(0, value);
    writeStore.endRecord();
    checkSize();
  }

  @Override
  public Metrics metrics() {
    return ParquetMetrics.fromMetadata(writer.getFooter());
  }

  // Periodically checks buffered size and either flushes the row group or schedules
  // the next check based on the estimated average record size.
  private void checkSize() {
    if (recordCount >= nextCheckRecordCount) {
      long bufferedSize = writeStore.getBufferedSize();
      double avgRecordSize = ((double) bufferedSize) / recordCount;

      // flush when roughly two more average-sized records would exceed the target
      if (bufferedSize > (nextRowGroupSize - 2 * avgRecordSize)) {
        flushRowGroup(false);
      } else {
        long remainingSpace = nextRowGroupSize - bufferedSize;
        long remainingRecords = (long) (remainingSpace / avgRecordSize);
        // check again after roughly half of the estimated remaining capacity,
        // clamped to [100, 10000] records
        this.nextCheckRecordCount = recordCount + min(max(remainingRecords / 2, 100), 10000);
      }
    }
  }

  // Flushes the current row group to the file; when not finished, starts a new one.
  // Empty row groups (recordCount == 0) are skipped entirely.
  private void flushRowGroup(boolean finished) {
    try {
      if (recordCount > 0) {
        writer.startBlock(recordCount);
        writeStore.flush();
        flushPageStoreToWriter.invoke(writer);
        writer.endBlock();
        if (!finished) {
          startRowGroup();
        }
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to flush row group");
    }
  }

  // Resets per-row-group state: size target, record counters, and a fresh page store
  // plus column write store bound to the value-writer tree.
  private void startRowGroup() {
    try {
      this.nextRowGroupSize = min(writer.getNextRowGroupSize(), targetRowGroupSize);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
    this.nextCheckRecordCount = min(max(recordCount / 2, 100), 10000);
    this.recordCount = 0;

    PageWriteStore pageStore = pageStoreCtor.newInstance(
        compressor, parquetSchema, props.getAllocator());

    this.flushPageStoreToWriter = flushToWriter.bind(pageStore);
    this.writeStore = props.newColumnWriteStore(parquetSchema, pageStore);

    model.setColumnStore(writeStore);
  }

  /**
   * Flushes the final row group and writes the footer with the configured metadata.
   * <p>
   * NOTE(review): close() is not idempotent — a second call would flush and write the
   * footer again; callers should close exactly once.
   */
  @Override
  public void close() throws IOException {
    flushRowGroup(true);
    writeStore.close();
    writer.end(metadata);
  }
}
2,172
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/PruneColumns.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import org.apache.parquet.schema.Types; import java.util.List; import java.util.Set; class PruneColumns extends ParquetTypeVisitor<Type> { private final Set<Integer> selectedIds; PruneColumns(Set<Integer> selectedIds) { this.selectedIds = selectedIds; } @Override public Type message(MessageType message, List<Type> fields) { Types.MessageTypeBuilder builder = Types.buildMessage(); boolean hasChange = false; int fieldCount = 0; for (int i = 0; i < fields.size(); i += 1) { Type originalField = message.getType(i); Type field = fields.get(i); if (selectedIds.contains(getId(originalField))) { builder.addField(originalField); fieldCount += 1; } else if (field != null) { builder.addField(field); fieldCount += 1; hasChange = true; } } if (hasChange) { return builder.named(message.getName()); } else if (message.getFieldCount() == fieldCount) { return message; } return builder.named(message.getName()); } @Override public Type struct(GroupType struct, List<Type> fields) { boolean hasChange = false; List<Type> filteredFields = Lists.newArrayListWithExpectedSize(fields.size()); for (int i = 0; i < fields.size(); i += 
1) { Type originalField = struct.getType(i); Type field = fields.get(i); if (selectedIds.contains(getId(originalField))) { filteredFields.add(originalField); } else if (field != null) { filteredFields.add(originalField); hasChange = true; } } if (hasChange) { return struct.withNewFields(filteredFields); } else if (struct.getFieldCount() == filteredFields.size()) { return struct; } else if (!filteredFields.isEmpty()) { return struct.withNewFields(filteredFields); } return null; } @Override public Type list(GroupType list, Type element) { GroupType repeated = list.getType(0).asGroupType(); Type originalElement = repeated.getType(0); int elementId = getId(originalElement); if (selectedIds.contains(elementId)) { return list; } else if (element != null) { if (element != originalElement) { // the element type was projected return Types.list(list.getRepetition()) .element(element) .id(getId(list)) .named(list.getName()); } return list; } return null; } @Override public Type map(GroupType map, Type key, Type value) { GroupType repeated = map.getType(0).asGroupType(); Type originalKey = repeated.getType(0); Type originalValue = repeated.getType(1); int keyId = getId(originalKey); int valueId = getId(originalValue); if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) { return map; } else if (value != null) { if (value != originalValue) { return Types.map(map.getRepetition()) .key(originalKey) .value(value) .id(getId(map)) .named(map.getName()); } return map; } return null; } @Override public Type primitive(PrimitiveType primitive) { return null; } private int getId(Type type) { Preconditions.checkNotNull(type.getId(), "Missing id for type: " + type); return type.getId().intValue(); } }
2,173
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetReader.java
/**
 * A {@link CloseableIterable} over records in a Parquet file.
 * <p>
 * On first iteration, opens the file, resolves the projection (using embedded field ids when
 * present, falling back to position-based ids otherwise), and evaluates row-group filters once.
 * The resulting {@code ReadConf} is cached (minus the open reader) so later iterations reuse
 * the filter results and projection without re-evaluating them.
 */
public class ParquetReader<T> extends CloseableGroup implements CloseableIterable<T> {
  private final InputFile input;
  private final Schema expectedSchema;
  private final ParquetReadOptions options;
  private final Function<MessageType, ParquetValueReader<?>> readerFunc;
  private final Expression filter;
  private final boolean reuseContainers;

  /**
   * @param input the Parquet file to read
   * @param expectedSchema the Iceberg schema to project
   * @param options Parquet read options
   * @param readerFunc builds the value-reader tree for the (id-annotated) file schema
   * @param filter residual row filter; alwaysTrue is treated as no filter
   * @param reuseContainers whether to reuse the previous record container on each read
   */
  public ParquetReader(InputFile input, Schema expectedSchema, ParquetReadOptions options,
                       Function<MessageType, ParquetValueReader<?>> readerFunc,
                       Expression filter, boolean reuseContainers) {
    this.input = input;
    this.expectedSchema = expectedSchema;
    this.options = options;
    this.readerFunc = readerFunc;
    // replace alwaysTrue with null to avoid extra work evaluating a trivial filter
    this.filter = filter == Expressions.alwaysTrue() ? null : filter;
    this.reuseContainers = reuseContainers;
  }

  /**
   * Immutable per-file read configuration: projection, value reader, and per-row-group
   * skip decisions. The copy constructor drops the open reader so the cached copy is cheap;
   * {@link #reader()} reopens the file when needed.
   */
  private static class ReadConf<T> {
    private final ParquetFileReader reader;
    private final InputFile file;
    private final ParquetReadOptions options;
    private final MessageType projection;
    private final ParquetValueReader<T> model;
    private final List<BlockMetaData> rowGroups;
    // shouldSkip[i] is true when row group i was eliminated by the stats/dictionary filters
    private final boolean[] shouldSkip;
    private final long totalValues;
    private final boolean reuseContainers;

    @SuppressWarnings("unchecked")
    ReadConf(InputFile file, ParquetReadOptions options, Schema expectedSchema, Expression filter,
             Function<MessageType, ParquetValueReader<?>> readerFunc, boolean reuseContainers) {
      this.file = file;
      this.options = options;
      this.reader = newReader(file, options);

      MessageType fileSchema = reader.getFileMetaData().getSchema();

      boolean hasIds = hasIds(fileSchema);
      // if the file has no field ids, assign fallback (position-based) ids for the reader
      MessageType typeWithIds = hasIds ? fileSchema : addFallbackIds(fileSchema);

      this.projection = hasIds ?
          pruneColumns(fileSchema, expectedSchema) :
          pruneColumnsFallback(fileSchema, expectedSchema);
      this.model = (ParquetValueReader<T>) readerFunc.apply(typeWithIds);
      this.rowGroups = reader.getRowGroups();
      this.shouldSkip = new boolean[rowGroups.size()];

      ParquetMetricsRowGroupFilter statsFilter = null;
      ParquetDictionaryRowGroupFilter dictFilter = null;
      if (filter != null) {
        statsFilter = new ParquetMetricsRowGroupFilter(expectedSchema, filter);
        dictFilter = new ParquetDictionaryRowGroupFilter(expectedSchema, filter);
      }

      long totalValues = 0L;
      for (int i = 0; i < shouldSkip.length; i += 1) {
        BlockMetaData rowGroup = rowGroups.get(i);
        // a row group is read when there is no filter, or both filters allow it
        // (filters are non-null here whenever filter != null)
        boolean shouldRead = filter == null || (
            statsFilter.shouldRead(typeWithIds, rowGroup) &&
            dictFilter.shouldRead(typeWithIds, rowGroup, reader.getDictionaryReader(rowGroup)));
        this.shouldSkip[i] = !shouldRead;
        if (shouldRead) {
          totalValues += rowGroup.getRowCount();
        }
      }

      this.totalValues = totalValues;
      this.reuseContainers = reuseContainers;
    }

    // Copy constructor used to cache the configuration without the open file reader.
    ReadConf(ReadConf<T> toCopy) {
      this.reader = null;
      this.file = toCopy.file;
      this.options = toCopy.options;
      this.projection = toCopy.projection;
      this.model = toCopy.model;
      this.rowGroups = toCopy.rowGroups;
      this.shouldSkip = toCopy.shouldSkip;
      this.totalValues = toCopy.totalValues;
      this.reuseContainers = toCopy.reuseContainers;
    }

    // Returns the existing reader (first use) or opens a fresh one (cached copies),
    // in both cases with the projection applied.
    ParquetFileReader reader() {
      if (reader != null) {
        reader.setRequestedSchema(projection);
        return reader;
      }

      ParquetFileReader newReader = newReader(file, options);
      newReader.setRequestedSchema(projection);
      return newReader;
    }

    ParquetValueReader<T> model() {
      return model;
    }

    boolean[] shouldSkip() {
      return shouldSkip;
    }

    long totalValues() {
      return totalValues;
    }

    boolean reuseContainers() {
      return reuseContainers;
    }

    ReadConf<T> copy() {
      return new ReadConf<>(this);
    }

    private static ParquetFileReader newReader(InputFile file, ParquetReadOptions options) {
      try {
        return ParquetFileReader.open(ParquetIO.file(file), options);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to open Parquet file: %s", file.location());
      }
    }
  }

  // lazily-initialized cached configuration (stored without an open reader)
  private ReadConf<T> conf = null;

  // On first call, builds a ReadConf (with an open reader), caches a reader-less copy,
  // and hands the original — with its already-open reader — to the first iterator.
  private ReadConf<T> init() {
    if (conf == null) {
      ReadConf<T> conf = new ReadConf<>(
          input, options, expectedSchema, filter, readerFunc, reuseContainers);
      this.conf = conf.copy();
      return conf;
    }

    return conf;
  }

  @Override
  public Iterator<T> iterator() {
    FileIterator<T> iter = new FileIterator<>(init());
    // register with CloseableGroup so the file reader is closed with this iterable
    addCloseable(iter);
    return iter;
  }

  /** Iterates records across the non-skipped row groups of a single file. */
  private static class FileIterator<T> implements Iterator<T>, Closeable {
    private final ParquetFileReader reader;
    private final boolean[] shouldSkip;
    private final ParquetValueReader<T> model;
    private final long totalValues;
    private final boolean reuseContainers;

    private int nextRowGroup = 0;
    // global record index at which the next row group begins
    private long nextRowGroupStart = 0;
    private long valuesRead = 0;
    private T last = null;

    FileIterator(ReadConf<T> conf) {
      this.reader = conf.reader();
      this.shouldSkip = conf.shouldSkip();
      this.model = conf.model();
      this.totalValues = conf.totalValues();
      this.reuseContainers = conf.reuseContainers();
    }

    @Override
    public boolean hasNext() {
      return valuesRead < totalValues;
    }

    @Override
    public T next() {
      if (valuesRead >= nextRowGroupStart) {
        // exhausted the current row group; move to the next non-skipped one
        advance();
      }

      if (reuseContainers) {
        this.last = model.read(last);
      } else {
        this.last = model.read(null);
      }
      valuesRead += 1;

      return last;
    }

    // Skips filtered row groups, reads the next row group's pages, and points the
    // value reader at them.
    private void advance() {
      while (shouldSkip[nextRowGroup]) {
        nextRowGroup += 1;
        reader.skipNextRowGroup();
      }

      PageReadStore pages;
      try {
        pages = reader.readNextRowGroup();
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }

      nextRowGroupStart += pages.getRowCount();

      model.setPageSource(pages);
    }

    @Override
    public void close() throws IOException {
      reader.close();
    }
  }
}
2,174
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TypeToMessageType.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type.NestedType;
import com.netflix.iceberg.types.Type.PrimitiveType;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types.DecimalType;
import com.netflix.iceberg.types.Types.FixedType;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.NestedField;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.Type;
import org.apache.parquet.schema.Types;

import static org.apache.parquet.schema.OriginalType.DATE;
import static org.apache.parquet.schema.OriginalType.DECIMAL;
import static org.apache.parquet.schema.OriginalType.TIMESTAMP_MICROS;
import static org.apache.parquet.schema.OriginalType.TIME_MICROS;
import static org.apache.parquet.schema.OriginalType.UTF8;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BOOLEAN;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.DOUBLE;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.FIXED_LEN_BYTE_ARRAY;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.FLOAT;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32;
import static org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64;

/**
 * Converts an Iceberg {@link Schema} (and its nested Iceberg types) to an
 * equivalent Parquet {@link MessageType}, preserving Iceberg field IDs on the
 * resulting Parquet types so columns can be matched back by ID.
 */
public class TypeToMessageType {
  // decimals up to these precisions fit losslessly in a Parquet INT32 / INT64
  public static final int DECIMAL_INT32_MAX_DIGITS = 9;
  public static final int DECIMAL_INT64_MAX_DIGITS = 18;

  /**
   * Convert an Iceberg schema to a Parquet message type.
   *
   * @param schema the Iceberg schema to convert
   * @param name the name for the resulting Parquet message type
   * @return a Parquet MessageType with one field per top-level Iceberg column
   */
  public MessageType convert(Schema schema, String name) {
    Types.MessageTypeBuilder builder = Types.buildMessage();
    for (NestedField field : schema.columns()) {
      builder.addField(field(field));
    }
    return builder.named(name);
  }

  /**
   * Convert an Iceberg struct to a Parquet group type.
   *
   * @param struct the Iceberg struct type
   * @param repetition the Parquet repetition (OPTIONAL or REQUIRED) of the group
   * @param id the Iceberg field ID to attach to the group
   * @param name the field name
   * @return a Parquet GroupType carrying the given ID
   */
  public GroupType struct(StructType struct, Type.Repetition repetition, int id, String name) {
    Types.GroupBuilder<GroupType> builder = Types.buildGroup(repetition);
    for (NestedField field : struct.fields()) {
      builder.addField(field(field));
    }
    return builder.id(id).named(name);
  }

  /**
   * Convert a single Iceberg field to the corresponding Parquet type,
   * dispatching to {@link #primitive}, {@link #struct}, {@link #map}, or
   * {@link #list} based on the field's type.
   *
   * @param field the Iceberg field to convert
   * @return the Parquet type for the field
   * @throws UnsupportedOperationException if the nested type is not struct, map, or list
   */
  public Type field(NestedField field) {
    // optional Iceberg fields become OPTIONAL Parquet fields; required become REQUIRED
    Type.Repetition repetition = field.isOptional() ?
        Type.Repetition.OPTIONAL : Type.Repetition.REQUIRED;
    int id = field.fieldId();
    String name = field.name();

    if (field.type().isPrimitiveType()) {
      return primitive(field.type().asPrimitiveType(), repetition, id, name);

    } else {
      NestedType nested = field.type().asNestedType();
      if (nested.isStructType()) {
        return struct(nested.asStructType(), repetition, id, name);
      } else if (nested.isMapType()) {
        return map(nested.asMapType(), repetition, id, name);
      } else if (nested.isListType()) {
        return list(nested.asListType(), repetition, id, name);
      }
      throw new UnsupportedOperationException("Can't convert unknown type: " + nested);
    }
  }

  /**
   * Convert an Iceberg list to a Parquet 3-level list group.
   *
   * @param list the Iceberg list type; its single field is the element
   * @param repetition the Parquet repetition of the list group itself
   * @param id the Iceberg field ID to attach to the list group
   * @param name the field name
   * @return a Parquet list GroupType
   */
  public GroupType list(ListType list, Type.Repetition repetition, int id, String name) {
    NestedField elementField = list.fields().get(0);
    return Types.list(repetition)
        .element(field(elementField))
        .id(id)
        .named(name);
  }

  /**
   * Convert an Iceberg map to a Parquet map group.
   *
   * @param map the Iceberg map type; fields 0 and 1 are key and value
   * @param repetition the Parquet repetition of the map group itself
   * @param id the Iceberg field ID to attach to the map group
   * @param name the field name
   * @return a Parquet map GroupType
   */
  public GroupType map(MapType map, Type.Repetition repetition, int id, String name) {
    NestedField keyField = map.fields().get(0);
    NestedField valueField = map.fields().get(1);
    return Types.map(repetition)
        .key(field(keyField))
        .value(field(valueField))
        .id(id)
        .named(name);
  }

  /**
   * Convert an Iceberg primitive type to a Parquet primitive type, choosing
   * physical storage and OriginalType annotations per the Iceberg spec:
   * date as INT32/DATE, time and timestamp as INT64 in microseconds, string
   * as UTF8 binary, UUID as 16-byte fixed, and decimals stored as INT32,
   * INT64, or fixed-length binary depending on precision.
   *
   * @param primitive the Iceberg primitive type
   * @param repetition the Parquet repetition for the field
   * @param id the Iceberg field ID to attach
   * @param name the field name
   * @return the Parquet primitive type
   * @throws UnsupportedOperationException for unrecognized Iceberg type IDs
   */
  public Type primitive(PrimitiveType primitive, Type.Repetition repetition, int id, String name) {
    switch (primitive.typeId()) {
      case BOOLEAN:
        return Types.primitive(BOOLEAN, repetition).id(id).named(name);
      case INTEGER:
        return Types.primitive(INT32, repetition).id(id).named(name);
      case LONG:
        return Types.primitive(INT64, repetition).id(id).named(name);
      case FLOAT:
        return Types.primitive(FLOAT, repetition).id(id).named(name);
      case DOUBLE:
        return Types.primitive(DOUBLE, repetition).id(id).named(name);
      case DATE:
        return Types.primitive(INT32, repetition).as(DATE).id(id).named(name);
      case TIME:
        return Types.primitive(INT64, repetition).as(TIME_MICROS).id(id).named(name);
      case TIMESTAMP:
        return Types.primitive(INT64, repetition).as(TIMESTAMP_MICROS).id(id).named(name);
      case STRING:
        return Types.primitive(BINARY, repetition).as(UTF8).id(id).named(name);
      case BINARY:
        return Types.primitive(BINARY, repetition).id(id).named(name);
      case FIXED:
        FixedType fixed = (FixedType) primitive;
        return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(fixed.length())
            .id(id)
            .named(name);

      case DECIMAL:
        DecimalType decimal = (DecimalType) primitive;

        if (decimal.precision() <= DECIMAL_INT32_MAX_DIGITS) {
          // store as an int
          return Types.primitive(INT32, repetition)
              .as(DECIMAL)
              .precision(decimal.precision())
              .scale(decimal.scale())
              .id(id)
              .named(name);

        } else if (decimal.precision() <= DECIMAL_INT64_MAX_DIGITS) {
          // store as a long
          return Types.primitive(INT64, repetition)
              .as(DECIMAL)
              .precision(decimal.precision())
              .scale(decimal.scale())
              .id(id)
              .named(name);

        } else {
          // store as a fixed-length array
          // (sic: "Requried" is the spelling of the TypeUtil method name)
          int minLength = TypeUtil.decimalRequriedBytes(decimal.precision());
          return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(minLength)
              .as(DECIMAL)
              .precision(decimal.precision())
              .scale(decimal.scale())
              .id(id)
              .named(name);
        }

      case UUID:
        // UUIDs are stored as 16 raw bytes
        return Types.primitive(FIXED_LEN_BYTE_ARRAY, repetition).length(16).id(id).named(name);

      default:
        throw new UnsupportedOperationException("Unsupported type for Parquet: " + primitive);
    }
  }
}
2,175
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; public class ParquetAvroReader { }
2,176
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/TripleWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.parquet; import org.apache.parquet.io.api.Binary; public interface TripleWriter<T> { // TODO: should definition level be included, or should it be part of the column? /** * Write a value. * * @param rl repetition level * @param value the value */ void write(int rl, T value); /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeBoolean(int rl, boolean value) { throw new UnsupportedOperationException("Not a boolean column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeInteger(int rl, int value) { throw new UnsupportedOperationException("Not an integer column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeLong(int rl, long value) { throw new UnsupportedOperationException("Not an long column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeFloat(int rl, float value) { throw new UnsupportedOperationException("Not an float column"); } /** * Write a triple. * * @param rl repetition level * @param value the boolean value */ default void writeDouble(int rl, double value) { throw new UnsupportedOperationException("Not an double column"); } /** * Write a triple. 
* * @param rl repetition level * @param value the boolean value */ default void writeBinary(int rl, Binary value) { throw new UnsupportedOperationException("Not an binary column"); } /** * Write a triple for a null value. * * @param rl repetition level * @param dl definition level */ void writeNull(int rl, int dl); }
2,177
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetMetricsRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.BoundReference;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.parquet.column.statistics.Statistics;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;

import java.util.Map;
import java.util.function.Function;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;
import static com.netflix.iceberg.parquet.ParquetConversions.converterFromParquet;

/**
 * Conservatively evaluates a bound Iceberg filter expression against the
 * column statistics (min/max/null counts) of a single Parquet row group,
 * answering "might rows in this row group match?".
 *
 * <p>Answers are inclusive: {@code true} means the row group cannot be
 * pruned; {@code false} is only returned when the statistics prove no row
 * can match. Thread-safety comes from a per-thread visitor instance.
 */
public class ParquetMetricsRowGroupFilter {
  private final Schema schema;
  private final StructType struct;
  // expression bound to the schema's struct, with NOT pushed down by rewriteNot
  private final Expression expr;
  // transient so serialized copies re-create their own per-thread visitors
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  // lazily creates the ThreadLocal; each thread gets its own mutable visitor
  // NOTE(review): the lazy init itself is not synchronized — presumably benign
  // because losing the race just allocates a redundant ThreadLocal; confirm.
  private MetricsEvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  public ParquetMetricsRowGroupFilter(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the file may contain records that match the expression.
   *
   * @param fileSchema schema for the Parquet file
   * @param rowGroup metadata for a row group
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean shouldRead(MessageType fileSchema, BlockMetaData rowGroup) {
    return visitor().eval(fileSchema, rowGroup);
  }

  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  /**
   * Visits the bound expression bottom-up; each predicate method returns
   * ROWS_MIGHT_MATCH unless the column's statistics rule out any match.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    // all three maps are keyed by Iceberg field ID, rebuilt per row group
    private Map<Integer, Statistics> stats = null;
    private Map<Integer, Long> valueCounts = null;
    // converts Parquet-native stat values back to Iceberg literal types
    private Map<Integer, Function<Object, Object>> conversions = null;

    private boolean eval(MessageType fileSchema, BlockMetaData rowGroup) {
      if (rowGroup.getRowCount() <= 0) {
        return ROWS_CANNOT_MATCH;
      }

      this.stats = Maps.newHashMap();
      this.valueCounts = Maps.newHashMap();
      this.conversions = Maps.newHashMap();
      for (ColumnChunkMetaData col : rowGroup.getColumns()) {
        PrimitiveType colType = fileSchema.getType(col.getPath().toArray()).asPrimitiveType();
        // columns without a field ID cannot be matched to the Iceberg schema
        if (colType.getId() != null) {
          int id = colType.getId().intValue();
          stats.put(id, col.getStatistics());
          valueCounts.put(id, col.getValueCount());
          conversions.put(id, converterFromParquet(colType));
        }
      }

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_MIGHT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty() && colStats.getNumNulls() == 0) {
        // there are stats and no values are null => all values are non-null
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has no non-null values, the expression cannot match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && valueCount - colStats.getNumNulls() == 0) {
        // (num nulls == value count) => all values are null => no non-null values
        return ROWS_CANNOT_MATCH;
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the row group's minimum is already >= lit, no value can be < lit
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the row group's minimum is already > lit, no value can be <= lit
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the row group's maximum is already <= lit, no value can be > lit
        T upper = max(colStats, id);
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // if the row group's maximum is already < lit, no value can be >= lit
        T upper = max(colStats, id);
        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field,
          "Cannot filter by nested column: %s", schema.findField(id));

      Long valueCount = valueCounts.get(id);
      if (valueCount == null) {
        // the column is not present and is all nulls
        return ROWS_CANNOT_MATCH;
      }

      Statistics<?> colStats = stats.get(id);
      if (colStats != null && !colStats.isEmpty()) {
        if (!colStats.hasNonNullValue()) {
          return ROWS_CANNOT_MATCH;
        }

        // lit must fall within [min, max] for any row to equal it
        T lower = min(colStats, id);
        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_CANNOT_MATCH;
        }

        T upper = max(colStats, id);
        cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_CANNOT_MATCH;
        }
      }

      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // because the bounds are not necessarily a min or max value, this cannot be answered using
      // them. notEq(col, X) with (X, Y) doesn't guarantee that X is a value in col.
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      // min/max cannot prove set membership is impossible for every element
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // see notEq: bounds cannot prove a value is present in the column
      return ROWS_MIGHT_MATCH;
    }

    // converts the raw Parquet min back into the Iceberg literal type for the column
    @SuppressWarnings("unchecked")
    private <T> T min(Statistics<?> stats, int id) {
      return (T) conversions.get(id).apply(stats.genericGetMin());
    }

    // converts the raw Parquet max back into the Iceberg literal type for the column
    @SuppressWarnings("unchecked")
    private <T> T max(Statistics<?> stats, int id) {
      return (T) conversions.get(id).apply(stats.genericGetMax());
    }
  }
}
2,178
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetFilters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.BoundReference;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expression.Operation;
import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.filter2.compat.FilterCompat;
import org.apache.parquet.filter2.predicate.FilterApi;
import org.apache.parquet.filter2.predicate.FilterPredicate;
import org.apache.parquet.filter2.predicate.Operators;
import org.apache.parquet.io.api.Binary;

import java.nio.ByteBuffer;

import static com.netflix.iceberg.expressions.ExpressionVisitors.visit;

/**
 * Converts Iceberg filter {@link Expression}s into Parquet
 * {@link FilterPredicate}s wrapped in {@link FilterCompat.Filter}.
 *
 * <p>AlwaysTrue/AlwaysFalse placeholder predicates are used internally so
 * trivially-true subtrees collapse to {@link FilterCompat#NOOP}.
 */
class ParquetFilters {

  /**
   * Convert an Iceberg expression bound against the full table schema.
   *
   * @param schema the Iceberg schema used to bind and resolve column aliases
   * @param expr the filter expression to convert
   * @return a Parquet filter, or FilterCompat.NOOP if the expression is trivially true
   */
  static FilterCompat.Filter convert(Schema schema, Expression expr) {
    FilterPredicate pred = visit(expr, new ConvertFilterToParquet(schema));
    // TODO: handle AlwaysFalse.INSTANCE
    if (pred != null && pred != AlwaysTrue.INSTANCE) {
      // FilterCompat will apply LogicalInverseRewriter
      return FilterCompat.get(pred);
    } else {
      return FilterCompat.NOOP;
    }
  }

  /**
   * Convert an Iceberg expression bound against a single struct column's fields
   * (used to filter by partition data stored in a struct column).
   *
   * @param schema the Iceberg schema containing the struct column
   * @param column the name of the struct column whose fields the predicate references
   * @param expr the filter expression to convert
   * @return a Parquet filter, or FilterCompat.NOOP if the expression is trivially true
   */
  static FilterCompat.Filter convertColumnFilter(Schema schema, String column, Expression expr) {
    FilterPredicate pred = visit(expr, new ConvertColumnFilterToParquet(schema, column));
    // TODO: handle AlwaysFalse.INSTANCE
    if (pred != null && pred != AlwaysTrue.INSTANCE) {
      // FilterCompat will apply LogicalInverseRewriter
      return FilterCompat.get(pred);
    } else {
      return FilterCompat.NOOP;
    }
  }

  private static class ConvertFilterToParquet extends ExpressionVisitor<FilterPredicate> {
    private final Schema schema;

    private ConvertFilterToParquet(Schema schema) {
      this.schema = schema;
    }

    @Override
    public FilterPredicate alwaysTrue() {
      return AlwaysTrue.INSTANCE;
    }

    @Override
    public FilterPredicate alwaysFalse() {
      return AlwaysFalse.INSTANCE;
    }

    @Override
    public FilterPredicate not(FilterPredicate child) {
      // fold placeholders instead of wrapping them, since FilterApi cannot represent them
      if (child == AlwaysTrue.INSTANCE) {
        return AlwaysFalse.INSTANCE;
      } else if (child == AlwaysFalse.INSTANCE) {
        return AlwaysTrue.INSTANCE;
      }
      return FilterApi.not(child);
    }

    @Override
    public FilterPredicate and(FilterPredicate left, FilterPredicate right) {
      if (left == AlwaysFalse.INSTANCE || right == AlwaysFalse.INSTANCE) {
        return AlwaysFalse.INSTANCE;
      } else if (left == AlwaysTrue.INSTANCE) {
        return right;
      } else if (right == AlwaysTrue.INSTANCE) {
        return left;
      }
      return FilterApi.and(left, right);
    }

    @Override
    public FilterPredicate or(FilterPredicate left, FilterPredicate right) {
      if (left == AlwaysTrue.INSTANCE || right == AlwaysTrue.INSTANCE) {
        return AlwaysTrue.INSTANCE;
      } else if (left == AlwaysFalse.INSTANCE) {
        return right;
      } else if (right == AlwaysFalse.INSTANCE) {
        return left;
      }
      return FilterApi.or(left, right);
    }

    /**
     * Convert a bound predicate by dispatching on the referenced column's type.
     *
     * @throws UnsupportedOperationException if the type or operation is not convertible
     */
    @Override
    public <T> FilterPredicate predicate(BoundPredicate<T> pred) {
      Operation op = pred.op();
      BoundReference<T> ref = pred.ref();
      Literal<T> lit = pred.literal();
      String path = schema.idToAlias(ref.fieldId());

      switch (ref.type().typeId()) {
        case BOOLEAN:
          // BooleanColumn only supports eq/notEq in FilterApi
          Operators.BooleanColumn col = FilterApi.booleanColumn(path);
          switch (op) {
            case EQ:
              return FilterApi.eq(col, getParquetPrimitive(lit));
            case NOT_EQ:
              // fixed: previously returned FilterApi.eq, inverting the predicate
              return FilterApi.notEq(col, getParquetPrimitive(lit));
          }
          // fixed: previously fell through into the INTEGER case for other ops;
          // break so unsupported boolean ops reach the throw below
          break;
        case INTEGER:
          return pred(op, FilterApi.intColumn(path), getParquetPrimitive(lit));
        case LONG:
          return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit));
        case FLOAT:
          return pred(op, FilterApi.floatColumn(path), getParquetPrimitive(lit));
        case DOUBLE:
          return pred(op, FilterApi.doubleColumn(path), getParquetPrimitive(lit));
        case DATE:
          return pred(op, FilterApi.intColumn(path), getParquetPrimitive(lit));
        case TIME:
          return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit));
        case TIMESTAMP:
          return pred(op, FilterApi.longColumn(path), getParquetPrimitive(lit));
        case STRING:
          return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit));
        case UUID:
          return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit));
        case FIXED:
          return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit));
        case BINARY:
          return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit));
        case DECIMAL:
          return pred(op, FilterApi.binaryColumn(path), getParquetPrimitive(lit));
      }

      throw new UnsupportedOperationException("Cannot convert to Parquet filter: " + pred);
    }

    // binds against the full schema; overridden by the column-filter subclass
    protected Expression bind(UnboundPredicate<?> pred) {
      return pred.bind(schema.asStruct());
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> FilterPredicate predicate(UnboundPredicate<T> pred) {
      Expression bound = bind(pred);
      if (bound instanceof BoundPredicate) {
        return predicate((BoundPredicate<?>) bound);
      } else if (bound == Expressions.alwaysTrue()) {
        return AlwaysTrue.INSTANCE;
      } else if (bound == Expressions.alwaysFalse()) {
        return AlwaysFalse.INSTANCE;
      }
      throw new UnsupportedOperationException("Cannot convert to Parquet filter: " + pred);
    }
  }

  private static class ConvertColumnFilterToParquet extends ConvertFilterToParquet {
    private final Types.StructType partitionStruct;

    private ConvertColumnFilterToParquet(Schema schema, String column) {
      super(schema);
      this.partitionStruct = schema.findField(column).type().asNestedType().asStructType();
    }

    @Override
    protected Expression bind(UnboundPredicate<?> pred) {
      // instead of binding the predicate using the top-level schema, bind it to the partition data
      return pred.bind(partitionStruct);
    }
  }

  /**
   * Build a comparison predicate for columns that support lt/gt operators.
   *
   * @throws UnsupportedOperationException for operations FilterApi cannot express
   */
  private static
  <C extends Comparable<C>, COL extends Operators.Column<C> & Operators.SupportsLtGt>
  FilterPredicate pred(Operation op, COL col, C value) {
    switch (op) {
      case IS_NULL:
        return FilterApi.eq(col, null);
      case NOT_NULL:
        return FilterApi.notEq(col, null);
      case EQ:
        return FilterApi.eq(col, value);
      case NOT_EQ:
        return FilterApi.notEq(col, value);
      case GT:
        return FilterApi.gt(col, value);
      case GT_EQ:
        return FilterApi.gtEq(col, value);
      case LT:
        return FilterApi.lt(col, value);
      case LT_EQ:
        return FilterApi.ltEq(col, value);
      default:
        throw new UnsupportedOperationException("Unsupported predicate operation: " + op);
    }
  }

  /**
   * Convert an Iceberg literal value to the Parquet-native comparable type
   * (numbers and booleans pass through; strings and buffers become Binary).
   *
   * @throws UnsupportedOperationException for unconverted types (BigDecimal, UUID)
   */
  @SuppressWarnings("unchecked")
  private static <C extends Comparable<C>> C getParquetPrimitive(Literal<?> lit) {
    if (lit == null) {
      return null;
    }

    // TODO: this needs to convert to handle BigDecimal and UUID
    Object value = lit.value();
    if (value instanceof Number) {
      return (C) lit.value();
    } else if (value instanceof Boolean) {
      // fixed: boolean literals previously fell through to the unsupported-type error
      return (C) value;
    } else if (value instanceof CharSequence) {
      return (C) Binary.fromString(value.toString());
    } else if (value instanceof ByteBuffer) {
      return (C) Binary.fromReusedByteBuffer((ByteBuffer) value);
    }
    throw new UnsupportedOperationException(
        "Type not supported yet: " + value.getClass().getName());
  }

  // placeholder meaning "keep all rows"; must never reach a Parquet visitor
  private static class AlwaysTrue implements FilterPredicate {
    static final AlwaysTrue INSTANCE = new AlwaysTrue();

    @Override
    public <R> R accept(Visitor<R> visitor) {
      throw new UnsupportedOperationException("AlwaysTrue is a placeholder only");
    }
  }

  // placeholder meaning "drop all rows"; must never reach a Parquet visitor
  private static class AlwaysFalse implements FilterPredicate {
    static final AlwaysFalse INSTANCE = new AlwaysFalse();

    @Override
    public <R> R accept(Visitor<R> visitor) {
      // fixed: message previously said "AlwaysTrue is a placeholder only"
      throw new UnsupportedOperationException("AlwaysFalse is a placeholder only");
    }
  }
}
2,179
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/Parquet.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.SchemaParser;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.hadoop.HadoopOutputFile;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.parquet.HadoopReadOptions;
import org.apache.parquet.ParquetReadOptions;
import org.apache.parquet.avro.AvroReadSupport;
import org.apache.parquet.avro.AvroWriteSupport;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.ParquetFileWriter;
import org.apache.parquet.hadoop.ParquetReader;
import org.apache.parquet.hadoop.ParquetWriter;
import org.apache.parquet.hadoop.api.ReadSupport;
import org.apache.parquet.hadoop.api.WriteSupport;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;
import org.apache.parquet.schema.MessageType;

import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

import static com.netflix.iceberg.TableProperties.PARQUET_COMPRESSION;
import static com.netflix.iceberg.TableProperties.PARQUET_COMPRESSION_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_DICT_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_PAGE_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT;

/**
 * Entry point for reading and writing Iceberg data files in Parquet format.
 * Use {@link #write(OutputFile)} and {@link #read(InputFile)} to obtain builders.
 */
public class Parquet {
  private Parquet() {
  }

  public static WriteBuilder write(OutputFile file) {
    return new WriteBuilder(file);
  }

  /** Builder for a Parquet {@link FileAppender}. */
  public static class WriteBuilder {
    private final OutputFile file;
    private Schema schema = null;
    private String name = "table";
    private WriteSupport<?> writeSupport = null;
    private Map<String, String> metadata = Maps.newLinkedHashMap();
    private Map<String, String> config = Maps.newLinkedHashMap();
    private Function<MessageType, ParquetValueWriter<?>> createWriterFunc = null;

    private WriteBuilder(OutputFile file) {
      this.file = file;
    }

    /** Configures this builder from a table's schema and properties. */
    public WriteBuilder forTable(Table table) {
      schema(table.schema());
      setAll(table.properties());
      return this;
    }

    public WriteBuilder schema(Schema schema) {
      this.schema = schema;
      return this;
    }

    public WriteBuilder named(String name) {
      this.name = name;
      return this;
    }

    public WriteBuilder writeSupport(WriteSupport<?> writeSupport) {
      this.writeSupport = writeSupport;
      return this;
    }

    public WriteBuilder set(String property, String value) {
      config.put(property, value);
      return this;
    }

    public WriteBuilder setAll(Map<String, String> properties) {
      config.putAll(properties);
      return this;
    }

    public WriteBuilder meta(String property, String value) {
      metadata.put(property, value);
      return this;
    }

    public WriteBuilder createWriterFunc(
        Function<MessageType, ParquetValueWriter<?>> createWriterFunc) {
      this.createWriterFunc = createWriterFunc;
      return this;
    }

    @SuppressWarnings("unchecked")
    private <T> WriteSupport<T> getWriteSupport(MessageType type) {
      if (writeSupport != null) {
        return (WriteSupport<T>) writeSupport;
      } else {
        // default to Avro write support over the converted Parquet schema
        return new AvroWriteSupport<>(
            type,
            ParquetAvro.parquetAvroSchema(AvroSchemaUtil.convert(schema, name)),
            ParquetAvro.DEFAULT_MODEL);
      }
    }

    /** Resolves the compression codec from table properties, failing fast on unknown names. */
    private CompressionCodecName codec() {
      String codec = config.getOrDefault(PARQUET_COMPRESSION, PARQUET_COMPRESSION_DEFAULT);
      try {
        return CompressionCodecName.valueOf(codec.toUpperCase(Locale.ENGLISH));
      } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Unsupported compression codec: " + codec);
      }
    }

    /** Copies an Iceberg table property into the Parquet config under Parquet's key. */
    void forwardConfig(String parquetProperty, String icebergProperty, String defaultValue) {
      String value = config.getOrDefault(icebergProperty, defaultValue);
      if (value != null) {
        set(parquetProperty, value);
      }
    }

    public <D> FileAppender<D> build() throws IOException {
      Preconditions.checkNotNull(schema, "Schema is required");
      Preconditions.checkNotNull(name, "Table name is required and cannot be null");

      // add the Iceberg schema to keyValueMetadata
      meta("iceberg.schema", SchemaParser.toJson(schema));

      // add Parquet configuration
      forwardConfig("parquet.block.size",
          PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT);
      forwardConfig("parquet.page.size",
          PARQUET_PAGE_SIZE_BYTES, PARQUET_PAGE_SIZE_BYTES_DEFAULT);
      forwardConfig("parquet.dictionary.page.size",
          PARQUET_DICT_SIZE_BYTES, PARQUET_DICT_SIZE_BYTES_DEFAULT);

      set("parquet.avro.write-old-list-structure", "false");
      MessageType type = ParquetSchemaUtil.convert(schema, name);

      if (createWriterFunc != null) {
        Preconditions.checkArgument(writeSupport == null,
            "Cannot write with both write support and Parquet value writer");
        Configuration conf;
        // BUG FIX: this previously tested `file instanceof HadoopInputFile`, which can never be
        // true for an OutputFile, so the Hadoop Configuration was always freshly created
        if (file instanceof HadoopOutputFile) {
          conf = ((HadoopOutputFile) file).getConf();
        } else {
          conf = new Configuration();
        }

        for (Map.Entry<String, String> entry : config.entrySet()) {
          conf.set(entry.getKey(), entry.getValue());
        }

        long rowGroupSize = Long.parseLong(config.getOrDefault(
            PARQUET_ROW_GROUP_SIZE_BYTES, PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT));
        return new com.netflix.iceberg.parquet.ParquetWriter<>(
            conf, file, schema, rowGroupSize, metadata, createWriterFunc, codec());
      } else {
        return new ParquetWriteAdapter<>(new ParquetWriteBuilder<D>(ParquetIO.file(file))
            .setType(type)
            .setConfig(config)
            .setKeyValueMetadata(metadata)
            .setWriteSupport(getWriteSupport(type))
            .withCompressionCodec(codec())
            .withWriteMode(ParquetFileWriter.Mode.OVERWRITE) // TODO: support modes
            .build());
      }
    }
  }

  private static class ParquetWriteBuilder<T>
      extends ParquetWriter.Builder<T, ParquetWriteBuilder<T>> {
    private Map<String, String> keyValueMetadata = Maps.newHashMap();
    private Map<String, String> config = Maps.newHashMap();
    private MessageType type;
    private WriteSupport<T> writeSupport;

    private ParquetWriteBuilder(org.apache.parquet.io.OutputFile path) {
      super(path);
    }

    @Override
    protected ParquetWriteBuilder<T> self() {
      return this;
    }

    public ParquetWriteBuilder<T> setKeyValueMetadata(Map<String, String> keyValueMetadata) {
      this.keyValueMetadata = keyValueMetadata;
      return self();
    }

    public ParquetWriteBuilder<T> setConfig(Map<String, String> config) {
      this.config = config;
      return self();
    }

    public ParquetWriteBuilder<T> setType(MessageType type) {
      this.type = type;
      return self();
    }

    public ParquetWriteBuilder<T> setWriteSupport(WriteSupport<T> writeSupport) {
      this.writeSupport = writeSupport;
      return self();
    }

    @Override
    protected WriteSupport<T> getWriteSupport(Configuration configuration) {
      for (Map.Entry<String, String> entry : config.entrySet()) {
        configuration.set(entry.getKey(), entry.getValue());
      }
      return new ParquetWriteSupport<>(type, keyValueMetadata, writeSupport);
    }
  }

  public static ReadBuilder read(InputFile file) {
    return new ReadBuilder(file);
  }

  /** Builder for a {@link CloseableIterable} over records in a Parquet file. */
  public static class ReadBuilder {
    private final InputFile file;
    private Long start = null;
    private Long length = null;
    private Schema schema = null;
    private Expression filter = null;
    private ReadSupport<?> readSupport = null;
    private Function<MessageType, ParquetValueReader<?>> readerFunc = null;
    private boolean filterRecords = true;
    private Map<String, String> properties = Maps.newHashMap();
    private boolean callInit = false;
    private boolean reuseContainers = false;

    private ReadBuilder(InputFile file) {
      this.file = file;
    }

    /**
     * Restricts the read to the given range: [start, start + length).
     *
     * @param start the start position for this read
     * @param length the length of the range this read should scan
     * @return this builder for method chaining
     */
    public ReadBuilder split(long start, long length) {
      this.start = start;
      this.length = length;
      return this;
    }

    public ReadBuilder project(Schema schema) {
      this.schema = schema;
      return this;
    }

    public ReadBuilder filterRecords(boolean filterRecords) {
      this.filterRecords = filterRecords;
      return this;
    }

    public ReadBuilder filter(Expression filter) {
      this.filter = filter;
      return this;
    }

    public ReadBuilder readSupport(ReadSupport<?> readSupport) {
      this.readSupport = readSupport;
      return this;
    }

    public ReadBuilder createReaderFunc(Function<MessageType, ParquetValueReader<?>> readerFunc) {
      this.readerFunc = readerFunc;
      return this;
    }

    public ReadBuilder set(String key, String value) {
      properties.put(key, value);
      return this;
    }

    public ReadBuilder callInit() {
      this.callInit = true;
      return this;
    }

    public ReadBuilder reuseContainers() {
      this.reuseContainers = true;
      return this;
    }

    @SuppressWarnings("unchecked")
    public <D> CloseableIterable<D> build() {
      if (readerFunc != null) {
        // value-reader path: use Iceberg's own ParquetReader
        ParquetReadOptions.Builder optionsBuilder;
        if (file instanceof HadoopInputFile) {
          optionsBuilder = HadoopReadOptions.builder(((HadoopInputFile) file).getConf());
        } else {
          optionsBuilder = ParquetReadOptions.builder();
        }

        for (Map.Entry<String, String> entry : properties.entrySet()) {
          optionsBuilder.set(entry.getKey(), entry.getValue());
        }

        if (start != null) {
          optionsBuilder.withRange(start, start + length);
        }

        ParquetReadOptions options = optionsBuilder.build();

        return new com.netflix.iceberg.parquet.ParquetReader<>(
            file, schema, options, readerFunc, filter, reuseContainers);
      }

      // read-support path: delegate to parquet-mr's reader
      ParquetReadBuilder<D> builder = new ParquetReadBuilder<>(ParquetIO.file(file));

      builder.project(schema);

      if (readSupport != null) {
        builder.readSupport((ReadSupport<D>) readSupport);
      } else {
        builder.readSupport(new AvroReadSupport<>(ParquetAvro.DEFAULT_MODEL));
      }

      // default options for readers
      builder.set("parquet.strict.typing", "false") // allow type promotion
          .set("parquet.avro.compatible", "false") // use the new RecordReader with Utf8 support
          .set("parquet.avro.add-list-element-records", "false"); // assume that lists use a 3-level schema

      for (Map.Entry<String, String> entry : properties.entrySet()) {
        builder.set(entry.getKey(), entry.getValue());
      }

      if (filter != null) {
        // TODO: should not need to get the schema to push down before opening the file.
        // Parquet should allow setting a filter inside its read support
        MessageType type;
        try (ParquetFileReader schemaReader = ParquetFileReader.open(ParquetIO.file(file))) {
          type = schemaReader.getFileMetaData().getSchema();
        } catch (IOException e) {
          throw new RuntimeIOException(e);
        }
        Schema fileSchema = ParquetSchemaUtil.convert(type);
        builder.useStatsFilter()
            .useDictionaryFilter()
            .useRecordFilter(filterRecords)
            .withFilter(ParquetFilters.convert(fileSchema, filter));
      } else {
        // turn off filtering
        builder.useStatsFilter(false)
            .useDictionaryFilter(false)
            .useRecordFilter(false);
      }

      if (callInit) {
        builder.callInit();
      }

      if (start != null) {
        builder.withFileRange(start, start + length);
      }

      return new ParquetIterable<>(builder);
    }
  }

  private static class ParquetReadBuilder<T> extends ParquetReader.Builder<T> {
    private Schema schema = null;
    private ReadSupport<T> readSupport = null;
    private boolean callInit = false;

    private ParquetReadBuilder(org.apache.parquet.io.InputFile file) {
      super(file);
    }

    public ParquetReadBuilder<T> project(Schema schema) {
      this.schema = schema;
      return this;
    }

    public ParquetReadBuilder<T> readSupport(ReadSupport<T> readSupport) {
      this.readSupport = readSupport;
      return this;
    }

    public ParquetReadBuilder<T> callInit() {
      this.callInit = true;
      return this;
    }

    @Override
    protected ReadSupport<T> getReadSupport() {
      return new ParquetReadSupport<>(schema, readSupport, callInit);
    }
  }
}
2,180
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetDictionaryRowGroupFilter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.BoundReference;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.Dictionary;
import org.apache.parquet.column.Encoding;
import org.apache.parquet.column.EncodingStats;
import org.apache.parquet.column.page.DictionaryPage;
import org.apache.parquet.column.page.DictionaryPageReadStore;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;

import java.io.IOException;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.IntPredicate;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;
import static com.netflix.iceberg.parquet.ParquetConversions.converterFromParquet;

/**
 * Evaluates an Iceberg filter expression against a row group's dictionary pages to decide whether
 * the row group can possibly contain matching rows.
 * <p>
 * A dictionary can only be used to eliminate a row group when every page of the column is
 * dictionary-encoded; otherwise the answer is always "rows might match". Dictionaries contain
 * only non-null values, so null checks can never eliminate a row group either.
 */
public class ParquetDictionaryRowGroupFilter {
  private final Schema schema;
  private final StructType struct;
  private final Expression expr;
  // lazily created so this filter stays cheaply serializable; one visitor per thread because
  // the visitor carries per-evaluation mutable state
  private transient ThreadLocal<EvalVisitor> visitors = null;

  private EvalVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(EvalVisitor::new);
    }
    return visitors.get();
  }

  public ParquetDictionaryRowGroupFilter(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether the dictionaries for a row group may contain records that match the expression.
   *
   * @param fileSchema schema for the Parquet file
   * @param rowGroup metadata for the row group being tested
   * @param dictionaries a dictionary page read store
   * @return false if the file cannot contain rows that match the expression, true otherwise.
   */
  public boolean shouldRead(MessageType fileSchema, BlockMetaData rowGroup,
                            DictionaryPageReadStore dictionaries) {
    return visitor().eval(fileSchema, rowGroup, dictionaries);
  }

  private static final boolean ROWS_MIGHT_MATCH = true;
  private static final boolean ROWS_CANNOT_MATCH = false;

  private class EvalVisitor extends BoundExpressionVisitor<Boolean> {
    private DictionaryPageReadStore dictionaries = null;
    private Map<Integer, Set<?>> dictCache = null;
    private Map<Integer, Boolean> isFallback = null;
    private Map<Integer, ColumnDescriptor> cols = null;
    private Map<Integer, Function<Object, Object>> conversions = null;

    private boolean eval(MessageType fileSchema, BlockMetaData rowGroup,
                         DictionaryPageReadStore dictionaries) {
      this.dictionaries = dictionaries;
      this.dictCache = Maps.newHashMap();
      this.isFallback = Maps.newHashMap();
      this.cols = Maps.newHashMap();
      this.conversions = Maps.newHashMap();

      // index column descriptors and Parquet->Iceberg value converters by field id
      for (ColumnDescriptor desc : fileSchema.getColumns()) {
        PrimitiveType colType = fileSchema.getType(desc.getPath()).asPrimitiveType();
        if (colType.getId() != null) {
          int id = colType.getId().intValue();
          cols.put(id, desc);
          conversions.put(id, converterFromParquet(colType));
        }
      }

      // record which columns may have pages that fell back to non-dictionary encoding
      for (ColumnChunkMetaData meta : rowGroup.getColumns()) {
        PrimitiveType colType = fileSchema.getType(meta.getPath().toArray()).asPrimitiveType();
        if (colType.getId() != null) {
          int id = colType.getId().intValue();
          isFallback.put(id, hasNonDictionaryPages(meta));
        }
      }

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MIGHT_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_CANNOT_MATCH; // all rows fail
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // dictionaries only contain non-nulls and cannot eliminate based on isNull or NotNull
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // dictionaries only contain non-nulls and cannot eliminate based on isNull or NotNull
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      return mightMatch(ref, lit, cmp -> cmp < 0);
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      return mightMatch(ref, lit, cmp -> cmp <= 0);
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      return mightMatch(ref, lit, cmp -> cmp > 0);
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      return mightMatch(ref, lit, cmp -> cmp >= 0);
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      Set<T> dictionary = usableDictionary(ref, lit);
      if (dictionary == null) {
        return ROWS_MIGHT_MATCH;
      }
      return dictionary.contains(lit.value()) ? ROWS_MIGHT_MATCH : ROWS_CANNOT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      Set<T> dictionary = usableDictionary(ref, lit);
      if (dictionary == null) {
        return ROWS_MIGHT_MATCH;
      }

      // with more than one distinct value, some row must differ from the literal
      if (dictionary.size() > 1) {
        return ROWS_MIGHT_MATCH;
      }

      return dictionary.contains(lit.value()) ? ROWS_CANNOT_MATCH : ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      return ROWS_MIGHT_MATCH;
    }

    /**
     * Shared comparison logic for lt/ltEq/gt/gtEq: rows might match if any dictionary entry's
     * comparison result against the literal satisfies the given predicate.
     */
    private <T> Boolean mightMatch(BoundReference<T> ref, Literal<T> lit, IntPredicate satisfies) {
      Set<T> dictionary = usableDictionary(ref, lit);
      if (dictionary == null) {
        return ROWS_MIGHT_MATCH;
      }

      for (T item : dictionary) {
        if (satisfies.test(lit.comparator().compare(item, lit.value()))) {
          return ROWS_MIGHT_MATCH;
        }
      }

      return ROWS_CANNOT_MATCH;
    }

    /**
     * Returns the dictionary for the referenced column, or null when the dictionary cannot be
     * used to eliminate rows (unknown column, fallback-encoded pages, or no dictionary page).
     * <p>
     * BUG FIX: callers previously dereferenced dict()'s result directly, which could be null
     * when a column had no dictionary page, causing a NullPointerException.
     */
    private <T> Set<T> usableDictionary(BoundReference<T> ref, Literal<T> lit) {
      int id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      Boolean hasNonDictPage = isFallback.get(id);
      if (hasNonDictPage == null || hasNonDictPage) {
        return null;
      }

      return dict(id, lit.comparator());
    }

    @SuppressWarnings("unchecked")
    private <T> Set<T> dict(int id, Comparator<T> comparator) {
      Set<?> cached = dictCache.get(id);
      if (cached != null) {
        return (Set<T>) cached;
      }

      ColumnDescriptor col = cols.get(id);
      DictionaryPage page = dictionaries.readDictionaryPage(col);
      // may not be dictionary-encoded
      if (page == null) {
        return null;
      }

      Function<Object, Object> conversion = conversions.get(id);

      Dictionary dict;
      try {
        dict = page.getEncoding().initDictionary(col, page);
      } catch (IOException e) {
        // BUG FIX: preserve the cause instead of dropping it
        throw new RuntimeIOException(e, "Failed to create reader for dictionary page");
      }

      Set<T> dictSet = Sets.newTreeSet(comparator);

      for (int i = 0; i <= dict.getMaxId(); i += 1) {
        switch (col.getType()) {
          case BINARY:
            dictSet.add((T) conversion.apply(dict.decodeToBinary(i)));
            break;
          case INT32:
            dictSet.add((T) conversion.apply(dict.decodeToInt(i)));
            break;
          case INT64:
            dictSet.add((T) conversion.apply(dict.decodeToLong(i)));
            break;
          case FLOAT:
            dictSet.add((T) conversion.apply(dict.decodeToFloat(i)));
            break;
          case DOUBLE:
            dictSet.add((T) conversion.apply(dict.decodeToDouble(i)));
            break;
          default:
            throw new IllegalArgumentException(
                "Cannot decode dictionary of type: " + col.getType());
        }
      }

      dictCache.put(id, dictSet);

      return dictSet;
    }
  }

  @SuppressWarnings("deprecation")
  private static boolean hasNonDictionaryPages(ColumnChunkMetaData meta) {
    EncodingStats stats = meta.getEncodingStats();
    if (stats != null) {
      return stats.hasNonDictionaryEncodedPages();
    }

    // without EncodingStats, fall back to testing the encoding list
    Set<Encoding> encodings = new HashSet<>(meta.getEncodings());
    if (encodings.remove(Encoding.PLAIN_DICTIONARY)) {
      // if remove returned true, PLAIN_DICTIONARY was present, which means at
      // least one page was dictionary encoded and 1.0 encodings are used

      // RLE and BIT_PACKED are only used for repetition or definition levels
      encodings.remove(Encoding.RLE);
      encodings.remove(Encoding.BIT_PACKED);

      // no encodings other than dictionary or rep/def levels means all pages were dictionary
      return !encodings.isEmpty();
    } else {
      // if PLAIN_DICTIONARY wasn't present, then either the column is not
      // dictionary-encoded, or the 2.0 encoding, RLE_DICTIONARY, was used.
      // for 2.0, this cannot determine whether a page fell back without
      // page encoding stats
      return true;
    }
  }
}
2,181
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/MessageTypeToType.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.Preconditions;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.OriginalType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type.Repetition;

import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Converts a Parquet {@link MessageType} to an Iceberg {@link Type}, assigning fresh field ids
 * where the Parquet schema does not carry them and recording dotted-name aliases for each field.
 */
class MessageTypeToType extends ParquetTypeVisitor<Type> {
  private static final Joiner DOT = Joiner.on(".");

  private final Map<String, Integer> aliasToId = Maps.newHashMap();
  private final GroupType root;
  private int nextId = 1;

  public MessageTypeToType(GroupType root) {
    this.root = root;
    this.nextId = 1_000; // use ids that won't match other than for root
  }

  public Map<String, Integer> getAliases() {
    return aliasToId;
  }

  @Override
  public Type message(MessageType message, List<Type> fields) {
    return struct(message, fields);
  }

  @Override
  public Type struct(GroupType struct, List<Type> fieldTypes) {
    if (struct == root) {
      nextId = 1; // use the reserved IDs for the root struct
    }

    List<org.apache.parquet.schema.Type> parquetFields = struct.getFields();
    List<Types.NestedField> fields = Lists.newArrayListWithExpectedSize(fieldTypes.size());
    for (int i = 0; i < parquetFields.size(); i += 1) {
      org.apache.parquet.schema.Type field = parquetFields.get(i);

      // BUG FIX: parquet's Preconditions formats messages with String.format, so the SLF4J-style
      // "{}" placeholder never rendered the offending field; use "%s" instead (also below)
      Preconditions.checkArgument(
          !field.isRepetition(Repetition.REPEATED),
          "Fields cannot have repetition REPEATED: %s", field);

      int fieldId = getId(field);

      addAlias(field.getName(), fieldId);

      if (parquetFields.get(i).isRepetition(Repetition.OPTIONAL)) {
        fields.add(optional(fieldId, field.getName(), fieldTypes.get(i)));
      } else {
        fields.add(required(fieldId, field.getName(), fieldTypes.get(i)));
      }
    }

    return Types.StructType.of(fields);
  }

  @Override
  public Type list(GroupType array, Type elementType) {
    GroupType repeated = array.getType(0).asGroupType();
    org.apache.parquet.schema.Type element = repeated.getType(0);

    Preconditions.checkArgument(
        !element.isRepetition(Repetition.REPEATED),
        "Elements cannot have repetition REPEATED: %s", element);

    int elementFieldId = getId(element);

    addAlias(element.getName(), elementFieldId);

    if (element.isRepetition(Repetition.OPTIONAL)) {
      return Types.ListType.ofOptional(elementFieldId, elementType);
    } else {
      return Types.ListType.ofRequired(elementFieldId, elementType);
    }
  }

  @Override
  public Type map(GroupType map, Type keyType, Type valueType) {
    GroupType keyValue = map.getType(0).asGroupType();
    org.apache.parquet.schema.Type key = keyValue.getType(0);
    org.apache.parquet.schema.Type value = keyValue.getType(1);

    Preconditions.checkArgument(
        !value.isRepetition(Repetition.REPEATED),
        "Values cannot have repetition REPEATED: %s", value);

    int keyFieldId = getId(key);
    int valueFieldId = getId(value);

    addAlias(key.getName(), keyFieldId);
    addAlias(value.getName(), valueFieldId);

    if (value.isRepetition(Repetition.OPTIONAL)) {
      return Types.MapType.ofOptional(keyFieldId, valueFieldId, keyType, valueType);
    } else {
      return Types.MapType.ofRequired(keyFieldId, valueFieldId, keyType, valueType);
    }
  }

  @Override
  public Type primitive(PrimitiveType primitive) {
    // logical type annotations take precedence over the physical type
    OriginalType annotation = primitive.getOriginalType();
    if (annotation != null) {
      switch (annotation) {
        case INT_8:
        case UINT_8:
        case INT_16:
        case UINT_16:
        case INT_32:
          return Types.IntegerType.get();
        case INT_64:
          return Types.LongType.get();
        case DATE:
          return Types.DateType.get();
        case TIME_MILLIS:
        case TIME_MICROS:
          return Types.TimeType.get();
        case TIMESTAMP_MILLIS:
        case TIMESTAMP_MICROS:
          return Types.TimestampType.withZone();
        case JSON:
        case BSON:
        case ENUM:
        case UTF8:
          return Types.StringType.get();
        case DECIMAL:
          DecimalMetadata decimal = primitive.getDecimalMetadata();
          return Types.DecimalType.of(
              decimal.getPrecision(), decimal.getScale());
        default:
          throw new UnsupportedOperationException("Unsupported logical type: " + annotation);
      }
    }

    switch (primitive.getPrimitiveTypeName()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case INT32:
        return Types.IntegerType.get();
      case INT64:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case FIXED_LEN_BYTE_ARRAY:
        return Types.FixedType.ofLength(primitive.getTypeLength());
      case BINARY:
        return Types.BinaryType.get();
    }

    throw new UnsupportedOperationException(
        "Cannot convert unknown primitive type: " + primitive);
  }

  // NOTE(review): removed an unused private addAlias(int) overload; the dotted-name variant
  // below is the only one referenced in this file.

  /** Records the full dotted path for a field id, using the visitor's current field-name stack. */
  private void addAlias(String name, int fieldId) {
    String fullName = name;
    if (!fieldNames.isEmpty()) {
      fullName = DOT.join(DOT.join(fieldNames.descendingIterator()), name);
    }
    aliasToId.put(fullName, fieldId);
  }

  /** Allocates the next synthetic field id for fields without Parquet ids. */
  protected int nextId() {
    int current = nextId;
    nextId += 1;
    return current;
  }

  private int getId(org.apache.parquet.schema.Type type) {
    org.apache.parquet.schema.Type.ID id = type.getId();
    if (id != null) {
      return id.intValue();
    } else {
      return nextId();
    }
  }
}
2,182
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetConversions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.PrimitiveType;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import java.util.function.Function;

/**
 * Conversions from Parquet physical values (as surfaced by parquet-mr, e.g. in column
 * statistics) to Iceberg {@link Literal literals} and in-memory values.
 * <p>
 * Note: this was changed to use {@link StandardCharsets} instead of the deprecated
 * commons-io {@code Charsets} class; behavior is identical and one third-party
 * dependency is dropped.
 */
class ParquetConversions {
  private ParquetConversions() {
  }

  /**
   * Converts a raw Parquet value into an Iceberg {@link Literal} of the given type.
   * <p>
   * Numeric and boolean values are passed through; {@link Binary} values are decoded
   * according to the target Iceberg type (UTF-8 string, big-endian UUID, fixed/binary
   * buffer, or unscaled decimal bytes combined with the type's scale).
   *
   * @param type the Iceberg type the literal should have
   * @param value a value read from Parquet (Boolean, Integer, Long, Float, Double, or Binary)
   * @throws IllegalArgumentException if the value class or target type is not supported
   */
  static <T> Literal<T> fromParquetPrimitive(Type type, Object value) {
    if (value instanceof Boolean) {
      return Literal.of((Boolean) value).to(type);
    } else if (value instanceof Integer) {
      return Literal.of((Integer) value).to(type);
    } else if (value instanceof Long) {
      return Literal.of((Long) value).to(type);
    } else if (value instanceof Float) {
      return Literal.of((Float) value).to(type);
    } else if (value instanceof Double) {
      return Literal.of((Double) value).to(type);
    } else if (value instanceof Binary) {
      switch (type.typeId()) {
        case STRING:
          return Literal.of(StandardCharsets.UTF_8.decode(((Binary) value).toByteBuffer())).to(type);
        case UUID:
          // UUIDs are stored as 16 big-endian bytes: high 8 bytes then low 8 bytes
          ByteBuffer buffer = ((Binary) value).toByteBuffer().order(ByteOrder.BIG_ENDIAN);
          long mostSigBits = buffer.getLong();
          long leastSigBits = buffer.getLong();
          return Literal.of(new UUID(mostSigBits, leastSigBits)).to(type);
        case FIXED:
        case BINARY:
          return Literal.of(((Binary) value).toByteBuffer()).to(type);
        case DECIMAL:
          // binary-backed decimals store the unscaled value; the scale comes from the type
          Types.DecimalType decimal = (Types.DecimalType) type;
          return Literal.of(
              new BigDecimal(new BigInteger(((Binary) value).getBytes()), decimal.scale())
          ).to(type);
        default:
          throw new IllegalArgumentException("Unsupported primitive type: " + type);
      }
    } else {
      throw new IllegalArgumentException("Unsupported primitive value: " + value);
    }
  }

  /**
   * Returns a converter from the raw object parquet-mr produces for the given column
   * type to the object Iceberg works with (CharSequence for UTF8, BigDecimal for
   * decimals, ByteBuffer for binary/fixed); other values pass through unchanged.
   *
   * @param type the Parquet primitive type of the column
   * @throws IllegalArgumentException if a decimal is backed by an unsupported physical type
   */
  static Function<Object, Object> converterFromParquet(PrimitiveType type) {
    if (type.getOriginalType() != null) {
      switch (type.getOriginalType()) {
        case UTF8:
          // decode to CharSequence to avoid copying into a new String
          return binary -> StandardCharsets.UTF_8.decode(((Binary) binary).toByteBuffer());
        case DECIMAL:
          int scale = type.getDecimalMetadata().getScale();
          switch (type.getPrimitiveTypeName()) {
            case INT32:
            case INT64:
              return num -> BigDecimal.valueOf(((Number) num).longValue(), scale);
            case FIXED_LEN_BYTE_ARRAY:
            case BINARY:
              return bin -> new BigDecimal(new BigInteger(((Binary) bin).getBytes()), scale);
            default:
              throw new IllegalArgumentException(
                  "Unsupported primitive type for decimal: " + type.getPrimitiveTypeName());
          }
        default:
          // other logical types fall through to the physical-type handling below
      }
    }

    switch (type.getPrimitiveTypeName()) {
      case FIXED_LEN_BYTE_ARRAY:
      case BINARY:
        return binary -> ByteBuffer.wrap(((Binary) binary).getBytes());
      default:
        // no conversion needed for boolean/int/long/float/double
    }

    return obj -> obj;
  }
}
2,183
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetAvroValueReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.parquet.ParquetValueReaders.BytesReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.FloatAsDoubleReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.ListReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.MapReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData.Fixed;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.util.Utf8;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;

import static com.netflix.iceberg.parquet.ParquetValueReaders.option;

/**
 * Builds {@link ParquetValueReader} trees that materialize Parquet data as Avro
 * {@link Record generic records}, matching the expected Iceberg schema by field id.
 */
public class ParquetAvroValueReaders {
  private ParquetAvroValueReaders() {
  }

  /**
   * Builds a reader for the given expected Iceberg schema over the given Parquet file schema.
   * <p>
   * Fields are matched by field id; expected fields missing from the file are read as nulls.
   *
   * @param expectedSchema the Iceberg schema rows should conform to
   * @param fileSchema the Parquet schema of the file being read
   */
  @SuppressWarnings("unchecked")
  public static ParquetValueReader<Record> buildReader(com.netflix.iceberg.Schema expectedSchema,
                                                       MessageType fileSchema) {
    return (ParquetValueReader<Record>)
        TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
            new ReadBuilder(expectedSchema, fileSchema));
  }

  /**
   * Schema visitor that assembles readers bottom-up. Definition levels passed to
   * {@code option(...)} are {@code maxDefinitionLevel - 1} because a value is present
   * when its definition level is strictly greater than its parent's.
   */
  private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
    private final com.netflix.iceberg.Schema schema;
    private final Map<com.netflix.iceberg.types.Type, Schema> avroSchemas;
    private final MessageType type;

    ReadBuilder(com.netflix.iceberg.Schema schema, MessageType type) {
      this.schema = schema;
      this.avroSchemas = AvroSchemaUtil.convertTypes(schema.asStruct(), type.getName());
      this.type = type;
    }

    @Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      // the message root is read exactly like a struct
      return struct(expected, message.asGroupType(), fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      Schema avroSchema = avroSchemas.get(expected);

      // index the file's field readers by field id so they can be reordered below
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        int id = fieldType.getId().intValue();
        readersById.put(id, option(fieldType, fieldD, fieldReaders.get(i)));
        typesById.put(id, fieldType);
      }

      // match the expected struct's field order; fields not in the file read as null
      List<Types.NestedField> expectedFields = expected != null ?
          expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
          expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
      for (Types.NestedField field : expectedFields) {
        int id = field.fieldId();
        ParquetValueReader<?> reader = readersById.get(id);
        if (reader != null) {
          reorderedFields.add(reader);
          types.add(typesById.get(id));
        } else {
          reorderedFields.add(ParquetValueReaders.nulls());
          types.add(null);
        }
      }

      return new RecordReader(types, reorderedFields, avroSchema);
    }

    @Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
                                      ParquetValueReader<?> elementReader) {
      // 3-level list representation: array group -> repeated group -> element
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;

      return new ListReader<>(repeatedD, repeatedR, option(elementType, elementD, elementReader));
    }

    @Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
                                     ParquetValueReader<?> keyReader,
                                     ParquetValueReader<?> valueReader) {
      // map representation: map group -> repeated key_value group -> (key, value)
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();

      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;

      Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
      Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;

      return new MapReader<>(repeatedD, repeatedR,
          option(keyType, keyD, keyReader), option(valueType, valueD, valueReader));
    }

    @Override
    public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected,
                                           PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());
      // map keys must be Strings (hashable/comparable); other strings reuse Utf8
      boolean isMapKey = fieldNames.contains("key");

      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            if (isMapKey) {
              return new StringReader(desc);
            }
            return new Utf8Reader(desc);
          case DATE:
          case INT_8:
          case INT_16:
          case INT_32:
          case INT_64:
          case TIME_MICROS:
          case TIMESTAMP_MICROS:
            return new UnboxedReader<>(desc);
          case TIME_MILLIS:
            return new TimeMillisReader(desc);
          case TIMESTAMP_MILLIS:
            return new TimestampMillisReader(desc);
          case DECIMAL:
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return new DecimalReader(desc, decimal.getScale());
              case INT64:
                // fixed: INT64-backed decimals must use the long-based reader
                // (LongAsDecimalReader calls nextLong); these two cases were swapped
                return new LongAsDecimalReader(desc, decimal.getScale());
              case INT32:
                // fixed: INT32-backed decimals use the int-based reader (nextInteger)
                return new IntegerAsDecimalReader(desc, decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return new BytesReader(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          // fixed needs the Avro schema to build GenericData.Fixed instances
          int fieldId = primitive.getId().intValue();
          Schema avroSchema = AvroSchemaUtil.convert(schema.findType(fieldId));
          return new FixedReader(desc, avroSchema);
        case BINARY:
          return new BytesReader(desc);
        case INT32:
          // promote int columns when the expected Iceberg type is long
          if (expected != null && expected.typeId() == TypeID.LONG) {
            return new IntAsLongReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case FLOAT:
          // promote float columns when the expected Iceberg type is double
          if (expected != null && expected.typeId() == TypeID.DOUBLE) {
            return new FloatAsDoubleReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case BOOLEAN:
        case INT64:
        case DOUBLE:
          return new UnboxedReader<>(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    // returns the current field path as an array, outermost name first
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }

    // returns the current field path extended with the given child name
    private String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
  }

  /** Reads binary-backed decimals: bytes are the unscaled value, big-endian. */
  static class DecimalReader extends ParquetValueReaders.PrimitiveReader<BigDecimal> {
    private final int scale;

    DecimalReader(ColumnDescriptor desc, int scale) {
      super(desc);
      this.scale = scale;
    }

    @Override
    public BigDecimal read(BigDecimal ignored) {
      return new BigDecimal(new BigInteger(column.nextBinary().getBytesUnsafe()), scale);
    }
  }

  /** Reads UTF-8 columns as String; used for map keys. */
  static class StringReader extends ParquetValueReaders.PrimitiveReader<String> {
    StringReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public String read(String ignored) {
      return column.nextBinary().toStringUsingUTF8();
    }
  }

  /** Reads UTF-8 columns as reusable Avro {@link Utf8} objects. */
  static class Utf8Reader extends ParquetValueReaders.PrimitiveReader<Utf8> {
    Utf8Reader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public Utf8 read(Utf8 reuse) {
      Utf8 utf8;
      if (reuse != null) {
        utf8 = reuse;
      } else {
        utf8 = new Utf8();
      }

      // use a byte buffer because it never results in a copy
      ByteBuffer buffer = column.nextBinary().toByteBuffer();

      // always copy the bytes into the Utf8. for constant binary data backed by an array starting
      // at 0, it is possible to wrap the bytes in a Utf8, but reusing that Utf8 could corrupt the
      // constant binary if its backing buffer is copied to.
      utf8.setByteLength(buffer.remaining());
      buffer.get(utf8.getBytes(), 0, buffer.remaining());

      return utf8;
    }
  }

  /** Reads 16-byte big-endian binary values as {@link UUID}. */
  static class UUIDReader extends ParquetValueReaders.PrimitiveReader<UUID> {
    UUIDReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public UUID read(UUID ignored) {
      ByteBuffer buffer = column.nextBinary().toByteBuffer();
      buffer.order(ByteOrder.BIG_ENDIAN);
      long mostSigBits = buffer.getLong();
      long leastSigBits = buffer.getLong();

      return new UUID(mostSigBits, leastSigBits);
    }
  }

  /** Reads fixed-length binary values as reusable Avro {@link Fixed} objects. */
  static class FixedReader extends ParquetValueReaders.PrimitiveReader<Fixed> {
    private final Schema schema;

    FixedReader(ColumnDescriptor desc, Schema schema) {
      super(desc);
      this.schema = schema;
    }

    @Override
    public Fixed read(Fixed reuse) {
      Fixed fixed;
      if (reuse != null) {
        fixed = reuse;
      } else {
        fixed = new Fixed(schema);
      }

      column.nextBinary().toByteBuffer().get(fixed.bytes());

      return fixed;
    }
  }

  /** Converts TIME_MILLIS columns to microseconds. */
  public static class TimeMillisReader extends UnboxedReader<Long> {
    TimeMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public long readLong() {
      // millis -> micros
      return 1000 * column.nextLong();
    }
  }

  /** Converts TIMESTAMP_MILLIS columns to microseconds. */
  public static class TimestampMillisReader extends UnboxedReader<Long> {
    TimestampMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public long readLong() {
      // millis -> micros
      return 1000 * column.nextLong();
    }
  }

  /** Materializes struct values as Avro generic {@link Record records}, reusing them when possible. */
  static class RecordReader extends StructReader<Record, Record> {
    private final Schema schema;

    RecordReader(List<Type> types, List<ParquetValueReader<?>> readers, Schema schema) {
      super(types, readers);
      this.schema = schema;
    }

    @Override
    protected Record newStructData(Record reuse) {
      if (reuse != null) {
        return reuse;
      } else {
        return new Record(schema);
      }
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Object getField(Record intermediate, int pos) {
      return intermediate.get(pos);
    }

    @Override
    protected Record buildStruct(Record struct) {
      return struct;
    }

    @Override
    protected void set(Record struct, int pos, Object value) {
      struct.put(pos, value);
    }
  }
}
2,184
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetValueReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.Type;
import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static java.util.Collections.emptyIterator;

/**
 * Generic {@link ParquetValueReader} building blocks: primitive readers, readers that
 * handle optional/repeated structure using definition and repetition levels, and an
 * abstract struct reader that subclasses specialize for a concrete record model.
 */
public class ParquetValueReaders {
  private ParquetValueReaders() {
  }

  /**
   * Wraps a reader with null handling when the Parquet type is OPTIONAL.
   *
   * @param type the Parquet type being read
   * @param definitionLevel the definition level above which a value is present
   * @param reader the reader for present values
   */
  public static <T> ParquetValueReader<T> option(Type type, int definitionLevel,
                                                 ParquetValueReader<T> reader) {
    if (type.isRepetition(Type.Repetition.OPTIONAL)) {
      return new OptionReader<>(definitionLevel, reader);
    }
    return reader;
  }

  /** Returns a reader that always produces null, for expected fields missing from the file. */
  @SuppressWarnings("unchecked")
  public static <T> ParquetValueReader<T> nulls() {
    return (ParquetValueReader<T>) NullReader.INSTANCE;
  }

  /** Reader that consumes no columns and always returns null. */
  private static class NullReader<T> implements ParquetValueReader<T> {
    private static final NullReader<Void> INSTANCE = new NullReader<>();
    private static final List<TripleIterator<?>> COLUMNS = ImmutableList.of();
    // a column stand-in with no data; levels are always 0 and iteration is empty
    private static final TripleIterator<?> NULL_COLUMN = new TripleIterator<Object> () {
      @Override
      public int currentDefinitionLevel() {
        return 0;
      }

      @Override
      public int currentRepetitionLevel() {
        return 0;
      }

      @Override
      public <N> N nextNull() {
        return null;
      }

      @Override
      public boolean hasNext() {
        return false;
      }

      @Override
      public Object next() {
        return null;
      }
    };

    private NullReader() {
    }

    @Override
    public T read(T reuse) {
      return null;
    }

    @Override
    public TripleIterator<?> column() {
      return NULL_COLUMN;
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return COLUMNS;
    }

    @Override
    public void setPageSource(PageReadStore pageStore) {
    }
  }

  /** Base class for readers of a single Parquet column; exposes its {@link ColumnIterator}. */
  public abstract static class PrimitiveReader<T> implements ParquetValueReader<T> {
    private final ColumnDescriptor desc;
    // the column's triple iterator; subclasses pull values from it in read()
    protected final ColumnIterator<?> column;
    private final List<TripleIterator<?>> children;

    protected PrimitiveReader(ColumnDescriptor desc) {
      this.desc = desc;
      this.column = ColumnIterator.newIterator(desc, "");
      this.children = ImmutableList.of(column);
    }

    @Override
    public void setPageSource(PageReadStore pageStore) {
      column.setPageSource(pageStore.getPageReader(desc));
    }

    @Override
    public TripleIterator<?> column() {
      return column;
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return children;
    }
  }

  /**
   * Reads column values without conversion. The typed readXyz methods avoid boxing
   * when the caller (e.g. {@link StructReader}) knows the primitive type.
   */
  public static class UnboxedReader<T> extends PrimitiveReader<T> {
    public UnboxedReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    @SuppressWarnings("unchecked")
    public T read(T ignored) {
      return (T) column.next();
    }

    public boolean readBoolean() {
      return column.nextBoolean();
    }

    public int readInteger() {
      return column.nextInteger();
    }

    public long readLong() {
      return column.nextLong();
    }

    public float readFloat() {
      return column.nextFloat();
    }

    public double readDouble() {
      return column.nextDouble();
    }

    public Binary readBinary() {
      return column.nextBinary();
    }
  }

  /** Reads binary column values as UTF-8 Strings. */
  public static class StringReader extends PrimitiveReader<String> {
    public StringReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public String read(String reuse) {
      return column.nextBinary().toStringUsingUTF8();
    }
  }

  /** Promotes INT32 column values to long. */
  public static class IntAsLongReader extends UnboxedReader<Long> {
    public IntAsLongReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public Long read(Long ignored) {
      return readLong();
    }

    @Override
    public long readLong() {
      return super.readInteger();
    }
  }

  /** Promotes FLOAT column values to double. */
  public static class FloatAsDoubleReader extends UnboxedReader<Double> {
    public FloatAsDoubleReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public Double read(Double ignored) {
      return readDouble();
    }

    @Override
    public double readDouble() {
      return super.readFloat();
    }
  }

  /** Reads INT32-backed decimals: the int is the unscaled value. */
  public static class IntegerAsDecimalReader extends PrimitiveReader<BigDecimal> {
    private final int scale;

    public IntegerAsDecimalReader(ColumnDescriptor desc, int scale) {
      super(desc);
      this.scale = scale;
    }

    @Override
    public BigDecimal read(BigDecimal ignored) {
      return new BigDecimal(BigInteger.valueOf(column.nextInteger()), scale);
    }
  }

  /** Reads INT64-backed decimals: the long is the unscaled value. */
  public static class LongAsDecimalReader extends PrimitiveReader<BigDecimal> {
    private final int scale;

    public LongAsDecimalReader(ColumnDescriptor desc, int scale) {
      super(desc);
      this.scale = scale;
    }

    @Override
    public BigDecimal read(BigDecimal ignored) {
      return new BigDecimal(BigInteger.valueOf(column.nextLong()), scale);
    }
  }

  /** Reads binary/fixed-backed decimals: bytes are the big-endian unscaled value. */
  public static class BinaryAsDecimalReader extends PrimitiveReader<BigDecimal> {
    // NOTE(review): not final, unlike the other decimal readers — could be final; never reassigned
    private int scale;

    public BinaryAsDecimalReader(ColumnDescriptor desc, int scale) {
      super(desc);
      this.scale = scale;
    }

    @Override
    public BigDecimal read(BigDecimal reuse) {
      byte[] bytes = column.nextBinary().getBytesUnsafe();
      return new BigDecimal(new BigInteger(bytes), scale);
    }
  }

  /** Reads binary columns as ByteBuffers, copying into the reused buffer when it fits. */
  public static class BytesReader extends PrimitiveReader<ByteBuffer> {
    public BytesReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public ByteBuffer read(ByteBuffer reuse) {
      Binary binary = column.nextBinary();
      ByteBuffer data = binary.toByteBuffer();
      if (reuse != null && reuse.hasArray() && reuse.capacity() >= data.remaining()) {
        // reuse the caller's buffer: copy in and reset position/limit to the new content
        data.get(reuse.array(), reuse.arrayOffset(), data.remaining());
        reuse.position(0);
        reuse.limit(data.remaining());
        return reuse;
      } else {
        byte[] array = new byte[data.remaining()];
        data.get(array, 0, data.remaining());
        return ByteBuffer.wrap(array);
      }
    }
  }

  /**
   * Wraps a reader for an OPTIONAL field: produces the value when the current definition
   * level shows it is present, otherwise consumes the null triple and returns null.
   */
  private static class OptionReader<T> implements ParquetValueReader<T> {
    private final int definitionLevel;
    private final ParquetValueReader<T> reader;
    private final TripleIterator<?> column;
    private final List<TripleIterator<?>> children;

    OptionReader(int definitionLevel, ParquetValueReader<T> reader) {
      this.definitionLevel = definitionLevel;
      this.reader = reader;
      this.column = reader.column();
      this.children = reader.columns();
    }

    @Override
    public void setPageSource(PageReadStore pageStore) {
      reader.setPageSource(pageStore);
    }

    @Override
    public TripleIterator<?> column() {
      return column;
    }

    @Override
    public T read(T reuse) {
      if (column.currentDefinitionLevel() > definitionLevel) {
        return reader.read(reuse);
      }

      // value is null at this level: advance every underlying column past the null triple
      for (TripleIterator<?> column : children) {
        column.nextNull();
      }

      return null;
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return children;
    }
  }

  /**
   * Base class for list-like readers. Reads one element per triple while the repetition
   * level shows the current list is continuing.
   *
   * @param <T> the list type returned
   * @param <I> the intermediate type filled while reading
   * @param <E> the element type
   */
  public abstract static class RepeatedReader<T, I, E> implements ParquetValueReader<T> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueReader<E> reader;
    private final TripleIterator<?> column;
    private final List<TripleIterator<?>> children;

    protected RepeatedReader(int definitionLevel, int repetitionLevel,
                             ParquetValueReader<E> reader) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.reader = reader;
      this.column = reader.column();
      this.children = reader.columns();
    }

    @Override
    public void setPageSource(PageReadStore pageStore) {
      reader.setPageSource(pageStore);
    }

    @Override
    public TripleIterator<?> column() {
      return column;
    }

    @Override
    public T read(T reuse) {
      I intermediate = newListData(reuse);

      do {
        if (column.currentDefinitionLevel() > definitionLevel) {
          addElement(intermediate, reader.read(getElement(intermediate)));
        } else {
          // consume the empty list triple
          for (TripleIterator<?> column : children) {
            column.nextNull();
          }
          // if the current definition level is equal to the definition level of this repeated type,
          // then the result is an empty list and the repetition level will always be <= rl.
          break;
        }
      } while (column.currentRepetitionLevel() > repetitionLevel);

      return buildList(intermediate);
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return children;
    }

    // creates/clears the intermediate list, possibly recycling the previous result
    protected abstract I newListData(T reuse);

    // returns an existing element to reuse when reading the next value, or null
    protected abstract E getElement(I list);

    protected abstract void addElement(I list, E element);

    protected abstract T buildList(I list);
  }

  /** Concrete list reader producing java.util.List, recycling the previous list and its elements. */
  public static class ListReader<E> extends RepeatedReader<List<E>, List<E>, E> {
    private List<E> lastList = null;
    private Iterator<E> elements = null;

    public ListReader(int definitionLevel, int repetitionLevel,
                      ParquetValueReader<E> reader) {
      super(definitionLevel, repetitionLevel, reader);
    }

    @Override
    protected List<E> newListData(List<E> reuse) {
      List<E> list;
      if (lastList != null) {
        lastList.clear();
        list = lastList;
      } else {
        list = Lists.newArrayList();
      }

      // remember the caller's list so its elements can be handed out for reuse
      if (reuse != null) {
        this.lastList = reuse;
        this.elements = reuse.iterator();
      } else {
        this.lastList = null;
        this.elements = emptyIterator();
      }

      return list;
    }

    @Override
    protected E getElement(List<E> reuse) {
      if (elements.hasNext()) {
        return elements.next();
      }

      return null;
    }

    @Override
    protected void addElement(List<E> list, E element) {
      list.add(element);
    }

    @Override
    protected List<E> buildList(List<E> list) {
      return list;
    }
  }

  /**
   * Base class for map-like readers; like {@link RepeatedReader} but reads key/value pairs.
   * Repetition is tracked on the key's first column.
   *
   * @param <M> the map type returned
   * @param <I> the intermediate type filled while reading
   * @param <K> the key type
   * @param <V> the value type
   */
  public abstract static class RepeatedKeyValueReader<M, I, K, V> implements ParquetValueReader<M> {
    private final int definitionLevel;
    private final int repetitionLevel;
    private final ParquetValueReader<K> keyReader;
    private final ParquetValueReader<V> valueReader;
    private final TripleIterator<?> column;
    private final List<TripleIterator<?>> children;

    protected RepeatedKeyValueReader(int definitionLevel, int repetitionLevel,
                                     ParquetValueReader<K> keyReader,
                                     ParquetValueReader<V> valueReader) {
      this.definitionLevel = definitionLevel;
      this.repetitionLevel = repetitionLevel;
      this.keyReader = keyReader;
      this.valueReader = valueReader;
      this.column = keyReader.column();
      this.children = ImmutableList.<TripleIterator<?>>builder()
          .addAll(keyReader.columns())
          .addAll(valueReader.columns())
          .build();
    }

    @Override
    public void setPageSource(PageReadStore pageStore) {
      keyReader.setPageSource(pageStore);
      valueReader.setPageSource(pageStore);
    }

    @Override
    public TripleIterator<?> column() {
      return column;
    }

    @Override
    public M read(M reuse) {
      I intermediate = newMapData(reuse);

      do {
        if (column.currentDefinitionLevel() > definitionLevel) {
          Map.Entry<K, V> pair = getPair(intermediate);
          addPair(intermediate, keyReader.read(pair.getKey()), valueReader.read(pair.getValue()));
        } else {
          // consume the empty map triple
          for (TripleIterator<?> column : children) {
            column.nextNull();
          }
          // if the current definition level is equal to the definition level of this repeated type,
          // then the result is an empty list and the repetition level will always be <= rl.
          break;
        }
      } while (column.currentRepetitionLevel() > repetitionLevel);

      return buildMap(intermediate);
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return children;
    }

    // creates/clears the intermediate map, possibly recycling the previous result
    protected abstract I newMapData(M reuse);

    // returns an existing entry whose key/value can be reused, or an empty entry
    protected abstract Map.Entry<K, V> getPair(I map);

    protected abstract void addPair(I map, K key, V value);

    protected abstract M buildMap(I map);
  }

  /** Concrete map reader producing an insertion-ordered java.util.Map, recycling entries. */
  public static class MapReader<K, V> extends RepeatedKeyValueReader<Map<K, V>, Map<K, V>, K, V> {
    private final ReusableEntry<K, V> nullEntry = new ReusableEntry<>();
    private Map<K, V> lastMap = null;
    private Iterator<Map.Entry<K, V>> pairs = null;

    public MapReader(int definitionLevel, int repetitionLevel,
                     ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) {
      super(definitionLevel, repetitionLevel, keyReader, valueReader);
    }

    @Override
    protected Map<K, V> newMapData(Map<K, V> reuse) {
      Map<K, V> map;
      if (lastMap != null) {
        lastMap.clear();
        map = lastMap;
      } else {
        map = Maps.newLinkedHashMap();
      }

      // remember the caller's map so its entries can be handed out for reuse
      if (reuse != null) {
        this.lastMap = reuse;
        this.pairs = reuse.entrySet().iterator();
      } else {
        this.lastMap = null;
        this.pairs = emptyIterator();
      }

      return map;
    }

    @Override
    protected Map.Entry<K, V> getPair(Map<K, V> map) {
      if (pairs.hasNext()) {
        return pairs.next();
      } else {
        // no entry left to reuse: hand out the shared empty entry (both fields null)
        return nullEntry;
      }
    }

    @Override
    protected void addPair(Map<K, V> map, K key, V value) {
      map.put(key, value);
    }

    @Override
    protected Map<K, V> buildMap(Map<K, V> map) {
      return map;
    }
  }

  /** Mutable Map.Entry used to offer reusable key/value objects to readers. */
  public static class ReusableEntry<K, V> implements Map.Entry<K, V> {
    private K key = null;
    private V value = null;

    public void set(K key, V value) {
      this.key = key;
      this.value = value;
    }

    @Override
    public K getKey() {
      return key;
    }

    @Override
    public V getValue() {
      return value;
    }

    @Override
    public V setValue(V value) {
      V lastValue = this.value;
      this.value = value;
      return lastValue;
    }
  }

  /**
   * Base class for struct readers. Reads each field in order into an intermediate object
   * that subclasses create and populate (e.g. an Avro Record).
   *
   * @param <T> the struct type returned
   * @param <I> the intermediate type populated while reading
   */
  public abstract static class StructReader<T, I> implements ParquetValueReader<T> {
    private interface Setter<R> {
      void set(R record, int pos, Object reuse);
    }

    private final ParquetValueReader<?>[] readers;
    private final TripleIterator<?> column;
    private final TripleIterator<?>[] columns;
    private final Setter<I>[] setters;
    private final List<TripleIterator<?>> children;

    @SuppressWarnings("unchecked")
    protected StructReader(List<Type> types, List<ParquetValueReader<?>> readers) {
      this.readers = (ParquetValueReader<?>[]) Array.newInstance(
          ParquetValueReader.class, readers.size());
      this.columns = (TripleIterator<?>[]) Array.newInstance(TripleIterator.class, readers.size());
      this.setters = (Setter<I>[]) Array.newInstance(Setter.class, readers.size());

      ImmutableList.Builder<TripleIterator<?>> columnsBuilder = ImmutableList.builder();
      for (int i = 0; i < readers.size(); i += 1) {
        ParquetValueReader<?> reader = readers.get(i);
        this.readers[i] = readers.get(i);
        this.columns[i] = reader.column();
        this.setters[i] = newSetter(reader, types.get(i));
        columnsBuilder.addAll(reader.columns());
      }

      this.children = columnsBuilder.build();
      // with no real columns (all fields missing), fall back to the shared null column
      if (children.size() > 0) {
        this.column = children.get(0);
      } else {
        this.column = NullReader.NULL_COLUMN;
      }
    }

    @Override
    public final void setPageSource(PageReadStore pageStore) {
      for (int i = 0; i < readers.length; i += 1) {
        readers[i].setPageSource(pageStore);
      }
    }

    @Override
    public final TripleIterator<?> column() {
      return column;
    }

    @Override
    public final T read(T reuse) {
      I intermediate = newStructData(reuse);

      for (int i = 0; i < readers.length; i += 1) {
        set(intermediate, i, readers[i].read(get(intermediate, i)));
        //setters[i].set(intermediate, i, get(intermediate, i));
      }

      return buildStruct(intermediate);
    }

    @Override
    public List<TripleIterator<?>> columns() {
      return children;
    }

    // builds a setter specialized to the field's primitive type to avoid boxing;
    // NOTE(review): setters are built but read() currently uses the generic set path (see above)
    @SuppressWarnings("unchecked")
    private <E> Setter<I> newSetter(ParquetValueReader<E> reader, Type type) {
      if (reader instanceof UnboxedReader && type.isPrimitive()) {
        UnboxedReader<?> unboxed = (UnboxedReader<?>) reader;
        switch (type.asPrimitiveType().getPrimitiveTypeName()) {
          case BOOLEAN:
            return (record, pos, ignored) -> setBoolean(record, pos, unboxed.readBoolean());
          case INT32:
            return (record, pos, ignored) -> setInteger(record, pos, unboxed.readInteger());
          case INT64:
            return (record, pos, ignored) -> setLong(record, pos, unboxed.readLong());
          case FLOAT:
            return (record, pos, ignored) -> setFloat(record, pos, unboxed.readFloat());
          case DOUBLE:
            return (record, pos, ignored) -> setDouble(record, pos, unboxed.readDouble());
          case FIXED_LEN_BYTE_ARRAY:
          case BINARY:
            return (record, pos, ignored) -> set(record, pos, unboxed.readBinary());
          default:
            throw new UnsupportedOperationException("Unsupported type: " + type);
        }
      }

      // TODO: Add support for options to avoid the null check
      return (record, pos, reuse) -> {
        Object obj = reader.read((E) reuse);
        if (obj != null) {
          set(record, pos, obj);
        } else {
          setNull(record, pos);
        }
      };
    }

    @SuppressWarnings("unchecked")
    private <E> E get(I intermediate, int pos) {
      return (E) getField(intermediate, pos);
    }

    // creates or recycles the intermediate object fields are written into
    protected abstract I newStructData(T reuse);

    // returns the current value at pos, offered to the field reader for reuse
    protected abstract Object getField(I intermediate, int pos);

    // converts the populated intermediate object into the returned struct
    protected abstract T buildStruct(I struct);

    /**
     * Used to set a struct value by position.
     * <p>
     * To avoid boxing, override {@link #setInteger(Object, int, int)} and similar methods.
     *
     * @param struct a struct object created by {@link #newStructData(Object)}
     * @param pos the position in the struct to set
     * @param value the value to set
     */
    protected abstract void set(I struct, int pos, Object value);

    protected void setNull(I struct, int pos) {
      set(struct, pos, null);
    }

    protected void setBoolean(I struct, int pos, boolean value) {
      set(struct, pos, value);
    }

    protected void setInteger(I struct, int pos, int value) {
      set(struct, pos, value);
    }

    protected void setLong(I struct, int pos, long value) {
      set(struct, pos, value);
    }

    protected void setFloat(I struct, int pos, float value) {
      set(struct, pos, value);
    }

    protected void setDouble(I struct, int pos, double value) {
      set(struct, pos, value);
    }
  }
}
2,185
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/PageIterator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.google.common.base.Preconditions;
import org.apache.parquet.CorruptDeltaByteArrays;
import org.apache.parquet.bytes.ByteBufferInputStream;
import org.apache.parquet.bytes.BytesInput;
import org.apache.parquet.bytes.BytesUtils;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.column.Dictionary;
import org.apache.parquet.column.Encoding;
import org.apache.parquet.column.page.DataPage;
import org.apache.parquet.column.page.DataPageV1;
import org.apache.parquet.column.page.DataPageV2;
import org.apache.parquet.column.values.RequiresPreviousReader;
import org.apache.parquet.column.values.ValuesReader;
import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridDecoder;
import org.apache.parquet.io.ParquetDecodingException;
import org.apache.parquet.io.api.Binary;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;

import static java.lang.String.format;
import static org.apache.parquet.column.ValuesType.DEFINITION_LEVEL;
import static org.apache.parquet.column.ValuesType.REPETITION_LEVEL;
import static org.apache.parquet.column.ValuesType.VALUES;

/**
 * Iterates over the (repetition level, definition level, value) triples of a single
 * Parquet column, one data page at a time.
 * <p>
 * Pages are fed in via {@link #setPage(DataPage)}; both V1 and V2 page layouts are
 * supported. Values are read through the typed {@code next*} methods, which advance
 * the level iterators eagerly: {@code setPage} loads the first triple's levels, and
 * each {@code next*} call advances the levels before decoding the value, so the
 * current levels always lead the value stream by one triple.
 * <p>
 * NOTE(review): this class is dictionary-aware (see {@link #initDataReader}); the
 * TODO there suggests moving dictionary handling into wrappers eventually.
 */
abstract class PageIterator<T> implements TripleIterator<T> {
  private static final Logger LOG = LoggerFactory.getLogger(PageIterator.class);

  /**
   * Creates a PageIterator whose {@code next()} returns the boxed value for the
   * column's physical (primitive) type.
   *
   * @param desc the column to read
   * @param writerVersion the "created by" string from the file footer, used to detect
   *                      writers affected by PARQUET-246 (corrupt delta byte arrays)
   * @throws UnsupportedOperationException for INT96 or other unhandled primitives
   */
  @SuppressWarnings("unchecked")
  static <T> PageIterator<T> newIterator(ColumnDescriptor desc, String writerVersion) {
    switch (desc.getType()) {
      case BOOLEAN:
        return (PageIterator<T>) new PageIterator<Boolean>(desc, writerVersion) {
          @Override
          public Boolean next() {
            return nextBoolean();
          }
        };
      case INT32:
        return (PageIterator<T>) new PageIterator<Integer>(desc, writerVersion) {
          @Override
          public Integer next() {
            return nextInteger();
          }
        };
      case INT64:
        return (PageIterator<T>) new PageIterator<Long>(desc, writerVersion) {
          @Override
          public Long next() {
            return nextLong();
          }
        };
      case FLOAT:
        return (PageIterator<T>) new PageIterator<Float>(desc, writerVersion) {
          @Override
          public Float next() {
            return nextFloat();
          }
        };
      case DOUBLE:
        return (PageIterator<T>) new PageIterator<Double>(desc, writerVersion) {
          @Override
          public Double next() {
            return nextDouble();
          }
        };
      case FIXED_LEN_BYTE_ARRAY:
      case BINARY:
        return (PageIterator<T>) new PageIterator<Binary>(desc, writerVersion) {
          @Override
          public Binary next() {
            return nextBinary();
          }
        };
      default:
        throw new UnsupportedOperationException("Unsupported primitive type: " + desc.getType());
    }
  }

  private final ColumnDescriptor desc;
  private final String writerVersion;

  // iterator state: position within the current page
  private boolean hasNext = false;
  private int triplesRead = 0;
  private int currentDL = 0;
  private int currentRL = 0;

  // page bookkeeping: decoders for the page most recently passed to setPage
  private Dictionary dict = null;
  private DataPage page = null;
  private int triplesCount = 0;
  private Encoding valueEncoding = null;
  private IntIterator definitionLevels = null;
  private IntIterator repetitionLevels = null;
  private ValuesReader values = null;

  private PageIterator(ColumnDescriptor desc, String writerVersion) {
    this.desc = desc;
    this.writerVersion = writerVersion;
  }

  /**
   * Binds this iterator to a new data page and loads the first triple's levels.
   * <p>
   * The visitor dispatches to the V1 or V2 initialization path; its return value is
   * unused (always null).
   */
  public void setPage(DataPage page) {
    Preconditions.checkNotNull(page, "Cannot read from null page");
    this.page = page;
    this.page.accept(new DataPage.Visitor<ValuesReader>() {
      @Override
      public ValuesReader visit(DataPageV1 dataPageV1) {
        initFromPage(dataPageV1);
        return null;
      }

      @Override
      public ValuesReader visit(DataPageV2 dataPageV2) {
        initFromPage(dataPageV2);
        return null;
      }
    });
    this.triplesRead = 0;
    advance();
  }

  // must be set before reading a dictionary-encoded page; see initDataReader
  public void setDictionary(Dictionary dict) {
    this.dict = dict;
  }

  /** Clears all page state so the iterator can be reused for another column chunk. */
  public void reset() {
    this.page = null;
    this.triplesCount = 0;
    this.triplesRead = 0;
    this.definitionLevels = null;
    this.repetitionLevels = null;
    this.values = null;
    this.hasNext = false;
  }

  /** Returns the number of triples in the current page. */
  public int currentPageCount() {
    return triplesCount;
  }

  @Override
  public boolean hasNext() {
    return hasNext;
  }

  @Override
  public int currentDefinitionLevel() {
    Preconditions.checkArgument(currentDL >= 0, "Should not read definition, past page end");
    return currentDL;
  }

  @Override
  public int currentRepetitionLevel() {
    // NOTE(review): disabled check below tests currentDL, not currentRL — looks like a
    // copy-paste slip; confirm before re-enabling.
    // Preconditions.checkArgument(currentDL >= 0, "Should not read repetition, past page end");
    return currentRL;
  }

  @Override
  public boolean nextBoolean() {
    advance();
    try {
      return values.readBoolean();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public int nextInteger() {
    advance();
    try {
      return values.readInteger();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public long nextLong() {
    advance();
    try {
      return values.readLong();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public float nextFloat() {
    advance();
    try {
      return values.readFloat();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public double nextDouble() {
    advance();
    try {
      return values.readDouble();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public Binary nextBinary() {
    advance();
    try {
      return values.readBytes();
    } catch (RuntimeException e) {
      throw handleRuntimeException(e);
    }
  }

  @Override
  public <V> V nextNull() {
    advance();
    // values do not contain nulls
    return null;
  }

  // loads the next triple's levels, or marks the page exhausted (levels become -1)
  private void advance() {
    if (triplesRead < triplesCount) {
      this.currentDL = definitionLevels.nextInt();
      this.currentRL = repetitionLevels.nextInt();
      this.triplesRead += 1;
      this.hasNext = true;
    } else {
      this.currentDL = -1;
      this.currentRL = -1;
      this.hasNext = false;
    }
  }

  // Wraps decode failures with position context; always throws (the return type only
  // lets callers write `throw handleRuntimeException(e)` so the compiler sees an exit).
  RuntimeException handleRuntimeException(RuntimeException e) {
    if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, valueEncoding) &&
        e instanceof ArrayIndexOutOfBoundsException) {
      // this is probably PARQUET-246, which may happen if reading data with
      // MR because this can't be detected without reading all footers
      throw new ParquetDecodingException("Read failure possibly due to " +
          "PARQUET-246: try setting parquet.split.files to false",
          new ParquetDecodingException(
              format("Can't read value in column %s at value %d out of %d in current page. " +
                      "repetition level: %d, definition level: %d",
                  desc, triplesRead, triplesCount, currentRL, currentDL),
              e));
    }
    throw new ParquetDecodingException(
        format("Can't read value in column %s at value %d out of %d in current page. " +
                "repetition level: %d, definition level: %d",
            desc, triplesRead, triplesCount, currentRL, currentDL),
        e);
  }

  // Builds the value decoder for a page and initializes it at the stream's current
  // position (levels were consumed first for V1 pages).
  private void initDataReader(Encoding dataEncoding, ByteBufferInputStream in, int valueCount) {
    ValuesReader previousReader = values;

    this.valueEncoding = dataEncoding;

    // TODO: May want to change this so that this class is not dictionary-aware.
    // For dictionary columns, this class could rely on wrappers to correctly handle dictionaries
    // This isn't currently possible because RLE must be read by getDictionaryBasedValuesReader
    if (dataEncoding.usesDictionary()) {
      if (dict == null) {
        throw new ParquetDecodingException(
            "could not read page in col " + desc + " as the dictionary was missing for encoding " + dataEncoding);
      }
      this.values = dataEncoding.getDictionaryBasedValuesReader(desc, VALUES, dict);
    } else {
      this.values = dataEncoding.getValuesReader(desc, VALUES);
    }

//    if (dataEncoding.usesDictionary() && converter.hasDictionarySupport()) {
//      bindToDictionary(dictionary);
//    } else {
//      bind(path.getType());
//    }

    try {
      values.initFromPage(valueCount, in);
    } catch (IOException e) {
      throw new ParquetDecodingException("could not read page in col " + desc, e);
    }

    if (CorruptDeltaByteArrays.requiresSequentialReads(writerVersion, dataEncoding) &&
        previousReader != null && previousReader instanceof RequiresPreviousReader) {
      // previous reader can only be set if reading sequentially
      ((RequiresPreviousReader) values).setPreviousReader(previousReader);
    }
  }

  // V1 pages store RL, DL, and values back-to-back in one buffer; each reader picks up
  // where the previous one stopped in the shared input stream.
  private void initFromPage(DataPageV1 page) {
    this.triplesCount = page.getValueCount();
    ValuesReader rlReader = page.getRlEncoding().getValuesReader(desc, REPETITION_LEVEL);
    ValuesReader dlReader = page.getDlEncoding().getValuesReader(desc, DEFINITION_LEVEL);
    this.repetitionLevels = new ValuesReaderIntIterator(rlReader);
    this.definitionLevels = new ValuesReaderIntIterator(dlReader);
    try {
      BytesInput bytes = page.getBytes();
      LOG.debug("page size {} bytes and {} records", bytes.size(), triplesCount);
      LOG.debug("reading repetition levels at 0");
      ByteBufferInputStream in = bytes.toInputStream();
      rlReader.initFromPage(triplesCount, in);
      LOG.debug("reading definition levels at {}", in.position());
      dlReader.initFromPage(triplesCount, in);
      LOG.debug("reading data at {}", in.position());
      initDataReader(page.getValueEncoding(), in, page.getValueCount());
    } catch (IOException e) {
      throw new ParquetDecodingException("could not read page " + page + " in col " + desc, e);
    }
  }

  // V2 pages carry levels in separate buffers, always RLE-encoded.
  private void initFromPage(DataPageV2 page) {
    this.triplesCount = page.getValueCount();
    this.repetitionLevels = newRLEIterator(desc.getMaxRepetitionLevel(), page.getRepetitionLevels());
    this.definitionLevels = newRLEIterator(desc.getMaxDefinitionLevel(), page.getDefinitionLevels());
    LOG.debug("page data size {} bytes and {} records", page.getData().size(), triplesCount);
    try {
      initDataReader(page.getDataEncoding(), page.getData().toInputStream(), triplesCount);
    } catch (IOException e) {
      throw new ParquetDecodingException("could not read page " + page + " in col " + desc, e);
    }
  }

  // maxLevel == 0 means the level is always 0 and nothing was written for it
  private IntIterator newRLEIterator(int maxLevel, BytesInput bytes) {
    try {
      if (maxLevel == 0) {
        return new NullIntIterator();
      }
      return new RLEIntIterator(
          new RunLengthBitPackingHybridDecoder(
              BytesUtils.getWidthFromMaxInt(maxLevel),
              bytes.toInputStream()));
    } catch (IOException e) {
      throw new ParquetDecodingException("could not read levels in page for col " + desc, e);
    }
  }

  /** Minimal primitive-int iterator used for level streams (avoids boxing). */
  static abstract class IntIterator {
    abstract int nextInt();
  }

  /** Adapts a generic ValuesReader (V1 level encodings) to IntIterator. */
  static class ValuesReaderIntIterator extends IntIterator {
    ValuesReader delegate;

    ValuesReaderIntIterator(ValuesReader delegate) {
      super();
      this.delegate = delegate;
    }

    @Override
    int nextInt() {
      return delegate.readInteger();
    }
  }

  /** Adapts the RLE/bit-packed hybrid decoder (V2 levels) to IntIterator. */
  static class RLEIntIterator extends IntIterator {
    RunLengthBitPackingHybridDecoder delegate;

    RLEIntIterator(RunLengthBitPackingHybridDecoder delegate) {
      this.delegate = delegate;
    }

    @Override
    int nextInt() {
      try {
        return delegate.readInt();
      } catch (IOException e) {
        throw new ParquetDecodingException(e);
      }
    }
  }

  /** Constant-zero iterator for levels whose max is 0 (nothing encoded in the page). */
  private static final class NullIntIterator extends IntIterator {
    @Override
    int nextInt() {
      return 0;
    }
  }
}
2,186
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetWriteAdapter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import org.apache.parquet.hadoop.ParquetWriter;

import java.io.IOException;

/**
 * Exposes a Parquet {@link ParquetWriter} through the {@link FileAppender} interface,
 * counting records as they are written and converting write failures to
 * {@link RuntimeIOException}.
 */
public class ParquetWriteAdapter<D> implements FileAppender<D> {
  private ParquetWriter<D> delegate;
  private long recordCount;

  public ParquetWriteAdapter(ParquetWriter<D> writer) throws IOException {
    this.delegate = writer;
    this.recordCount = 0L;
  }

  /**
   * Writes one record to the underlying Parquet writer.
   * <p>
   * The record counter is bumped before the write, so a failed write is still counted.
   *
   * @throws RuntimeIOException if the underlying writer throws an IOException
   */
  @Override
  public void add(D datum) {
    recordCount += 1L;
    try {
      delegate.write(datum);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write record %s", datum);
    }
  }

  /** Returns record-count-only metrics; per-column stats are not collected here. */
  @Override
  public Metrics metrics() {
    return new Metrics(recordCount, null, null, null);
  }

  /** Closes the underlying writer once; subsequent calls are no-ops. */
  @Override
  public void close() throws IOException {
    if (delegate == null) {
      return;
    }
    delegate.close();
    this.delegate = null;
  }
}
2,187
0
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg
Create_ds/iceberg/parquet/src/main/java/com/netflix/iceberg/parquet/ParquetIO.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.parquet;

import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.hadoop.HadoopOutputFile;
import com.netflix.iceberg.io.DelegatingInputStream;
import com.netflix.iceberg.io.DelegatingOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.parquet.hadoop.util.HadoopStreams;
import org.apache.parquet.io.InputFile;
import org.apache.parquet.io.OutputFile;
import org.apache.parquet.io.DelegatingSeekableInputStream;
import org.apache.parquet.io.DelegatingPositionOutputStream;
import org.apache.parquet.io.PositionOutputStream;
import org.apache.parquet.io.SeekableInputStream;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import static org.apache.parquet.hadoop.util.HadoopOutputFile.fromPath;
import static org.apache.parquet.hadoop.util.HadoopInputFile.fromStatus;

/**
 * Methods in this class translate from the IO API to Parquet's IO API.
 * <p>
 * Each factory first checks for the Hadoop-backed Iceberg implementations and unwraps
 * them to native Parquet Hadoop classes (faster path, preserves Hadoop features);
 * otherwise it wraps the Iceberg object in a plain adapter.
 */
class ParquetIO {
  private ParquetIO() {
  }

  /**
   * Converts an Iceberg input file to a Parquet {@link InputFile}.
   *
   * @throws RuntimeIOException if building the Hadoop-backed Parquet file fails
   */
  static InputFile file(com.netflix.iceberg.io.InputFile file) {
    // TODO: use reflection to avoid depending on classes from iceberg-hadoop
    // TODO: use reflection to avoid depending on classes from hadoop
    if (file instanceof HadoopInputFile) {
      HadoopInputFile hfile = (HadoopInputFile) file;
      try {
        return fromStatus(hfile.getStat(), hfile.getConf());
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to create Parquet input file for %s", file);
      }
    }
    return new ParquetInputFile(file);
  }

  /**
   * Converts an Iceberg output file to a Parquet {@link OutputFile}, using the
   * Hadoop configuration carried by the file itself when available.
   */
  static OutputFile file(com.netflix.iceberg.io.OutputFile file) {
    if (file instanceof HadoopOutputFile) {
      HadoopOutputFile hfile = (HadoopOutputFile) file;
      try {
        return fromPath(hfile.getPath(), hfile.getConf());
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to create Parquet output file for %s", file);
      }
    }
    return new ParquetOutputFile(file);
  }

  /**
   * Converts an Iceberg output file to a Parquet {@link OutputFile} with an explicit
   * Hadoop configuration (overrides the file's own conf for the Hadoop path).
   */
  static OutputFile file(com.netflix.iceberg.io.OutputFile file, Configuration conf) {
    if (file instanceof HadoopOutputFile) {
      HadoopOutputFile hfile = (HadoopOutputFile) file;
      try {
        return fromPath(hfile.getPath(), conf);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to create Parquet output file for %s", file);
      }
    }
    return new ParquetOutputFile(file);
  }

  /**
   * Wraps an Iceberg seekable stream as a Parquet {@link SeekableInputStream}.
   * Unwraps to Parquet's Hadoop stream wrapper when the delegate is an FSDataInputStream.
   */
  static SeekableInputStream stream(com.netflix.iceberg.io.SeekableInputStream stream) {
    if (stream instanceof DelegatingInputStream) {
      InputStream wrapped = ((DelegatingInputStream) stream).getDelegate();
      if (wrapped instanceof FSDataInputStream) {
        return HadoopStreams.wrap((FSDataInputStream) wrapped);
      }
    }
    return new ParquetInputStreamAdapter(stream);
  }

  /**
   * Wraps an Iceberg position stream as a Parquet {@link PositionOutputStream}.
   * Unwraps to Parquet's Hadoop stream wrapper when the delegate is an FSDataOutputStream.
   */
  static PositionOutputStream stream(com.netflix.iceberg.io.PositionOutputStream stream) {
    if (stream instanceof DelegatingOutputStream) {
      OutputStream wrapped = ((DelegatingOutputStream) stream).getDelegate();
      if (wrapped instanceof FSDataOutputStream) {
        return HadoopStreams.wrap((FSDataOutputStream) wrapped);
      }
    }
    return new ParquetOutputStreamAdapter(stream);
  }

  // plain adapter: delegates byte reads via the superclass, position/seek directly
  private static class ParquetInputStreamAdapter extends DelegatingSeekableInputStream {
    private final com.netflix.iceberg.io.SeekableInputStream delegate;

    private ParquetInputStreamAdapter(com.netflix.iceberg.io.SeekableInputStream delegate) {
      super(delegate);
      this.delegate = delegate;
    }

    @Override
    public long getPos() throws IOException {
      return delegate.getPos();
    }

    @Override
    public void seek(long newPos) throws IOException {
      delegate.seek(newPos);
    }
  }

  private static class ParquetOutputStreamAdapter extends DelegatingPositionOutputStream {
    private final com.netflix.iceberg.io.PositionOutputStream delegate;

    private ParquetOutputStreamAdapter(com.netflix.iceberg.io.PositionOutputStream delegate) {
      super(delegate);
      this.delegate = delegate;
    }

    @Override
    public long getPos() throws IOException {
      return delegate.getPos();
    }
  }

  private static class ParquetOutputFile implements OutputFile {
    private final com.netflix.iceberg.io.OutputFile file;

    private ParquetOutputFile(com.netflix.iceberg.io.OutputFile file) {
      this.file = file;
    }

    // block-size hints are ignored; Iceberg's OutputFile API has no equivalent
    @Override
    public PositionOutputStream create(long ignored) throws IOException {
      return stream(file.create());
    }

    @Override
    public PositionOutputStream createOrOverwrite(long ignored) throws IOException {
      return stream(file.createOrOverwrite());
    }

    @Override
    public boolean supportsBlockSize() {
      return false;
    }

    @Override
    public long defaultBlockSize() {
      return 0;
    }
  }

  private static class ParquetInputFile implements InputFile {
    private final com.netflix.iceberg.io.InputFile file;

    private ParquetInputFile(com.netflix.iceberg.io.InputFile file) {
      this.file = file;
    }

    @Override
    public long getLength() throws IOException {
      return file.getLength();
    }

    @Override
    public SeekableInputStream newStream() throws IOException {
      return stream(file.newStream());
    }
  }
}
2,188
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/DataTestHelpers.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;

import java.util.List;
import java.util.Map;

/**
 * Assertion helpers that compare generic {@link Record} data structurally, guided by
 * an Iceberg type: structs field-by-field, lists element-by-element, and maps
 * key-by-key, recursing into nested types.
 */
public class DataTestHelpers {
  // static utility class: prevent instantiation
  private DataTestHelpers() {
  }

  /** Asserts that two records match, comparing each field according to its declared type. */
  public static void assertEquals(Types.StructType struct, Record expected, Record actual) {
    List<Types.NestedField> fields = struct.fields();
    for (int i = 0; i < fields.size(); i += 1) {
      Type fieldType = fields.get(i).type();

      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);

      assertEquals(fieldType, expectedValue, actualValue);
    }
  }

  /** Asserts that two lists match element-by-element according to the list's element type. */
  public static void assertEquals(Types.ListType list, List<?> expected, List<?> actual) {
    Type elementType = list.elementType();

    Assert.assertEquals("List size should match", expected.size(), actual.size());

    for (int i = 0; i < expected.size(); i += 1) {
      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);

      assertEquals(elementType, expectedValue, actualValue);
    }
  }

  /**
   * Asserts that two maps match: same size, every expected key present in actual, and
   * values equal according to the map's value type.
   * <p>
   * The explicit containsKey check distinguishes a missing key from a key mapped to
   * null, which get(key) alone cannot.
   */
  public static void assertEquals(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) {
    Type valueType = map.valueType();

    Assert.assertEquals("Map size should match", expected.size(), actual.size());

    for (Object expectedKey : expected.keySet()) {
      Assert.assertTrue("Map should contain key: " + expectedKey, actual.containsKey(expectedKey));

      Object expectedValue = expected.get(expectedKey);
      Object actualValue = actual.get(expectedKey);

      assertEquals(valueType, expectedValue, actualValue);
    }
  }

  // dispatches on the Iceberg type ID; nulls are only equal to nulls
  private static void assertEquals(Type type, Object expected, Object actual) {
    if (expected == null && actual == null) {
      return;
    }

    switch (type.typeId()) {
      case BOOLEAN:
      case INTEGER:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case STRING:
      case DATE:
      case TIME:
      case TIMESTAMP:
      case UUID:
      case BINARY:
      case DECIMAL:
        // these map to types with value-based equals
        Assert.assertEquals("Primitive value should be equal to expected", expected, actual);
        break;
      case FIXED:
        // byte[] does not override equals, so compare contents explicitly
        Assert.assertTrue("Expected should be a byte[]", expected instanceof byte[]);
        Assert.assertTrue("Actual should be a byte[]", actual instanceof byte[]);
        Assert.assertArrayEquals("Array contents should be equal",
            (byte[]) expected, (byte[]) actual);
        break;
      case STRUCT:
        Assert.assertTrue("Expected should be a Record", expected instanceof Record);
        Assert.assertTrue("Actual should be a Record", actual instanceof Record);
        assertEquals(type.asStructType(), (Record) expected, (Record) actual);
        break;
      case LIST:
        Assert.assertTrue("Expected should be a List", expected instanceof List);
        Assert.assertTrue("Actual should be a List", actual instanceof List);
        assertEquals(type.asListType(), (List<?>) expected, (List<?>) actual);
        break;
      case MAP:
        Assert.assertTrue("Expected should be a Map", expected instanceof Map);
        Assert.assertTrue("Actual should be a Map", actual instanceof Map);
        assertEquals(type.asMapType(), (Map<?, ?>) expected, (Map<?, ?>) actual);
        break;
      default:
        throw new IllegalArgumentException("Not a supported type: " + type);
    }
  }
}
2,189
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/TestLocalScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.AppendFiles;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableProperties;
import com.netflix.iceberg.Tables;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.data.avro.DataWriter;
import com.netflix.iceberg.data.parquet.GenericParquetWriter;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Set;

import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static com.netflix.iceberg.DataFiles.fromInputFile;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.hadoop.HadoopOutputFile.fromPath;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * End-to-end tests for local generic scans through {@link IcebergGenerics}, run once
 * per file format (Parquet and Avro).
 * <p>
 * Each test case works against a shared unpartitioned table with three 3-record data
 * files whose id ranges are [0,2], [10,12], and [20,22]; the committed metrics carry
 * lower/upper bounds for the id column so filter tests can exercise file pruning.
 */
@RunWith(Parameterized.class)
public class TestLocalScan {
  private static final Schema SCHEMA = new Schema(
      required(1, "id", Types.LongType.get()),
      optional(2, "data", Types.StringType.get()));

  private static final Configuration CONF = new Configuration();
  private static final Tables TABLES = new HadoopTables(CONF);

  @Rule
  public final TemporaryFolder temp = new TemporaryFolder();

  @Parameterized.Parameters
  public static Object[][] parameters() {
    return new Object[][] {
        new Object[] { "parquet" },
        new Object[] { "avro" }
    };
  }

  private final FileFormat format;

  public TestLocalScan(String format) {
    this.format = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));
  }

  // shared fixture built fresh for every test by createTables()
  private String sharedTableLocation = null;
  private Table sharedTable = null;
  private List<Record> file1Records = null;
  private List<Record> file2Records = null;
  private List<Record> file3Records = null;

  @Before
  public void createTables() throws IOException {
    File location = temp.newFolder("shared");
    // delete the folder so the table location does not already exist
    Assert.assertTrue(location.delete());
    this.sharedTableLocation = location.toString();
    this.sharedTable = TABLES.create(
        SCHEMA, PartitionSpec.unpartitioned(),
        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
        sharedTableLocation);

    Record record = GenericRecord.create(SCHEMA);

    this.file1Records = Lists.newArrayList(
        record.copy(ImmutableMap.of("id", 0L, "data", "clarification")),
        record.copy(ImmutableMap.of("id", 1L, "data", "risky")),
        record.copy(ImmutableMap.of("id", 2L, "data", "falafel"))
    );
    InputFile file1 = writeFile(sharedTableLocation, format.addExtension("file-1"), file1Records);

    // NOTE(review): nullData is built but never written to any file — appears unused;
    // confirm whether a null-value case was intended for file 2.
    Record nullData = record.copy();
    nullData.setField("id", 11L);
    nullData.setField("data", null);

    this.file2Records = Lists.newArrayList(
        record.copy(ImmutableMap.of("id", 10L, "data", "clammy")),
        record.copy(ImmutableMap.of("id", 11L, "data", "evacuate")),
        record.copy(ImmutableMap.of("id", 12L, "data", "tissue"))
    );
    InputFile file2 = writeFile(sharedTableLocation, format.addExtension("file-2"), file2Records);

    this.file3Records = Lists.newArrayList(
        record.copy(ImmutableMap.of("id", 20L, "data", "ocean")),
        record.copy(ImmutableMap.of("id", 21L, "data", "holistic")),
        record.copy(ImmutableMap.of("id", 22L, "data", "preventative"))
    );
    InputFile file3 = writeFile(sharedTableLocation, format.addExtension("file-3"), file3Records);

    // commit the test data; the metrics attach id bounds so scans can prune files
    sharedTable.newAppend()
        .appendFile(DataFiles.builder(PartitionSpec.unpartitioned())
            .withInputFile(file1)
            .withMetrics(new Metrics(3L,
                null, // no column sizes
                ImmutableMap.of(1, 3L), // value count
                ImmutableMap.of(1, 0L), // null count
                ImmutableMap.of(1, longToBuffer(0L)), // lower bounds
                ImmutableMap.of(1, longToBuffer(2L)))) // upper bounds
            .build())
        .appendFile(DataFiles.builder(PartitionSpec.unpartitioned())
            .withInputFile(file2)
            .withMetrics(new Metrics(3L,
                null, // no column sizes
                ImmutableMap.of(1, 3L), // value count
                ImmutableMap.of(1, 0L), // null count
                ImmutableMap.of(1, longToBuffer(10L)), // lower bounds
                ImmutableMap.of(1, longToBuffer(12L)))) // upper bounds
            .build())
        .appendFile(DataFiles.builder(PartitionSpec.unpartitioned())
            .withInputFile(file3)
            .withMetrics(new Metrics(3L,
                null, // no column sizes
                ImmutableMap.of(1, 3L), // value count
                ImmutableMap.of(1, 0L), // null count
                ImmutableMap.of(1, longToBuffer(20L)), // lower bounds
                ImmutableMap.of(1, longToBuffer(22L)))) // upper bounds
            .build())
        .commit();
  }

  /** Round-trips 1000 random records through a fresh table split across files of 200. */
  @Test
  public void testRandomData() throws IOException {
    List<Record> expected = RandomGenericData.generate(SCHEMA, 1000, 435691832918L);

    File location = temp.newFolder(format.name());
    Assert.assertTrue(location.delete());
    Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(),
        ImmutableMap.of(TableProperties.DEFAULT_FILE_FORMAT, format.name()),
        location.toString());

    AppendFiles append = table.newAppend();

    int fileNum = 0;
    int recordsPerFile = 200;
    Iterator<Record> iter = expected.iterator();
    while (iter.hasNext()) {
      Path path = new Path(location.toString(), format.addExtension("file-" + fileNum));
      int numRecords;

      List<Record> records = Lists.newArrayList();
      for (numRecords = 0; numRecords < recordsPerFile && iter.hasNext(); numRecords += 1) {
        records.add(iter.next());
      }

      writeFile(location.toString(), format.addExtension("file-" + fileNum), records);
      append.appendFile(fromInputFile(HadoopInputFile.fromPath(path, CONF), numRecords));

      fileNum += 1;
    }

    append.commit();

    // compare as sets: scan order across files is not guaranteed
    Set<Record> records = Sets.newHashSet(IcebergGenerics.read(table).build());
    Assert.assertEquals("Should produce correct number of records",
        expected.size(), records.size());
    Assert.assertEquals("Random record set should match",
        Sets.newHashSet(expected), records);
  }

  /** An unfiltered scan returns every record from all three data files. */
  @Test
  public void testFullScan() {
    Iterable<Record> results = IcebergGenerics.read(sharedTable).build();

    Set<Record> expected = Sets.newHashSet();
    expected.addAll(file1Records);
    expected.addAll(file2Records);
    expected.addAll(file3Records);

    Set<Record> records = Sets.newHashSet(results);
    Assert.assertEquals("Should produce correct number of records",
        expected.size(), records.size());
    Assert.assertEquals("Random record set should match",
        Sets.newHashSet(expected), records);
  }

  /** Row filters on id should return only matching rows (and can prune whole files). */
  @Test
  public void testFilter() {
    Iterable<Record> result = IcebergGenerics.read(sharedTable).where(lessThan("id", 3)).build();

    Assert.assertEquals("Records should match file 1",
        Sets.newHashSet(file1Records), Sets.newHashSet(result));

    result = IcebergGenerics.read(sharedTable).where(lessThanOrEqual("id", 1)).build();

    Assert.assertEquals("Records should match file 1 without id 2",
        Sets.newHashSet(filter(file1Records, r -> (Long) r.getField("id") <= 1)),
        Sets.newHashSet(result));
  }

  /** Selecting a single column yields one-field records containing only that column. */
  @Test
  public void testProject() {
    Iterable<Record> results = IcebergGenerics.read(sharedTable).select("id").build();

    Set<Long> expected = Sets.newHashSet();
    expected.addAll(Lists.transform(file1Records, record -> (Long) record.getField("id")));
    expected.addAll(Lists.transform(file2Records, record -> (Long) record.getField("id")));
    expected.addAll(Lists.transform(file3Records, record -> (Long) record.getField("id")));

    results.forEach(record ->
        Assert.assertEquals("Record should have one projected field", 1, record.size()));

    Assert.assertEquals("Should project only id columns",
        expected, Sets.newHashSet(transform(results, record -> (Long) record.getField("id"))));
  }

  /**
   * Filtering on a column that is not selected: the scan must still read the filter
   * column, so returned records carry two fields even though only "data" was selected.
   */
  @Test
  public void testProjectWithMissingFilterColumn() {
    Iterable<Record> results = IcebergGenerics.read(sharedTable)
        .where(Expressions.greaterThanOrEqual("id", 1))
        .where(Expressions.lessThan("id", 21))
        .select("data").build();

    Set<String> expected = Sets.newHashSet();
    for (Record record : concat(file1Records, file2Records, file3Records)) {
      Long id = (Long) record.getField("id");
      if (id >= 1 && id < 21) {
        expected.add(record.getField("data").toString());
      }
    }

    results.forEach(record ->
        Assert.assertEquals("Record should have two projected fields", 2, record.size()));

    Assert.assertEquals("Should project correct rows",
        expected,
        Sets.newHashSet(transform(results, record -> record.getField("data").toString())));
  }

  // Writes records as Avro or Parquet, picking the format from the file extension.
  // (The local `format` variable intentionally shadows the test's format field — the
  // file name, not the parameterized format, decides how it is written.)
  private InputFile writeFile(String location, String filename, List<Record> records)
      throws IOException {
    Path path = new Path(location, filename);
    FileFormat format = FileFormat.fromFileName(filename);
    Preconditions.checkNotNull(format, "Cannot determine format for file: %s", filename);
    switch (format) {
      case AVRO:
        try (FileAppender<Record> appender = Avro.write(fromPath(path, CONF))
            .schema(SCHEMA)
            .createWriterFunc(DataWriter::create)
            .named(format.name())
            .build()) {
          appender.addAll(records);
        }
        return HadoopInputFile.fromPath(path, CONF);

      case PARQUET:
        try (FileAppender<Record> appender = Parquet.write(fromPath(path, CONF))
            .schema(SCHEMA)
            .createWriterFunc(GenericParquetWriter::buildWriter)
            .build()) {
          appender.addAll(records);
        }
        return HadoopInputFile.fromPath(path, CONF);

      default:
        throw new UnsupportedOperationException("Cannot write format: " + format);
    }
  }

  // serializes a long as the 8-byte little-endian bound value used in Metrics maps
  private static ByteBuffer longToBuffer(long value) {
    return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, value);
  }
}
2,190
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/TestReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.data; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Schema; import com.netflix.iceberg.types.Comparators; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.IOException; import java.util.List; import java.util.Map; public abstract class TestReadProjection { protected abstract Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException; @Rule public TemporaryFolder temp = new TemporaryFolder(); @Test public void testFullProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(schema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Record projected = writeAndRead("full_projection", schema, schema, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.getField("data")); Assert.assertTrue("Should contain the correct data value", cmp == 0); } @Test 
public void testReorderedFullProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(schema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("full_projection", schema, reordered, record); Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString()); Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1)); } @Test public void testReorderedProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(schema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Schema reordered = new Schema( Types.NestedField.optional(2, "missing_1", Types.StringType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()), Types.NestedField.optional(3, "missing_2", Types.LongType.get()) ); Record projected = writeAndRead("full_projection", schema, reordered, record); Assert.assertNull("Should contain the correct 0 value", projected.get(0)); Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString()); Assert.assertNull("Should contain the correct 2 value", projected.get(2)); } @Test public void testEmptyProjection() throws Exception { Schema schema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(schema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Record projected = 
writeAndRead("empty_projection", schema, schema.select(), record); Assert.assertNotNull("Should read a non-null record", projected); try { projected.get(0); Assert.fail("Should not retrieve value with ordinal 0"); } catch (ArrayIndexOutOfBoundsException e) { // this is expected because there are no values } } @Test public void testBasicProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record); Assert.assertNull("Should not project data", projected.getField("data")); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Schema dataOnly = new Schema( Types.NestedField.optional(1, "data", Types.StringType.get()) ); projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.getField("data")); Assert.assertTrue("Should contain the correct data value", cmp == 0); } @Test public void testRename() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "data", Types.StringType.get()) ); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); record.setField("data", "test"); Schema readSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(1, "renamed", Types.StringType.get()) ); Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record); 
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); int cmp = Comparators.charSequences() .compare("test", (CharSequence) projected.getField("renamed")); Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0); } @Test public void testNestedStructProjection() throws Exception { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); Record location = GenericRecord.create(writeSchema.findType("location").asStructType()); location.setField("lat", 52.995143f); location.setField("long", -1.539054f); record.setField("location", location); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Record projectedLocation = (Record) projected.getField("location"); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Assert.assertNull("Should not project location", projectedLocation); Schema latOnly = new Schema( Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()) )) ); projected = writeAndRead("latitude_only", writeSchema, latOnly, record); projectedLocation = (Record) projected.getField("location"); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project location", projected.getField("location")); Assert.assertNull("Should not project longitude", projectedLocation.getField("long")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.getField("lat"), 0.000001f); Schema longOnly = new Schema( 
Types.NestedField.optional(3, "location", Types.StructType.of( Types.NestedField.required(2, "long", Types.FloatType.get()) )) ); projected = writeAndRead("longitude_only", writeSchema, longOnly, record); projectedLocation = (Record) projected.getField("location"); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project location", projected.getField("location")); Assert.assertNull("Should not project latitutde", projectedLocation.getField("lat")); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.getField("long"), 0.000001f); Schema locationOnly = writeSchema.select("location"); projected = writeAndRead("location_only", writeSchema, locationOnly, record); projectedLocation = (Record) projected.getField("location"); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project location", projected.getField("location")); Assert.assertEquals("Should project latitude", 52.995143f, (float) projectedLocation.getField("lat"), 0.000001f); Assert.assertEquals("Should project longitude", -1.539054f, (float) projectedLocation.getField("long"), 0.000001f); } @Test public void testMapProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "properties", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get())) ); Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B"); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); record.setField("properties", properties); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Assert.assertNull("Should not project properties map", 
projected.getField("properties")); Schema keyOnly = writeSchema.select("properties.key"); projected = writeAndRead("key_only", writeSchema, keyOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.getField("properties"))); Schema valueOnly = writeSchema.select("properties.value"); projected = writeAndRead("value_only", writeSchema, valueOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.getField("properties"))); Schema mapOnly = writeSchema.select("properties"); projected = writeAndRead("map_only", writeSchema, mapOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project entire map", properties, toStringMap((Map) projected.getField("properties"))); } private Map<String, ?> toStringMap(Map<?, ?> map) { Map<String, Object> stringMap = Maps.newHashMap(); for (Map.Entry<?, ?> entry : map.entrySet()) { if (entry.getValue() instanceof CharSequence) { stringMap.put(entry.getKey().toString(), entry.getValue().toString()); } else { stringMap.put(entry.getKey().toString(), entry.getValue()); } } return stringMap; } @Test public void testMapOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( Types.NestedField.required(1, "lat", Types.FloatType.get()), Types.NestedField.required(2, "long", Types.FloatType.get()) ) )) ); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); Record l1 = GenericRecord.create(writeSchema.findType("locations").asMapType().valueType().asStructType()); l1.setField("lat", 53.992811f); l1.setField("long", -1.542616f); 
Record l2 = GenericRecord.create(writeSchema.findType("locations").asMapType().valueType().asStructType()); l2.setField("lat", 52.995143f); l2.setField("long", -1.539054f); record.setField("locations", ImmutableMap.of("L1", l1, "L2", l2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Assert.assertNull("Should not project locations map", projected.getField("locations")); projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project locations map", record.getField("locations"), toStringMap((Map) projected.getField("locations"))); projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record); Assert.assertNull("Should not project id", projected.getField("id")); Map<String, ?> locations = toStringMap((Map) projected.getField("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); Record projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain lat", 53.992811f, (float) projectedL1.getField("lat"), 0.000001); Assert.assertNull("L1 should not contain long", projectedL1.getField("long")); Record projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 should contain lat", 52.995143f, (float) projectedL2.getField("lat"), 0.000001); Assert.assertNull("L2 should not contain long", projectedL2.getField("long")); projected = writeAndRead("long_only", writeSchema, writeSchema.select("locations.long"), record); 
Assert.assertNull("Should not project id", projected.getField("id")); locations = toStringMap((Map) projected.getField("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertNull("L1 should not contain lat", projectedL1.getField("lat")); Assert.assertEquals("L1 should contain long", -1.542616f, (float) projectedL1.getField("long"), 0.000001); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertNull("L2 should not contain lat", projectedL2.getField("lat")); Assert.assertEquals("L2 should contain long", -1.539054f, (float) projectedL2.getField("long"), 0.000001); Schema latitiudeRenamed = new Schema( Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StructType.of( Types.NestedField.required(1, "latitude", Types.FloatType.get()) ) )) ); projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record); Assert.assertNull("Should not project id", projected.getField("id")); locations = toStringMap((Map) projected.getField("locations")); Assert.assertNotNull("Should project locations map", locations); Assert.assertEquals("Should contain L1 and L2", Sets.newHashSet("L1", "L2"), locations.keySet()); projectedL1 = (Record) locations.get("L1"); Assert.assertNotNull("L1 should not be null", projectedL1); Assert.assertEquals("L1 should contain latitude", 53.992811f, (float) projectedL1.getField("latitude"), 0.000001); Assert.assertNull("L1 should not contain lat", projectedL1.getField("lat")); Assert.assertNull("L1 should not contain long", projectedL1.getField("long")); projectedL2 = (Record) locations.get("L2"); Assert.assertNotNull("L2 should not be null", projectedL2); Assert.assertEquals("L2 should 
contain latitude", 52.995143f, (float) projectedL2.getField("latitude"), 0.000001); Assert.assertNull("L2 should not contain lat", projectedL2.getField("lat")); Assert.assertNull("L2 should not contain long", projectedL2.getField("long")); } @Test public void testListProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(10, "values", Types.ListType.ofOptional(11, Types.LongType.get())) ); List<Long> values = ImmutableList.of(56L, 57L, 58L); Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); record.setField("values", values); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Assert.assertNull("Should not project values list", projected.getField("values")); Schema elementOnly = writeSchema.select("values.element"); projected = writeAndRead("element_only", writeSchema, elementOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project entire list", values, projected.getField("values")); Schema listOnly = writeSchema.select("values"); projected = writeAndRead("list_only", writeSchema, listOnly, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project entire list", values, projected.getField("values")); } @Test @SuppressWarnings("unchecked") public void testListOfStructsProjection() throws IOException { Schema writeSchema = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()), Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()) )) ) ); 
Record record = GenericRecord.create(writeSchema.asStruct()); record.setField("id", 34L); Record p1 = GenericRecord.create(writeSchema.findType("points").asListType().elementType().asStructType()); p1.setField("x", 1); p1.setField("y", 2); Record p2 = GenericRecord.create(writeSchema.findType("points").asListType().elementType().asStructType()); p2.setField("x", 3); p2.setField("y", null); record.setField("points", ImmutableList.of(p1, p2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.getField("id")); Assert.assertNull("Should not project points list", projected.getField("points")); projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertEquals("Should project points list", record.getField("points"), projected.getField("points")); projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project points list", projected.getField("points")); List<Record> points = (List<Record>) projected.getField("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); Record projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.getField("x")); Assert.assertNull("Should not project y", projectedP1.getField("y")); Record projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.getField("x")); Assert.assertNull("Should not project y", projectedP2.getField("y")); projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project points 
list", projected.getField("points")); points = (List<Record>) projected.getField("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.getField("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.getField("y")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.getField("x")); Assert.assertEquals("Should project null y", null, projectedP2.getField("y")); Schema yRenamed = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.optional(18, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("y_renamed", writeSchema, yRenamed, record); Assert.assertNull("Should not project id", projected.getField("id")); Assert.assertNotNull("Should project points list", projected.getField("points")); points = (List<Record>) projected.getField("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.getField("x")); Assert.assertNull("Should not project y", projectedP1.getField("y")); Assert.assertEquals("Should project z", 2, (int) projectedP1.getField("z")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.getField("x")); Assert.assertNull("Should not project y", projectedP2.getField("y")); Assert.assertEquals("Should project null z", null, projectedP2.getField("z")); } }
2,191
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/RandomGenericData.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.function.Supplier;

import static java.time.temporal.ChronoUnit.MICROS;

/**
 * Generates random {@link Record} test data for an arbitrary Iceberg {@link Schema}.
 *
 * <p>Generation is deterministic for a given seed: the same (schema, numRecords, seed)
 * arguments produce the same records, because all values are drawn from a single
 * {@link Random} in schema-visit order. Note: any change to the order in which values
 * are drawn would change the generated data for existing seeds.
 */
public class RandomGenericData {
  /**
   * Produces {@code numRecords} random records matching {@code schema}.
   *
   * @param schema the schema to generate data for
   * @param numRecords number of records to generate
   * @param seed seed for the random source; identical seeds give identical output
   * @return a mutable list of generated records
   */
  public static List<Record> generate(Schema schema, int numRecords, long seed) {
    RandomDataGenerator generator = new RandomDataGenerator(seed);
    List<Record> records = Lists.newArrayListWithExpectedSize(numRecords);
    for (int i = 0; i < numRecords; i += 1) {
      // each visit walks the schema top-down and returns one fully-populated record
      records.add((Record) TypeUtil.visit(schema, generator));
    }
    return records;
  }

  /**
   * Schema visitor that materializes a random value for each node of the schema.
   * Stateful: holds the shared {@link Random} used for every draw.
   */
  private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
    private final Random random;

    private RandomDataGenerator(long seed) {
      this.random = new Random(seed);
    }

    @Override
    public Record schema(Schema schema, Supplier<Object> structResult) {
      // the top-level struct result is the record itself
      return (Record) structResult.get();
    }

    @Override
    public Record struct(Types.StructType struct, Iterable<Object> fieldResults) {
      Record rec = GenericRecord.create(struct);
      List<Object> values = Lists.newArrayList(fieldResults);
      for (int i = 0; i < values.size(); i += 1) {
        rec.set(i, values.get(i));
      }
      return rec;
    }

    @Override
    public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
      // return null 5% of the time when the value is optional
      if (field.isOptional() && random.nextInt(20) == 1) {
        return null;
      }
      return fieldResult.get();
    }

    @Override
    public Object list(Types.ListType list, Supplier<Object> elementResult) {
      // lists have 0-19 elements
      int numElements = random.nextInt(20);
      List<Object> result = Lists.newArrayListWithExpectedSize(numElements);
      for (int i = 0; i < numElements; i += 1) {
        // return null 5% of the time when the value is optional
        if (list.isElementOptional() && random.nextInt(20) == 1) {
          result.add(null);
        } else {
          result.add(elementResult.get());
        }
      }
      return result;
    }

    @Override
    public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
      // maps have 0-19 entries; insertion order is preserved via LinkedHashMap
      int numEntries = random.nextInt(20);
      Map<Object, Object> result = Maps.newLinkedHashMap();
      Supplier<Object> keyFunc;
      if (map.keyType() == Types.StringType.get()) {
        // string keys are converted to java.lang.String so equals/hashCode behave as map keys
        keyFunc = () -> keyResult.get().toString();
      } else {
        keyFunc = keyResult;
      }
      Set<Object> keySet = Sets.newHashSet();
      for (int i = 0; i < numEntries; i += 1) {
        Object key = keyFunc.get();
        // ensure no collisions
        while (keySet.contains(key)) {
          key = keyFunc.get();
        }
        keySet.add(key);
        // return null 5% of the time when the value is optional
        if (map.isValueOptional() && random.nextInt(20) == 1) {
          result.put(key, null);
        } else {
          result.put(key, valueResult.get());
        }
      }
      return result;
    }

    @Override
    public Object primitive(Type.PrimitiveType primitive) {
      Object result = generatePrimitive(primitive, random);
      // wrap raw byte[] results in the representation expected for the type
      switch (primitive.typeId()) {
        case BINARY:
          return ByteBuffer.wrap((byte[]) result);
        case UUID:
          return UUID.nameUUIDFromBytes((byte[]) result);
        default:
          return result;
      }
    }
  }

  /**
   * Draws one random value for a primitive type. Roughly 1-in-20 draws pick a
   * boundary value (min/max/zero/NaN/infinity) to exercise edge cases.
   */
  private static Object generatePrimitive(Type.PrimitiveType primitive, Random random) {
    int choice = random.nextInt(20);
    switch (primitive.typeId()) {
      case BOOLEAN:
        return choice < 10;

      case INTEGER:
        switch (choice) {
          case 1:
            return Integer.MIN_VALUE;
          case 2:
            return Integer.MAX_VALUE;
          case 3:
            return 0;
          default:
            return random.nextInt();
        }

      case LONG:
        switch (choice) {
          case 1:
            return Long.MIN_VALUE;
          case 2:
            return Long.MAX_VALUE;
          case 3:
            return 0L;
          default:
            return random.nextLong();
        }

      case FLOAT:
        switch (choice) {
          case 1:
            return Float.MIN_VALUE;
          case 2:
            return -Float.MIN_VALUE;
          case 3:
            return Float.MAX_VALUE;
          case 4:
            return -Float.MAX_VALUE;
          case 5:
            return Float.NEGATIVE_INFINITY;
          case 6:
            return Float.POSITIVE_INFINITY;
          case 7:
            return 0.0F;
          case 8:
            return Float.NaN;
          default:
            return random.nextFloat();
        }

      case DOUBLE:
        switch (choice) {
          case 1:
            return Double.MIN_VALUE;
          case 2:
            return -Double.MIN_VALUE;
          case 3:
            return Double.MAX_VALUE;
          case 4:
            return -Double.MAX_VALUE;
          case 5:
            return Double.NEGATIVE_INFINITY;
          case 6:
            return Double.POSITIVE_INFINITY;
          case 7:
            return 0.0D;
          case 8:
            return Double.NaN;
          default:
            return random.nextDouble();
        }

      case DATE:
        // this will include negative values (dates before 1970-01-01)
        return EPOCH_DAY.plusDays(random.nextInt() % ABOUT_380_YEARS_IN_DAYS);

      case TIME:
        // mask to non-negative, then restrict to one day's worth of microseconds
        return LocalTime.ofNanoOfDay(
            ((random.nextLong() & Integer.MAX_VALUE) % ONE_DAY_IN_MICROS) * 1000);

      case TIMESTAMP:
        Types.TimestampType ts = (Types.TimestampType) primitive;
        // nextLong() % ... may be negative, giving pre-1970 timestamps as well
        if (ts.shouldAdjustToUTC()) {
          return EPOCH.plus(random.nextLong() % FIFTY_YEARS_IN_MICROS, MICROS);
        } else {
          return EPOCH.plus(random.nextLong() % FIFTY_YEARS_IN_MICROS, MICROS).toLocalDateTime();
        }

      case STRING:
        return randomString(random);

      case UUID:
        byte[] uuidBytes = new byte[16];
        random.nextBytes(uuidBytes);
        // this will hash the uuidBytes
        return uuidBytes;

      case FIXED:
        byte[] fixed = new byte[((Types.FixedType) primitive).length()];
        random.nextBytes(fixed);
        return fixed;

      case BINARY:
        byte[] binary = new byte[random.nextInt(50)];
        random.nextBytes(binary);
        return binary;

      case DECIMAL:
        Types.DecimalType type = (Types.DecimalType) primitive;
        BigInteger unscaled = randomUnscaled(type.precision(), random);
        return new BigDecimal(unscaled, type.scale());

      default:
        throw new IllegalArgumentException(
            "Cannot generate random value for unknown type: " + primitive);
    }
  }

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();
  // average year length (365.25 days) expressed in microseconds, times 50
  private static final long FIFTY_YEARS_IN_MICROS =
      (50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4;
  private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365;
  private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L;
  private static final String CHARS =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?";

  // builds a 0-49 character string from the CHARS alphabet (all single-byte in UTF-8)
  private static String randomString(Random random) {
    int length = random.nextInt(50);
    byte[] buffer = new byte[length];

    for (int i = 0; i < length; i += 1) {
      buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length()));
    }

    return new String(buffer, Charsets.UTF_8);
  }

  private static final String DIGITS = "0123456789";

  // random unscaled decimal with at most (precision - 1) digits; may have leading zeros
  private static BigInteger randomUnscaled(int precision, Random random) {
    int length = random.nextInt(precision);
    if (length == 0) {
      return BigInteger.ZERO;
    }

    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < length; i += 1) {
      sb.append(DIGITS.charAt(random.nextInt(DIGITS.length())));
    }

    return new BigInteger(sb.toString());
  }
}
2,192
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/DataTest.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Base class for write/read round-trip tests over a range of schema shapes:
 * flat structs of every supported primitive, lists, maps (string, numeric, and
 * struct keys), and deeply nested combinations of all three.
 *
 * <p>Subclasses implement {@link #writeAndValidate(Schema)} for a concrete file
 * format; each test hands it one schema to round-trip.
 */
public abstract class DataTest {
  /**
   * Writes data for {@code schema} and validates what is read back.
   *
   * @param schema the schema to round-trip
   * @throws IOException if writing or reading fails
   */
  protected abstract void writeAndValidate(Schema schema) throws IOException;

  // one field per supported primitive type; ids are fixed and must stay stable
  private static final StructType SUPPORTED_PRIMITIVES = StructType.of(
      required(100, "id", LongType.get()),
      optional(101, "data", Types.StringType.get()),
      required(102, "b", Types.BooleanType.get()),
      optional(103, "i", Types.IntegerType.get()),
      required(104, "l", LongType.get()),
      optional(105, "f", Types.FloatType.get()),
      required(106, "d", Types.DoubleType.get()),
      optional(107, "date", Types.DateType.get()),
      required(108, "ts", Types.TimestampType.withZone()),
      required(110, "s", Types.StringType.get()),
      required(112, "fixed", Types.FixedType.ofLength(7)),
      optional(113, "bytes", Types.BinaryType.get()),
      required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
      required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
      required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // maximum precision
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  @Test
  public void testSimpleStruct() throws IOException {
    writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
  }

  @Test
  public void testArray() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testArrayOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMap() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testNumericMapKey() throws IOException {
    // non-string keys exercise formats that only support string map keys natively
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            LongType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testComplexMapKey() throws IOException {
    // struct-typed keys are the hardest case for map encodings
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            StructType.of(
                required(4, "i", Types.IntegerType.get()),
                optional(5, "s", Types.StringType.get())),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testMapOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMixedTypes() throws IOException {
    // deep nesting: lists of maps, maps of lists, lists of lists, maps of maps,
    // and a list of structs that themselves contain maps and lists
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "list_of_maps",
            ListType.ofOptional(2, MapType.ofOptional(3, 4,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))),
        optional(5, "map_of_lists",
            MapType.ofOptional(6, 7,
                Types.StringType.get(),
                ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
        required(9, "list_of_lists",
            ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
        required(12, "map_of_maps",
            MapType.ofOptional(13, 14,
                Types.StringType.get(),
                MapType.ofOptional(15, 16,
                    Types.StringType.get(),
                    SUPPORTED_PRIMITIVES))),
        required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
            Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
            Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))
        )))
    );

    writeAndValidate(schema);
  }
}
2,193
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestGenericReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.Iterables;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.data.TestReadProjection;
import com.netflix.iceberg.io.FileAppender;
import java.io.File;
import java.io.IOException;

/**
 * Read-projection tests for the generic Avro reader: writes a record with the
 * write schema and reads it back projected to the read schema.
 */
public class TestGenericReadProjection extends TestReadProjection {
  /**
   * Writes {@code record} with {@code writeSchema} to a temp Avro file and reads
   * it back with {@code readSchema}.
   *
   * @param desc name used for the temp file
   * @param writeSchema schema used to write the record
   * @param readSchema projection schema used to read the record back
   * @param record the record to round-trip
   * @return the single record read back under the projection
   * @throws IOException if writing fails or the temp file cannot be deleted
   */
  protected Record writeAndRead(String desc,
                                Schema writeSchema, Schema readSchema,
                                Record record) throws IOException {
    File file = temp.newFile(desc + ".avro");
    // newFile creates an empty file; remove it so the Avro writer creates it
    // fresh. Fail fast instead of ignoring a failed delete.
    if (!file.delete()) {
      throw new IOException("Failed to delete temp file: " + file);
    }

    try (FileAppender<Record> appender = Avro.write(Files.localOutput(file))
        .schema(writeSchema)
        .createWriterFunc(DataWriter::create)
        .build()) {
      appender.add(record);
    }

    Iterable<Record> records = Avro.read(Files.localInput(file))
        .project(readSchema)
        .createReaderFunc(DataReader::create)
        .build();

    return Iterables.getOnlyElement(records);
  }
}
2,194
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestSingleMessageEncoding.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Ordering;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.data.GenericRecord;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.types.Types;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.message.BadHeaderException;
import org.apache.avro.message.MessageDecoder;
import org.apache.avro.message.MessageEncoder;
import org.apache.avro.message.MissingSchemaException;
import org.apache.avro.message.SchemaStore;
import org.junit.Assert;
import org.junit.Test;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.Set;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Tests for the Avro single-message encoding round trip: encoding, schema
 * evolution/resolution via the embedded fingerprint, buffer reuse, and header
 * error handling.
 */
public class TestSingleMessageEncoding {
  private static final Schema SCHEMA_V1 = new Schema(
      required(0, "id", Types.IntegerType.get()),
      optional(1, "msg", Types.StringType.get())
  );

  private static Record v1Record(int id, String msg) {
    Record rec = GenericRecord.create(SCHEMA_V1.asStruct());
    rec.setField("id", id);
    rec.setField("msg", msg);
    return rec;
  }

  private static final List<Record> V1_RECORDS = Arrays.asList(
      v1Record(1, "m-1"),
      v1Record(2, "m-2"),
      v1Record(4, "m-4"),
      v1Record(6, "m-6")
  );

  private static final Schema SCHEMA_V2 = new Schema(
      required(0, "id", Types.LongType.get()),
      optional(1, "message", Types.StringType.get()),
      optional(2, "data", Types.DoubleType.get())
  );

  private static Record v2Record(long id, String message, Double data) {
    Record rec = GenericRecord.create(SCHEMA_V2.asStruct());
    rec.setField("id", id);
    rec.setField("message", message);
    rec.setField("data", data);
    return rec;
  }

  private static final List<Record> V2_RECORDS = Arrays.asList(
      v2Record(3L, "m-3", 12.3),
      v2Record(5L, "m-5", 23.4),
      v2Record(7L, "m-7", 34.5),
      v2Record(8L, "m-8", 35.6)
  );

  @Test
  public void testByteBufferRoundTrip() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    Record copy = decoder.decode(encoder.encode(V2_RECORDS.get(0)));

    Assert.assertTrue("Copy should not be the same object",
        copy != V2_RECORDS.get(0));
    Assert.assertEquals("Record should be identical after round-trip",
        V2_RECORDS.get(0), copy);
  }

  @Test
  public void testSchemaEvolution() throws Exception {
    List<ByteBuffer> buffers = Lists.newArrayList();
    List<Record> records = Ordering.usingToString().sortedCopy(
        Iterables.concat(V1_RECORDS, V2_RECORDS));

    MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1);
    MessageEncoder<Record> v2Encoder = new IcebergEncoder<>(SCHEMA_V2);

    // encode each record with the encoder matching its schema
    for (Record record : records) {
      if (record.struct() == SCHEMA_V1.asStruct()) {
        buffers.add(v1Encoder.encode(record));
      } else {
        buffers.add(v2Encoder.encode(record));
      }
    }

    // v1 records resolved to v2 get a widened id (int -> long) and null data
    Set<Record> allAsV2 = Sets.newHashSet(V2_RECORDS);
    allAsV2.add(v2Record(1L, "m-1", null));
    allAsV2.add(v2Record(2L, "m-2", null));
    allAsV2.add(v2Record(4L, "m-4", null));
    allAsV2.add(v2Record(6L, "m-6", null));

    IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2);
    v2Decoder.addSchema(SCHEMA_V1);

    Set<Record> decodedUsingV2 = Sets.newHashSet();
    for (ByteBuffer buffer : buffers) {
      decodedUsingV2.add(v2Decoder.decode(buffer));
    }

    Assert.assertEquals(allAsV2, decodedUsingV2);
  }

  @Test(expected = MissingSchemaException.class)
  public void testCompatibleReadFailsWithoutSchema() throws Exception {
    MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1);
    MessageDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3));

    // the v1 write schema was never registered with the decoder
    v2Decoder.decode(v1Buffer);
  }

  @Test
  public void testCompatibleReadWithSchema() throws Exception {
    MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1);
    IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2);
    v2Decoder.addSchema(SCHEMA_V1);

    ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(3));

    Record record = v2Decoder.decode(v1Buffer);

    Assert.assertEquals(v2Record(6L, "m-6", null), record);
  }

  @Test
  public void testCompatibleReadWithSchemaFromLookup() throws Exception {
    MessageEncoder<Record> v1Encoder = new IcebergEncoder<>(SCHEMA_V1);

    SchemaStore.Cache schemaCache = new SchemaStore.Cache();
    schemaCache.addSchema(AvroSchemaUtil.convert(SCHEMA_V1, "table"));
    IcebergDecoder<Record> v2Decoder = new IcebergDecoder<>(SCHEMA_V2, schemaCache);

    ByteBuffer v1Buffer = v1Encoder.encode(V1_RECORDS.get(2));

    Record record = v2Decoder.decode(v1Buffer);

    Assert.assertEquals(v2Record(4L, "m-4", null), record);
  }

  @Test
  public void testBufferReuse() throws Exception {
    // This test depends on the serialized version of record 1 being smaller or
    // the same size as record 0 so that the reused ByteArrayOutputStream won't
    // expand its internal buffer.
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V1, false);

    ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0));
    ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1));

    // assertSame checks reference identity; assertEquals on arrays is
    // deprecated and only compared references here anyway
    Assert.assertSame("Should reuse the same backing array", b0.array(), b1.array());

    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V1);
    Assert.assertEquals("Buffer was reused, decode(b0) should be record 1",
        V1_RECORDS.get(1), decoder.decode(b0));
  }

  @Test
  public void testBufferCopy() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V1);

    ByteBuffer b0 = encoder.encode(V1_RECORDS.get(0));
    ByteBuffer b1 = encoder.encode(V1_RECORDS.get(1));

    // each encode call should produce a distinct backing array
    Assert.assertNotSame("Should not reuse the backing array", b0.array(), b1.array());

    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V1);
    // bytes are not changed by reusing the encoder
    Assert.assertEquals("Buffer was copied, decode(b0) should be record 0",
        V1_RECORDS.get(0), decoder.decode(b0));
  }

  @Test(expected = AvroRuntimeException.class)
  public void testByteBufferMissingPayload() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));

    // truncate to just past the 10-byte header, cutting off the payload
    buffer.limit(12);

    decoder.decode(buffer);
  }

  @Test(expected = BadHeaderException.class)
  public void testByteBufferMissingFullHeader() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));

    // truncate inside the header
    buffer.limit(8);

    decoder.decode(buffer);
  }

  @Test(expected = BadHeaderException.class)
  public void testByteBufferBadMarkerByte() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));

    // corrupt the marker byte
    buffer.array()[0] = 0x00;

    decoder.decode(buffer);
  }

  @Test(expected = BadHeaderException.class)
  public void testByteBufferBadVersionByte() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));

    // corrupt the version byte
    buffer.array()[1] = 0x00;

    decoder.decode(buffer);
  }

  @Test(expected = MissingSchemaException.class)
  public void testByteBufferUnknownSchema() throws Exception {
    MessageEncoder<Record> encoder = new IcebergEncoder<>(SCHEMA_V2);
    MessageDecoder<Record> decoder = new IcebergDecoder<>(SCHEMA_V2);

    ByteBuffer buffer = encoder.encode(V2_RECORDS.get(0));

    // corrupt a fingerprint byte so the schema lookup fails
    buffer.array()[4] = 0x00;

    decoder.decode(buffer);
  }
}
2,195
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/avro/TestGenericData.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroIterable;
import com.netflix.iceberg.data.DataTest;
import com.netflix.iceberg.data.DataTestHelpers;
import com.netflix.iceberg.data.RandomGenericData;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.io.FileAppender;
import org.junit.Assert;

import java.io.File;
import java.io.IOException;
import java.util.List;

/**
 * Round-trip test for generic records through the Avro writer and reader.
 */
public class TestGenericData extends DataTest {
  /**
   * Writes 100 random records for {@code schema} to a temp Avro file, reads
   * them back, and verifies every record matches.
   *
   * @param schema the schema to generate, write, and read records with
   * @throws IOException if the temp file cannot be created or written
   */
  protected void writeAndValidate(Schema schema) throws IOException {
    List<Record> expected = RandomGenericData.generate(schema, 100, 0L);

    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile))
        .schema(schema)
        .createWriterFunc(DataWriter::create)
        .named("test")
        .build()) {
      for (Record rec : expected) {
        writer.add(rec);
      }
    }

    List<Record> rows;
    try (AvroIterable<Record> reader = Avro.read(Files.localInput(testFile))
        .project(schema)
        .createReaderFunc(DataReader::create)
        .build()) {
      rows = Lists.newArrayList(reader);
    }

    // check the count first so extra rows fail the test instead of being
    // silently ignored, and missing rows fail clearly instead of with an
    // IndexOutOfBoundsException
    Assert.assertEquals("Should read the expected number of rows",
        expected.size(), rows.size());

    for (int i = 0; i < expected.size(); i += 1) {
      DataTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
2,196
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/parquet/TestGenericReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.Iterables;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.data.TestReadProjection;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import java.io.File;
import java.io.IOException;

/**
 * Read-projection tests for the generic Parquet reader: writes a record with
 * the write schema and reads it back projected to the read schema.
 */
public class TestGenericReadProjection extends TestReadProjection {
  /**
   * Writes {@code record} with {@code writeSchema} to a temp Parquet file and
   * reads it back with {@code readSchema}.
   *
   * @param desc name used for the temp file
   * @param writeSchema schema used to write the record
   * @param readSchema projection schema used to read the record back
   * @param record the record to round-trip
   * @return the single record read back under the projection
   * @throws IOException if writing fails or the temp file cannot be deleted
   */
  protected Record writeAndRead(String desc,
                                Schema writeSchema, Schema readSchema,
                                Record record) throws IOException {
    File file = temp.newFile(desc + ".parquet");
    // newFile creates an empty file; remove it so the Parquet writer creates
    // it fresh. Fail fast instead of ignoring a failed delete.
    if (!file.delete()) {
      throw new IOException("Failed to delete temp file: " + file);
    }

    try (FileAppender<Record> appender = Parquet.write(Files.localOutput(file))
        .schema(writeSchema)
        .createWriterFunc(GenericParquetWriter::buildWriter)
        .build()) {
      appender.add(record);
    }

    Iterable<Record> records = Parquet.read(Files.localInput(file))
        .project(readSchema)
        .createReaderFunc(fileSchema ->
            GenericParquetReaders.buildReader(readSchema, fileSchema))
        .build();

    return Iterables.getOnlyElement(records);
  }
}
2,197
0
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/test/java/com/netflix/iceberg/data/parquet/TestGenericData.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.data.DataTest;
import com.netflix.iceberg.data.DataTestHelpers;
import com.netflix.iceberg.data.RandomGenericData;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import org.junit.Assert;

import java.io.File;
import java.io.IOException;
import java.util.List;

/**
 * Round-trip test for generic records through the Parquet writer and reader.
 */
public class TestGenericData extends DataTest {
  /**
   * Writes 100 random records for {@code schema} to a temp Parquet file, reads
   * them back, and verifies every record matches.
   *
   * @param schema the schema to generate, write, and read records with
   * @throws IOException if the temp file cannot be created or written
   */
  protected void writeAndValidate(Schema schema) throws IOException {
    List<Record> expected = RandomGenericData.generate(schema, 100, 0L);

    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<Record> appender = Parquet.write(Files.localOutput(testFile))
        .schema(schema)
        .createWriterFunc(GenericParquetWriter::buildWriter)
        .build()) {
      appender.addAll(expected);
    }

    List<Record> rows;
    try (CloseableIterable<Record> reader = Parquet.read(Files.localInput(testFile))
        .project(schema)
        .createReaderFunc(fileSchema ->
            GenericParquetReaders.buildReader(schema, fileSchema))
        .build()) {
      rows = Lists.newArrayList(reader);
    }

    // check the count first so extra rows fail the test instead of being
    // silently ignored, and missing rows fail clearly instead of with an
    // IndexOutOfBoundsException
    Assert.assertEquals("Should read the expected number of rows",
        expected.size(), rows.size());

    for (int i = 0; i < expected.size(); i += 1) {
      DataTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
2,198
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/GenericRecord.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;

import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * A generic, in-memory {@link Record} implementation backed by an object array,
 * with by-name field access resolved through a cached name-to-position map.
 */
public class GenericRecord implements Record, StructLike {
  // Cache of field-name -> position maps, one per struct type. Weak keys let
  // entries be collected when a struct type is no longer referenced.
  private static final LoadingCache<StructType, Map<String, Integer>> NAME_MAP_CACHE =
      CacheBuilder.newBuilder()
          .weakKeys()
          .build(new CacheLoader<StructType, Map<String, Integer>>() {
            @Override
            public Map<String, Integer> load(StructType struct) {
              Map<String, Integer> nameToPos = Maps.newHashMap();
              List<Types.NestedField> fields = struct.fields();
              for (int i = 0; i < fields.size(); i += 1) {
                nameToPos.put(fields.get(i).name(), i);
              }
              return nameToPos;
            }
          });

  /** Returns a new, empty record for the given schema's struct type. */
  public static GenericRecord create(Schema schema) {
    return new GenericRecord(schema.asStruct());
  }

  /** Returns a new, empty record for the given struct type. */
  public static GenericRecord create(StructType struct) {
    return new GenericRecord(struct);
  }

  private final StructType struct;
  private final int size;
  private final Object[] values;
  private final Map<String, Integer> nameToPos;

  private GenericRecord(StructType struct) {
    this.struct = struct;
    this.size = struct.fields().size();
    this.values = new Object[size];
    this.nameToPos = NAME_MAP_CACHE.getUnchecked(struct);
  }

  private GenericRecord(GenericRecord toCopy) {
    this.struct = toCopy.struct;
    this.size = toCopy.size;
    this.values = Arrays.copyOf(toCopy.values, toCopy.values.length);
    this.nameToPos = toCopy.nameToPos;
  }

  private GenericRecord(GenericRecord toCopy, Map<String, Object> overwrite) {
    this.struct = toCopy.struct;
    this.size = toCopy.size;
    this.values = Arrays.copyOf(toCopy.values, toCopy.values.length);
    this.nameToPos = toCopy.nameToPos;
    for (Map.Entry<String, Object> entry : overwrite.entrySet()) {
      setField(entry.getKey(), entry.getValue());
    }
  }

  @Override
  public StructType struct() {
    return struct;
  }

  /**
   * Returns the value of the named field, or null if the field is unknown or
   * unset. Note: an unknown name and a null value are indistinguishable here.
   */
  @Override
  public Object getField(String name) {
    Integer pos = nameToPos.get(name);
    if (pos != null) {
      return values[pos];
    }

    return null;
  }

  /**
   * Sets the named field's value.
   *
   * @throws IllegalArgumentException if the name is not a field of this struct
   */
  @Override
  public void setField(String name, Object value) {
    Integer pos = nameToPos.get(name);
    // use the message template so the string is only built on failure
    Preconditions.checkArgument(pos != null, "Cannot set unknown field named: %s", name);
    values[pos] = value;
  }

  @Override
  public int size() {
    return size;
  }

  @Override
  public Object get(int pos) {
    return values[pos];
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    Object value = get(pos);
    if (javaClass.isInstance(value)) {
      return javaClass.cast(value);
    } else {
      throw new IllegalStateException("Not an instance of " + javaClass.getName() + ": " + value);
    }
  }

  @Override
  public <T> void set(int pos, T value) {
    values[pos] = value;
  }

  @Override
  public GenericRecord copy() {
    return new GenericRecord(this);
  }

  @Override
  public GenericRecord copy(Map<String, Object> overwriteValues) {
    return new GenericRecord(this, overwriteValues);
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("Record(");
    for (int i = 0; i < values.length; i += 1) {
      if (i != 0) {
        sb.append(", ");
      }
      sb.append(values[i]);
    }
    sb.append(")");
    return sb.toString();
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other == null || getClass() != other.getClass()) {
      return false;
    }

    GenericRecord that = (GenericRecord) other;
    // NOTE(review): equality intentionally compares only values, not struct;
    // records with equal values but different schemas compare equal — confirm
    // this is intended by callers.
    return Arrays.deepEquals(this.values, that.values);
  }

  @Override
  public int hashCode() {
    // deepHashCode keeps the equals/hashCode contract consistent with the
    // Arrays.deepEquals used in equals(); the previous Objects.hashCode(values)
    // (Arrays.hashCode) diverged for nested array values
    return Arrays.deepHashCode(values);
  }
}
2,199