index
int64
0
0
repo_id
stringlengths
26
205
file_path
stringlengths
51
246
content
stringlengths
8
433k
__index_level_0__
int64
0
10k
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestDates.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; public class TestDates { @Test public void testDateToHumanString() { Types.DateType type = Types.DateType.get(); Literal<Integer> date = Literal.of("2017-12-01").to(type); Transform<Integer, Integer> year = Transforms.year(type); Assert.assertEquals("Should produce the correct Human string", "2017", year.toHumanString(year.apply(date.value()))); Transform<Integer, Integer> month = Transforms.month(type); Assert.assertEquals("Should produce the correct Human string", "2017-12", month.toHumanString(month.apply(date.value()))); Transform<Integer, Integer> day = Transforms.day(type); Assert.assertEquals("Should produce the correct Human string", "2017-12-01", day.toHumanString(day.apply(date.value()))); } @Test public void testNullHumanString() { Types.DateType type = Types.DateType.get(); Assert.assertEquals("Should produce \"null\" for null", "null", Transforms.year(type).toHumanString(null)); Assert.assertEquals("Should produce \"null\" for null", "null", Transforms.month(type).toHumanString(null)); Assert.assertEquals("Should produce \"null\" for null", "null", Transforms.day(type).toHumanString(null)); } }
6,400
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestIdentity.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; import java.math.BigDecimal; import java.nio.ByteBuffer; public class TestIdentity { @Test public void testNullHumanString() { Types.LongType longType = Types.LongType.get(); Transform<Long, Long> identity = Transforms.identity(longType); Assert.assertEquals("Should produce \"null\" for null", "null", identity.toHumanString(null)); } @Test public void testBinaryHumanString() { Types.BinaryType binary = Types.BinaryType.get(); Transform<ByteBuffer, ByteBuffer> identity = Transforms.identity(binary); Assert.assertEquals("Should base64-encode binary", "AQID", identity.toHumanString(ByteBuffer.wrap(new byte[] {1, 2, 3}))); } @Test public void testFixedHumanString() { Types.FixedType fixed3 = Types.FixedType.ofLength(3); Transform<byte[], byte[]> identity = Transforms.identity(fixed3); Assert.assertEquals("Should base64-encode binary", "AQID", identity.toHumanString(new byte[] {1, 2, 3})); } @Test public void testDateHumanString() { Types.DateType date = Types.DateType.get(); Transform<Integer, Integer> identity = Transforms.identity(date); String dateString = "2017-12-01"; Literal<Integer> d = Literal.of(dateString).to(date); Assert.assertEquals("Should produce identical date", dateString, identity.toHumanString(d.value())); } 
@Test public void testTimeHumanString() { Types.TimeType time = Types.TimeType.get(); Transform<Long, Long> identity = Transforms.identity(time); String timeString = "10:12:55.038194"; Literal<Long> d = Literal.of(timeString).to(time); Assert.assertEquals("Should produce identical time", timeString, identity.toHumanString(d.value())); } @Test public void testTimestampWithZoneHumanString() { Types.TimestampType timestamptz = Types.TimestampType.withZone(); Transform<Long, Long> identity = Transforms.identity(timestamptz); Literal<Long> ts = Literal.of("2017-12-01T10:12:55.038194-08:00").to(timestamptz); // value will always be in UTC Assert.assertEquals("Should produce timestamp with time zone adjusted to UTC", "2017-12-01T18:12:55.038194Z", identity.toHumanString(ts.value())); } @Test public void testTimestampWithoutZoneHumanString() { Types.TimestampType timestamp = Types.TimestampType.withoutZone(); Transform<Long, Long> identity = Transforms.identity(timestamp); String tsString = "2017-12-01T10:12:55.038194"; Literal<Long> ts = Literal.of(tsString).to(timestamp); // value is not changed Assert.assertEquals("Should produce identical timestamp without time zone", tsString, identity.toHumanString(ts.value())); } @Test public void testLongToHumanString() { Types.LongType longType = Types.LongType.get(); Transform<Long, Long> identity = Transforms.identity(longType); Assert.assertEquals("Should use Long toString", "-1234567890000", identity.toHumanString(-1234567890000L)); } @Test public void testStringToHumanString() { Types.StringType string = Types.StringType.get(); Transform<String, String> identity = Transforms.identity(string); String withSlash = "a/b/c=d"; Assert.assertEquals("Should not modify Strings", withSlash, identity.toHumanString(withSlash)); } @Test public void testBigDecimalToHumanString() { Types.DecimalType decimal = Types.DecimalType.of(9, 2); Transform<BigDecimal, BigDecimal> identity = Transforms.identity(decimal); String decimalString = 
"-1.50"; BigDecimal d = new BigDecimal(decimalString); Assert.assertEquals("Should not modify Strings", decimalString, identity.toHumanString(d)); } }
6,401
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestResiduals.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers.Row; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.expressions.ResidualEvaluator; import com.netflix.iceberg.expressions.UnboundPredicate; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; import static com.netflix.iceberg.TestHelpers.assertAndUnwrapUnbound; import static com.netflix.iceberg.expressions.Expression.Operation.GT; import static com.netflix.iceberg.expressions.Expression.Operation.LT; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.alwaysTrue; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.or; public class TestResiduals { @Test public void testIdentityTransformResiduals() { Schema schema = new Schema( Types.NestedField.optional(50, "dateint", Types.IntegerType.get()), Types.NestedField.optional(51, "hour", Types.IntegerType.get()) ); PartitionSpec spec = PartitionSpec.builderFor(schema) .identity("dateint") 
.build(); ResidualEvaluator resEval = new ResidualEvaluator(spec, or(or( and(lessThan("dateint", 20170815), greaterThan("dateint", 20170801)), and(equal("dateint", 20170815), lessThan("hour", 12))), and(equal("dateint", 20170801), greaterThan("hour", 11))) ); // equal to the upper date bound Expression residual = resEval.residualFor(Row.of(20170815)); UnboundPredicate<?> unbound = assertAndUnwrapUnbound(residual); Assert.assertEquals("Residual should be hour < 12", LT, unbound.op()); Assert.assertEquals("Residual should be hour < 12", "hour", unbound.ref().name()); Assert.assertEquals("Residual should be hour < 12", 12, unbound.literal().value()); // equal to the lower date bound residual = resEval.residualFor(Row.of(20170801)); unbound = assertAndUnwrapUnbound(residual); Assert.assertEquals("Residual should be hour > 11", GT, unbound.op()); Assert.assertEquals("Residual should be hour > 11", "hour", unbound.ref().name()); Assert.assertEquals("Residual should be hour > 11", 11, unbound.literal().value()); // inside the date range residual = resEval.residualFor(Row.of(20170812)); Assert.assertEquals("Residual should be alwaysTrue", alwaysTrue(), residual); // outside the date range residual = resEval.residualFor(Row.of(20170817)); Assert.assertEquals("Residual should be alwaysFalse", alwaysFalse(), residual); } }
6,402
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.google.common.collect.Lists; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.BoundPredicate; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.Or; import com.netflix.iceberg.expressions.Projections; import com.netflix.iceberg.expressions.UnboundPredicate; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; import java.util.List; import static com.netflix.iceberg.TestHelpers.assertAndUnwrap; import static com.netflix.iceberg.TestHelpers.assertAndUnwrapUnbound; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestProjection { private static final Schema SCHEMA = new Schema( optional(16, "id", Types.LongType.get()) ); @Test public void testIdentityProjection() { List<UnboundPredicate<?>> predicates = Lists.newArrayList( 
Expressions.notNull("id"), Expressions.isNull("id"), Expressions.lessThan("id", 100), Expressions.lessThanOrEqual("id", 101), Expressions.greaterThan("id", 102), Expressions.greaterThanOrEqual("id", 103), Expressions.equal("id", 104), Expressions.notEqual("id", 105) ); PartitionSpec spec = PartitionSpec.builderFor(SCHEMA) .identity("id") .build(); for (UnboundPredicate<?> predicate : predicates) { // get the projected predicate Expression expr = Projections.inclusive(spec).project(predicate); UnboundPredicate<?> projected = assertAndUnwrapUnbound(expr); // check inclusive the bound predicate to ensure the types are correct BoundPredicate<?> bound = assertAndUnwrap(predicate.bind(spec.schema().asStruct())); Assert.assertEquals("Field name should match partition struct field", "id", projected.ref().name()); Assert.assertEquals("Operation should match", bound.op(), projected.op()); if (bound.literal() != null) { Assert.assertEquals("Literal should be equal", bound.literal().value(), projected.literal().value()); } else { Assert.assertNull("Literal should be null", projected.literal()); } } } @Test public void testStrictIdentityProjection() { List<UnboundPredicate<?>> predicates = Lists.newArrayList( Expressions.notNull("id"), Expressions.isNull("id"), Expressions.lessThan("id", 100), Expressions.lessThanOrEqual("id", 101), Expressions.greaterThan("id", 102), Expressions.greaterThanOrEqual("id", 103), Expressions.equal("id", 104), Expressions.notEqual("id", 105) ); PartitionSpec spec = PartitionSpec.builderFor(SCHEMA) .identity("id") .build(); for (UnboundPredicate<?> predicate : predicates) { // get the projected predicate Expression expr = Projections.strict(spec).project(predicate); UnboundPredicate<?> projected = assertAndUnwrapUnbound(expr); // check inclusive the bound predicate to ensure the types are correct BoundPredicate<?> bound = assertAndUnwrap(predicate.bind(spec.schema().asStruct())); Assert.assertEquals("Field name should match partition struct field", 
"id", projected.ref().name()); Assert.assertEquals("Operation should match", bound.op(), projected.op()); if (bound.literal() != null) { Assert.assertEquals("Literal should be equal", bound.literal().value(), projected.literal().value()); } else { Assert.assertNull("Literal should be null", projected.literal()); } } } @Test public void testBadSparkPartitionFilter() { // this tests a case that results in a full table scan in Spark with Hive tables. because the // hour field is not a partition, mixing it with partition columns in the filter expression // prevents the day/hour boundaries from being pushed to the metastore. this is an easy mistake // when tables are normally partitioned by both hour and dateint. the the filter is: // // WHERE dateint = 20180416 // OR (dateint = 20180415 and hour >= 20) // OR (dateint = 20180417 and hour <= 4) Schema schema = new Schema( required(1, "id", Types.LongType.get()), optional(2, "data", Types.StringType.get()), required(3, "hour", Types.IntegerType.get()), required(4, "dateint", Types.IntegerType.get())); PartitionSpec spec = PartitionSpec.builderFor(schema) .identity("dateint") .build(); Expression filter = or(equal("dateint", 20180416), or( and(equal("dateint", 20180415), greaterThanOrEqual("hour", 20)), and(equal("dateint", 20180417), lessThanOrEqual("hour", 4)))); Expression projection = Projections.inclusive(spec).project(filter); Assert.assertTrue(projection instanceof Or); Or or1 = (Or) projection; UnboundPredicate<?> dateint1 = assertAndUnwrapUnbound(or1.left()); Assert.assertEquals("Should be a dateint predicate", "dateint", dateint1.ref().name()); Assert.assertEquals("Should be dateint=20180416", 20180416, dateint1.literal().value()); Assert.assertTrue(or1.right() instanceof Or); Or or2 = (Or) or1.right(); UnboundPredicate<?> dateint2 = assertAndUnwrapUnbound(or2.left()); Assert.assertEquals("Should be a dateint predicate", "dateint", dateint2.ref().name()); Assert.assertEquals("Should be dateint=20180415", 
20180415, dateint2.literal().value()); UnboundPredicate<?> dateint3 = assertAndUnwrapUnbound(or2.right()); Assert.assertEquals("Should be a dateint predicate", "dateint", dateint3.ref().name()); Assert.assertEquals("Should be dateint=20180417", 20180417, dateint3.literal().value()); } }
6,403
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTransformSerialization.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.transforms; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; public class TestTransformSerialization { @Test public void testTransforms() throws Exception { Schema schema = new Schema( Types.NestedField.required(1, "i", Types.IntegerType.get()), Types.NestedField.required(2, "l", Types.LongType.get()), Types.NestedField.required(3, "d", Types.DateType.get()), Types.NestedField.required(4, "t", Types.TimeType.get()), Types.NestedField.required(5, "ts", Types.TimestampType.withoutZone()), Types.NestedField.required(6, "dec", Types.DecimalType.of(9, 2)), Types.NestedField.required(7, "s", Types.StringType.get()), Types.NestedField.required(8, "u", Types.UUIDType.get()), Types.NestedField.required(9, "f", Types.FixedType.ofLength(3)), Types.NestedField.required(10, "b", Types.BinaryType.get()) ); // a spec with all of the allowed transform/type pairs PartitionSpec spec = PartitionSpec.builderFor(schema) .identity("i") .identity("l") .identity("d") .identity("t") .identity("ts") .identity("dec") .identity("s") .identity("u") .identity("f") .identity("b") .bucket("i", 128) .bucket("l", 128) .bucket("d", 128) .bucket("t", 128) .bucket("ts", 128) .bucket("dec", 128) .bucket("s", 128) .bucket("u", 128) .bucket("f", 128) 
.bucket("b", 128) .year("d") .month("d") .day("d") .year("ts") .month("ts") .day("ts") .hour("ts") .truncate("i", 10) .truncate("l", 10) .truncate("dec", 10) .truncate("s", 10) .build(); Assert.assertEquals("Deserialization should produce equal partition spec", spec, TestHelpers.roundTripSerialize(spec)); } }
6,404
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestLiteralSerialization.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; import java.math.BigDecimal; import java.util.UUID; public class TestLiteralSerialization { @Test public void testLiterals() throws Exception { Literal[] literals = new Literal[] { Literal.of(false), Literal.of(34), Literal.of(35L), Literal.of(36.75F), Literal.of(8.75D), Literal.of("2017-11-29").to(Types.DateType.get()), Literal.of("11:30:07").to(Types.TimeType.get()), Literal.of("2017-11-29T11:30:07.123").to(Types.TimestampType.withoutZone()), Literal.of("2017-11-29T11:30:07.123+01:00").to(Types.TimestampType.withZone()), Literal.of("abc"), Literal.of(UUID.randomUUID()), Literal.of(new byte[] { 1, 2, 3 }).to(Types.FixedType.ofLength(3)), Literal.of(new byte[] { 3, 4, 5, 6 }).to(Types.BinaryType.get()), Literal.of(new BigDecimal("122.50")), }; for (Literal<?> lit : literals) { checkValue(lit); } } private <T> void checkValue(Literal<T> lit) throws Exception { Literal<T> copy = TestHelpers.roundTripSerialize(lit); Assert.assertEquals("Literal's comparator should consider values equal", 0, lit.comparator().compare(lit.value(), copy.value())); Assert.assertEquals("Copy's comparator should consider values equal", 0, copy.comparator().compare(lit.value(), copy.value())); } }
6,405
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestStringLiteralConversions.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.types.Types; import org.apache.avro.LogicalTypes; import org.apache.avro.Schema; import org.apache.avro.data.TimeConversions; import org.joda.time.DateTimeZone; import org.joda.time.LocalDate; import org.joda.time.LocalDateTime; import org.joda.time.LocalTime; import org.junit.Assert; import org.junit.Test; import java.math.BigDecimal; import java.time.DateTimeException; import java.util.UUID; public class TestStringLiteralConversions { @Test public void testStringToStringLiteral() { Literal<CharSequence> string = Literal.of("abc"); Assert.assertSame("Should return same instance", string, string.to(Types.StringType.get())); } @Test public void testStringToDateLiteral() { Literal<CharSequence> dateStr = Literal.of("2017-08-18"); Literal<Integer> date = dateStr.to(Types.DateType.get()); // use Avro's date conversion to validate the result Schema avroSchema = LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT)); TimeConversions.DateConversion avroConversion = new TimeConversions.DateConversion(); int avroValue = avroConversion.toInt( new LocalDate(2017, 8, 18), avroSchema, avroSchema.getLogicalType()); Assert.assertEquals("Date should match", avroValue, (int) date.value()); } @Test public void testStringToTimeLiteral() { // use Avro's time conversion to validate the result Schema avroSchema = 
LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG)); TimeConversions.LossyTimeMicrosConversion avroConversion = new TimeConversions.LossyTimeMicrosConversion(); Literal<CharSequence> timeStr = Literal.of("14:21:01.919"); Literal<Long> time = timeStr.to(Types.TimeType.get()); long avroValue = avroConversion.toLong( new LocalTime(14, 21, 1, 919), avroSchema, avroSchema.getLogicalType()); Assert.assertEquals("Time should match", avroValue, (long) time.value()); } @Test public void testStringToTimestampLiteral() { // use Avro's timestamp conversion to validate the result Schema avroSchema = LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)); TimeConversions.LossyTimestampMicrosConversion avroConversion = new TimeConversions.LossyTimestampMicrosConversion(); // Timestamp with explicit UTC offset, +00:00 Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919+00:00"); Literal<Long> timestamp = timestampStr.to(Types.TimestampType.withZone()); long avroValue = avroConversion.toLong( new LocalDateTime(2017, 8, 18, 14, 21, 1, 919).toDateTime(DateTimeZone.UTC), avroSchema, avroSchema.getLogicalType()); Assert.assertEquals("Timestamp should match", avroValue, (long) timestamp.value()); // Timestamp without an explicit zone should be UTC (equal to the previous converted value) timestampStr = Literal.of("2017-08-18T14:21:01.919"); timestamp = timestampStr.to(Types.TimestampType.withoutZone()); Assert.assertEquals("Timestamp without zone should match UTC", avroValue, (long) timestamp.value()); // Timestamp with an explicit offset should be adjusted to UTC timestampStr = Literal.of("2017-08-18T14:21:01.919-07:00"); timestamp = timestampStr.to(Types.TimestampType.withZone()); avroValue = avroConversion.toLong( new LocalDateTime(2017, 8, 18, 21, 21, 1, 919).toDateTime(DateTimeZone.UTC), avroSchema, avroSchema.getLogicalType()); Assert.assertEquals("Timestamp without zone should match UTC", avroValue, (long) 
timestamp.value()); } @Test(expected = DateTimeException.class) public void testTimestampWithZoneWithoutZoneInLiteral() { // Zone must be present in literals when converting to timestamp with zone Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919"); timestampStr.to(Types.TimestampType.withZone()); } @Test(expected = DateTimeException.class) public void testTimestampWithoutZoneWithZoneInLiteral() { // Zone must not be present in literals when converting to timestamp without zone Literal<CharSequence> timestampStr = Literal.of("2017-08-18T14:21:01.919+07:00"); timestampStr.to(Types.TimestampType.withoutZone()); } @Test public void testStringToUUIDLiteral() { UUID expected = UUID.randomUUID(); Literal<CharSequence> uuidStr = Literal.of(expected.toString()); Literal<UUID> uuid = uuidStr.to(Types.UUIDType.get()); Assert.assertEquals("UUID should match", expected, uuid.value()); } @Test public void testStringToDecimalLiteral() { BigDecimal expected = new BigDecimal("34.560"); Literal<CharSequence> decimalStr = Literal.of("34.560"); Literal<BigDecimal> decimal = decimalStr.to(Types.DecimalType.of(9, 3)); Assert.assertEquals("Decimal should have scale 3", 3, decimal.value().scale()); Assert.assertEquals("Decimal should match", expected, decimal.value()); Assert.assertNull("Wrong scale in conversion should return null", decimalStr.to(Types.DecimalType.of(9, 2))); Assert.assertNull("Wrong scale in conversion should return null", decimalStr.to(Types.DecimalType.of(9, 4))); } }
6,406
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestPredicateBinding.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.StructType; import org.junit.Assert; import org.junit.Test; import java.math.BigDecimal; import java.util.Arrays; import java.util.List; import static com.netflix.iceberg.expressions.Expression.Operation.EQ; import static com.netflix.iceberg.expressions.Expression.Operation.GT; import static com.netflix.iceberg.expressions.Expression.Operation.GT_EQ; import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL; import static com.netflix.iceberg.expressions.Expression.Operation.LT; import static com.netflix.iceberg.expressions.Expression.Operation.LT_EQ; import static com.netflix.iceberg.expressions.Expression.Operation.NOT_EQ; import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL; import static com.netflix.iceberg.expressions.Expressions.ref; import static com.netflix.iceberg.TestHelpers.assertAndUnwrap; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestPredicateBinding { private static List<Expression.Operation> COMPARISONS = Arrays.asList( LT, LT_EQ, GT, GT_EQ, EQ, NOT_EQ); @Test @SuppressWarnings("unchecked") public void testMultipleFields() { StructType struct = StructType.of( 
        required(10, "x", Types.IntegerType.get()),
        required(11, "y", Types.IntegerType.get()),
        required(12, "z", Types.IntegerType.get())
    );

    // Binding by name should resolve "y" to field id 11 and leave op/literal untouched.
    UnboundPredicate<Integer> unbound = new UnboundPredicate<>(LT, ref("y"), 6);
    Expression expr = unbound.bind(struct);
    BoundPredicate<Integer> bound = assertAndUnwrap(expr);
    Assert.assertEquals("Should reference correct field ID", 11, bound.ref().fieldId());
    Assert.assertEquals("Should not change the comparison operation", LT, bound.op());
    Assert.assertEquals("Should not alter literal value",
        Integer.valueOf(6), bound.literal().value());
  }

  /** Binding a reference that does not exist in the struct must fail with ValidationException. */
  @Test
  public void testMissingField() {
    StructType struct = StructType.of(
        required(13, "x", Types.IntegerType.get())
    );

    UnboundPredicate<Integer> unbound = new UnboundPredicate<>(LT, ref("missing"), 6);
    try {
      unbound.bind(struct);
      Assert.fail("Binding a missing field should fail");
    } catch (ValidationException e) {
      Assert.assertTrue("Validation should complain about missing field",
          e.getMessage().contains("Cannot find field 'missing' in struct:"));
    }
  }

  /** Every comparison operation should bind without altering field, op, or literal. */
  @Test
  @SuppressWarnings("unchecked")
  public void testComparisonPredicateBinding() {
    StructType struct = StructType.of(required(14, "x", Types.IntegerType.get()));

    for (Expression.Operation op : COMPARISONS) {
      UnboundPredicate<Integer> unbound = new UnboundPredicate<>(op, ref("x"), 5);

      Expression expr = unbound.bind(struct);
      BoundPredicate<Integer> bound = assertAndUnwrap(expr);

      Assert.assertEquals("Should not alter literal value",
          Integer.valueOf(5), bound.literal().value());
      Assert.assertEquals("Should reference correct field ID", 14, bound.ref().fieldId());
      Assert.assertEquals("Should not change the comparison operation", op, bound.op());
    }
  }

  /** Binding should convert a String literal to the field's type (decimal here). */
  @Test
  @SuppressWarnings("unchecked")
  public void testLiteralConversion() {
    StructType struct = StructType.of(required(15, "d", Types.DecimalType.of(9, 2)));

    for (Expression.Operation op : COMPARISONS) {
      UnboundPredicate<String> unbound = new UnboundPredicate<>(op, ref("d"), "12.40");

      Expression expr = unbound.bind(struct);
      BoundPredicate<BigDecimal> bound = assertAndUnwrap(expr);
      Assert.assertEquals("Should convert literal value to decimal",
          new BigDecimal("12.40"), bound.literal().value());
      Assert.assertEquals("Should reference correct field ID", 15, bound.ref().fieldId());
      Assert.assertEquals("Should not change the comparison operation", op, bound.op());
    }
  }

  /** A String literal that cannot convert to the field type (float) must fail validation. */
  @Test
  public void testInvalidConversions() {
    StructType struct = StructType.of(required(16, "f", Types.FloatType.get()));

    for (Expression.Operation op : COMPARISONS) {
      UnboundPredicate<String> unbound = new UnboundPredicate<>(op, ref("f"), "12.40");

      try {
        unbound.bind(struct);
        Assert.fail("Should not convert string to float");
      } catch (ValidationException e) {
        // NOTE(review): the assertion message "Should " is truncated, and the
        // expected/actual arguments appear swapped (assertEquals(msg, expected, actual)
        // should put the literal string in the expected slot) — worth fixing.
        Assert.assertEquals("Should ",
            e.getMessage(),
            "Invalid value for comparison inclusive type float: 12.40 (java.lang.String)");
      }
    }
  }

  /**
   * Long literals outside the int range should simplify the predicate to
   * alwaysTrue/alwaysFalse; in-range values should be narrowed to Integer.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testLongToIntegerConversion() {
    StructType struct = StructType.of(required(17, "i", Types.IntegerType.get()));

    // out-of-range literals where the predicate is trivially satisfied
    UnboundPredicate<Long> lt = new UnboundPredicate<>(
        LT, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Less than above max should be alwaysTrue",
        Expressions.alwaysTrue(), lt.bind(struct));

    UnboundPredicate<Long> lteq = new UnboundPredicate<>(
        LT_EQ, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Less than or equal above max should be alwaysTrue",
        Expressions.alwaysTrue(), lteq.bind(struct));

    UnboundPredicate<Long> gt = new UnboundPredicate<>(
        GT, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Greater than below min should be alwaysTrue",
        Expressions.alwaysTrue(), gt.bind(struct));

    UnboundPredicate<Long> gteq = new UnboundPredicate<>(
        GT_EQ, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Greater than or equal below min should be alwaysTrue",
        Expressions.alwaysTrue(), gteq.bind(struct));

    // out-of-range literals where the predicate can never be satisfied
    UnboundPredicate<Long> gtMax = new UnboundPredicate<>(
        GT, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Greater than above max should be alwaysFalse",
        Expressions.alwaysFalse(), gtMax.bind(struct));

    UnboundPredicate<Long> gteqMax = new UnboundPredicate<>(
        GT_EQ, ref("i"), (long) Integer.MAX_VALUE + 1L);
    Assert.assertEquals("Greater than or equal above max should be alwaysFalse",
        Expressions.alwaysFalse(), gteqMax.bind(struct));

    UnboundPredicate<Long> ltMin = new UnboundPredicate<>(
        LT, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Less than below min should be alwaysFalse",
        Expressions.alwaysFalse(), ltMin.bind(struct));

    UnboundPredicate<Long> lteqMin = new UnboundPredicate<>(
        LT_EQ, ref("i"), (long) Integer.MIN_VALUE - 1L);
    Assert.assertEquals("Less than or equal below min should be alwaysFalse",
        Expressions.alwaysFalse(), lteqMin.bind(struct));

    // in-range literals should be narrowed from Long to Integer
    Expression ltExpr = new UnboundPredicate<>(LT, ref("i"), (long) Integer.MAX_VALUE).bind(struct);
    BoundPredicate<Integer> ltMax = assertAndUnwrap(ltExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MAX_VALUE, ltMax.literal().value());

    Expression lteqExpr = new UnboundPredicate<>(LT_EQ, ref("i"), (long) Integer.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Integer> lteqMax = assertAndUnwrap(lteqExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MAX_VALUE, lteqMax.literal().value());

    Expression gtExpr = new UnboundPredicate<>(GT, ref("i"), (long) Integer.MIN_VALUE).bind(struct);
    BoundPredicate<Integer> gtMin = assertAndUnwrap(gtExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MIN_VALUE, gtMin.literal().value());

    Expression gteqExpr = new UnboundPredicate<>(GT_EQ, ref("i"), (long) Integer.MIN_VALUE)
        .bind(struct);
    BoundPredicate<Integer> gteqMin = assertAndUnwrap(gteqExpr);
    Assert.assertEquals("Should translate bound to Integer",
        (Integer) Integer.MIN_VALUE, gteqMin.literal().value());
  }

  /**
   * Double literals outside the float range should simplify the predicate to
   * alwaysTrue/alwaysFalse; in-range values should be narrowed to Float.
   */
  @Test
  @SuppressWarnings("unchecked")
  public void testDoubleToFloatConversion() {
    StructType struct = StructType.of(required(18, "f", Types.FloatType.get()));

    // out-of-range literals where the predicate is trivially satisfied
    UnboundPredicate<Double> lt = new UnboundPredicate<>(
        LT, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Less than above max should be alwaysTrue",
        Expressions.alwaysTrue(), lt.bind(struct));

    UnboundPredicate<Double> lteq = new UnboundPredicate<>(
        LT_EQ, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Less than or equal above max should be alwaysTrue",
        Expressions.alwaysTrue(), lteq.bind(struct));

    UnboundPredicate<Double> gt = new UnboundPredicate<>(
        GT, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Greater than below min should be alwaysTrue",
        Expressions.alwaysTrue(), gt.bind(struct));

    UnboundPredicate<Double> gteq = new UnboundPredicate<>(
        GT_EQ, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Greater than or equal below min should be alwaysTrue",
        Expressions.alwaysTrue(), gteq.bind(struct));

    // out-of-range literals where the predicate can never be satisfied
    UnboundPredicate<Double> gtMax = new UnboundPredicate<>(
        GT, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Greater than above max should be alwaysFalse",
        Expressions.alwaysFalse(), gtMax.bind(struct));

    UnboundPredicate<Double> gteqMax = new UnboundPredicate<>(
        GT_EQ, ref("f"), (double) Float.MAX_VALUE * 2);
    Assert.assertEquals("Greater than or equal above max should be alwaysFalse",
        Expressions.alwaysFalse(), gteqMax.bind(struct));

    UnboundPredicate<Double> ltMin = new UnboundPredicate<>(
        LT, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Less than below min should be alwaysFalse",
        Expressions.alwaysFalse(), ltMin.bind(struct));

    UnboundPredicate<Double> lteqMin = new UnboundPredicate<>(
        LT_EQ, ref("f"), (double) Float.MAX_VALUE * -2);
    Assert.assertEquals("Less than or equal below min should be alwaysFalse",
        Expressions.alwaysFalse(), lteqMin.bind(struct));

    // in-range literals should be narrowed from Double to Float
    Expression ltExpr = new UnboundPredicate<>(LT, ref("f"), (double) Float.MAX_VALUE).bind(struct);
    BoundPredicate<Float> ltMax = assertAndUnwrap(ltExpr);
    Assert.assertEquals("Should translate bound to Float",
        (Float) Float.MAX_VALUE, ltMax.literal().value());

    Expression lteqExpr = new UnboundPredicate<>(LT_EQ, ref("f"), (double) Float.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Float> lteqMax = assertAndUnwrap(lteqExpr);
    Assert.assertEquals("Should translate bound to Float",
        (Float) Float.MAX_VALUE, lteqMax.literal().value());

    Expression gtExpr = new UnboundPredicate<>(GT, ref("f"), (double) -Float.MAX_VALUE).bind(struct);
    BoundPredicate<Float> gtMin = assertAndUnwrap(gtExpr);
    Assert.assertEquals("Should translate bound to Float",
        Float.valueOf(-Float.MAX_VALUE), gtMin.literal().value());

    Expression gteqExpr = new UnboundPredicate<>(GT_EQ, ref("f"), (double) -Float.MAX_VALUE)
        .bind(struct);
    BoundPredicate<Float> gteqMin = assertAndUnwrap(gteqExpr);
    Assert.assertEquals("Should translate bound to Float",
        Float.valueOf(-Float.MAX_VALUE), gteqMin.literal().value());
  }

  /** isNull binds normally on an optional field and is alwaysFalse on a required one. */
  @Test
  @SuppressWarnings("unchecked")
  public void testIsNull() {
    StructType optional = StructType.of(optional(19, "s", Types.StringType.get()));

    UnboundPredicate<?> unbound = new UnboundPredicate<>(IS_NULL, ref("s"));
    Expression expr = unbound.bind(optional);
    BoundPredicate<?> bound = assertAndUnwrap(expr);
    Assert.assertEquals("Should use the same operation", IS_NULL, bound.op());
    Assert.assertEquals("Should use the correct field", 19, bound.ref().fieldId());
    Assert.assertNull("Should not have a literal value", bound.literal());

    StructType required = StructType.of(required(20, "s", Types.StringType.get()));
    Assert.assertEquals("IsNull inclusive a required field should be alwaysFalse",
        Expressions.alwaysFalse(), unbound.bind(required));
  }

  /** notNull binds normally on an optional field and is alwaysTrue on a required one. */
  @Test
  public void testNotNull() {
    StructType optional = StructType.of(optional(21, "s", Types.StringType.get()));

    UnboundPredicate<?> unbound = new UnboundPredicate<>(NOT_NULL, ref("s"));
    Expression expr = unbound.bind(optional);
    BoundPredicate<?> bound = assertAndUnwrap(expr);
    Assert.assertEquals("Should use the same operation", NOT_NULL, bound.op());
    Assert.assertEquals("Should use the correct field", 21, bound.ref().fieldId());
    Assert.assertNull("Should not have a literal value", bound.literal());

    StructType required = StructType.of(required(22, "s", Types.StringType.get()));
    Assert.assertEquals("NotNull inclusive a required field should be alwaysTrue",
        Expressions.alwaysTrue(), unbound.bind(required));
  }
}
6,407
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestStrictMetricsEvaluator.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.google.common.collect.ImmutableMap; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.TestHelpers.Row; import com.netflix.iceberg.TestHelpers.TestDataFile; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.IntegerType; import com.netflix.iceberg.types.Types.StringType; import org.junit.Assert; import org.junit.Ignore; import org.junit.Test; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.isNull; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.notEqual; import static com.netflix.iceberg.expressions.Expressions.notNull; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Conversions.toByteBuffer; import static 
com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestStrictMetricsEvaluator { private static final Schema SCHEMA = new Schema( required(1, "id", IntegerType.get()), optional(2, "no_stats", IntegerType.get()), required(3, "required", StringType.get()), optional(4, "all_nulls", StringType.get()), optional(5, "some_nulls", StringType.get()), optional(6, "no_nulls", StringType.get()), required(7, "always_5", IntegerType.get()) ); private static final DataFile FILE = new TestDataFile("file.avro", Row.of(), 50, // any value counts, including nulls ImmutableMap.of( 4, 50L, 5, 50L, 6, 50L), // null value counts ImmutableMap.of( 4, 50L, 5, 10L, 6, 0L), // lower bounds ImmutableMap.of( 1, toByteBuffer(IntegerType.get(), 30), 7, toByteBuffer(IntegerType.get(), 5)), // upper bounds ImmutableMap.of( 1, toByteBuffer(IntegerType.get(), 79), 7, toByteBuffer(IntegerType.get(), 5))); @Test public void testAllNulls() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("all_nulls")).eval(FILE); Assert.assertFalse("Should not match: no non-null value in all null column", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("some_nulls")).eval(FILE); Assert.assertFalse("Should not match: column with some nulls contains a non-null value", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("no_nulls")).eval(FILE); Assert.assertTrue("Should match: non-null column contains no null values", shouldRead); } @Test public void testNoNulls() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("all_nulls")).eval(FILE); Assert.assertTrue("Should match: all values are null", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("some_nulls")).eval(FILE); Assert.assertFalse("Should not match: not all values are null", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("no_nulls")).eval(FILE); Assert.assertFalse("Should not 
match: no values are null", shouldRead); } @Test public void testRequiredColumn() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notNull("required")).eval(FILE); Assert.assertTrue("Should match: required columns are always non-null", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, isNull("required")).eval(FILE); Assert.assertFalse("Should not match: required columns never contain null", shouldRead); } @Test public void testMissingColumn() { TestHelpers.assertThrows("Should complain about missing column in expression", ValidationException.class, "Cannot find field 'missing'", () -> new StrictMetricsEvaluator(SCHEMA, lessThan("missing", 5)).eval(FILE)); } @Test public void testMissingStats() { DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50); Expression[] exprs = new Expression[] { lessThan("no_stats", 5), lessThanOrEqual("no_stats", 30), equal("no_stats", 70), greaterThan("no_stats", 78), greaterThanOrEqual("no_stats", 90), notEqual("no_stats", 101), isNull("no_stats"), notNull("no_stats") }; for (Expression expr : exprs) { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, expr).eval(missingStats); Assert.assertFalse("Should never match when stats are missing for expr: " + expr, shouldRead); } } @Test public void testZeroRecordFile() { DataFile empty = new TestDataFile("file.parquet", Row.of(), 0); Expression[] exprs = new Expression[] { lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78), greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"), notNull("some_nulls") }; for (Expression expr : exprs) { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, expr).eval(empty); Assert.assertTrue("Should always match 0-record file: " + expr, shouldRead); } } @Test public void testNot() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, not(lessThan("id", 5))).eval(FILE); 
Assert.assertTrue("Should not match: not(false)", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(greaterThan("id", 5))).eval(FILE); Assert.assertFalse("Should match: not(true)", shouldRead); } @Test public void testAnd() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE); Assert.assertFalse("Should not match: range overlaps data", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE); Assert.assertFalse("Should match: range does not overlap data", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, and(lessThan("id", 85), greaterThanOrEqual("id", 0))).eval(FILE); Assert.assertTrue("Should match: range includes all data", shouldRead); } @Test public void testOr() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE); Assert.assertFalse("Should not match: no matching values", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE); Assert.assertFalse("Should not match: some values do not match", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 30))).eval(FILE); Assert.assertTrue("Should match: all values match > 30", shouldRead); } @Test public void testIntegerLt() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 30)).eval(FILE); Assert.assertFalse("Should not match: always false", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 31)).eval(FILE); Assert.assertFalse("Should not match: 32 and greater not in range", shouldRead); shouldRead = new 
StrictMetricsEvaluator(SCHEMA, lessThan("id", 79)).eval(FILE); Assert.assertFalse("Should not match: 79 not in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThan("id", 80)).eval(FILE); Assert.assertTrue("Should match: all values in range", shouldRead); } @Test public void testIntegerLtEq() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 29)).eval(FILE); Assert.assertFalse("Should not match: always false", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 30)).eval(FILE); Assert.assertFalse("Should not match: 31 and greater not in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 79)).eval(FILE); Assert.assertTrue("Should match: all values in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 80)).eval(FILE); Assert.assertTrue("Should match: all values in range", shouldRead); } @Test public void testIntegerGt() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 79)).eval(FILE); Assert.assertFalse("Should not match: always false", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 78)).eval(FILE); Assert.assertFalse("Should not match: 77 and less not in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 30)).eval(FILE); Assert.assertFalse("Should not match: 30 not in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThan("id", 29)).eval(FILE); Assert.assertTrue("Should match: all values in range", shouldRead); } @Test public void testIntegerGtEq() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 80)).eval(FILE); Assert.assertFalse("Should not match: no values in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 79)).eval(FILE); Assert.assertFalse("Should not match: 78 and lower are not in range", 
shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 31)).eval(FILE); Assert.assertFalse("Should not match: 30 not in range", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 30)).eval(FILE); Assert.assertTrue("Should match: all values in range", shouldRead); } @Test public void testIntegerEq() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 5)).eval(FILE); Assert.assertFalse("Should not match: all values != 5", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 30)).eval(FILE); Assert.assertFalse("Should not match: some values != 30", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 75)).eval(FILE); Assert.assertFalse("Should not match: some values != 75", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 79)).eval(FILE); Assert.assertFalse("Should not match: some values != 79", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("id", 80)).eval(FILE); Assert.assertFalse("Should not match: some values != 80", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, equal("always_5", 5)).eval(FILE); Assert.assertTrue("Should match: all values == 5", shouldRead); } @Test public void testIntegerNotEq() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 5)).eval(FILE); Assert.assertTrue("Should match: no values == 5", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 29)).eval(FILE); Assert.assertTrue("Should match: no values == 39", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 30)).eval(FILE); Assert.assertFalse("Should not match: some value may be == 30", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 75)).eval(FILE); Assert.assertFalse("Should not match: some value may be == 75", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 79)).eval(FILE); 
Assert.assertFalse("Should not match: some value may be == 79", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 80)).eval(FILE); Assert.assertTrue("Should match: no values == 80", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, notEqual("id", 85)).eval(FILE); Assert.assertTrue("Should read: no values == 85", shouldRead); } @Test public void testIntegerNotEqRewritten() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 5))).eval(FILE); Assert.assertTrue("Should match: no values == 5", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 29))).eval(FILE); Assert.assertTrue("Should match: no values == 39", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 30))).eval(FILE); Assert.assertFalse("Should not match: some value may be == 30", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 75))).eval(FILE); Assert.assertFalse("Should not match: some value may be == 75", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 79))).eval(FILE); Assert.assertFalse("Should not match: some value may be == 79", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 80))).eval(FILE); Assert.assertTrue("Should match: no values == 80", shouldRead); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", 85))).eval(FILE); Assert.assertTrue("Should read: no values == 85", shouldRead); } }
6,408
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionHelpers.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.TestHelpers; import org.junit.Assert; import org.junit.Test; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.alwaysTrue; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.or; public class TestExpressionHelpers { private final UnboundPredicate<?> pred = lessThan("x", 7); @Test public void testSimplifyOr() { Assert.assertEquals("alwaysTrue or pred => alwaysTrue", alwaysTrue(), or(alwaysTrue(), pred)); Assert.assertEquals("pred or alwaysTrue => alwaysTrue", alwaysTrue(), or(pred, alwaysTrue())); Assert.assertEquals("alwaysFalse or pred => pred", pred, or(alwaysFalse(), pred)); Assert.assertEquals("pred or alwaysTrue => pred", pred, or(pred, alwaysFalse())); } @Test public void testSimplifyAnd() { Assert.assertEquals("alwaysTrue and pred => pred", pred, and(alwaysTrue(), pred)); Assert.assertEquals("pred and alwaysTrue => pred", pred, and(pred, alwaysTrue())); Assert.assertEquals("alwaysFalse and pred => alwaysFalse", alwaysFalse(), and(alwaysFalse(), pred)); Assert.assertEquals("pred and 
alwaysFalse => alwaysFalse", alwaysFalse(), and(pred, alwaysFalse())); } @Test public void testSimplifyNot() { Assert.assertEquals("not(alwaysTrue) => alwaysFalse", alwaysFalse(), not(alwaysTrue())); Assert.assertEquals("not(alwaysFalse) => alwaysTrue", alwaysTrue(), not(alwaysFalse())); Assert.assertEquals("not(not(pred)) => pred", pred, not(not(pred))); } @Test public void testNullName() { TestHelpers.assertThrows("Should catch null column names when creating expressions", NullPointerException.class, "Name cannot be null", () -> equal(null, 5)); } }
6,409
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestNumericLiteralConversions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;

import java.math.BigDecimal;

/**
 * Tests widening and narrowing conversions between numeric {@link Literal} types,
 * including out-of-range results (Literals.aboveMax / Literals.belowMin) and
 * decimal scale handling.
 */
public class TestNumericLiteralConversions {
  @Test
  public void testIntegerToLongConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Long> longLit = lit.to(Types.LongType.get());

    Assert.assertEquals("Value should match", 34L, (long) longLit.value());
  }

  @Test
  public void testIntegerToFloatConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.0F, floatLit.value(), 0.0000000001D);
  }

  @Test
  public void testIntegerToDoubleConversion() {
    Literal<Integer> lit = Literal.of(34);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.0D, doubleLit.value(), 0.0000000001D);
  }

  @Test
  public void testIntegerToDecimalConversion() {
    Literal<Integer> lit = Literal.of(34);

    Assert.assertEquals("Value should match",
        new BigDecimal("34"), lit.to(Types.DecimalType.of(9, 0)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.00"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.0000"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testLongToIntegerConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Integer> intLit = lit.to(Types.IntegerType.get());

    Assert.assertEquals("Value should match", 34, (int) intLit.value());

    // narrowing out-of-range longs yields the sentinel literals
    Assert.assertEquals("Values above Integer.MAX_VALUE should be Literals.aboveMax()",
        Literals.aboveMax(), Literal.of((long) Integer.MAX_VALUE + 1L).to(Types.IntegerType.get()));
    Assert.assertEquals("Values below Integer.MIN_VALUE should be Literals.belowMin()",
        Literals.belowMin(), Literal.of((long) Integer.MIN_VALUE - 1L).to(Types.IntegerType.get()));
  }

  @Test
  public void testLongToFloatConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.0F, floatLit.value(), 0.0000000001D);
  }

  @Test
  public void testLongToDoubleConversion() {
    Literal<Long> lit = Literal.of(34L);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.0D, doubleLit.value(), 0.0000000001D);
  }

  @Test
  public void testLongToDecimalConversion() {
    Literal<Long> lit = Literal.of(34L);

    Assert.assertEquals("Value should match",
        new BigDecimal("34"), lit.to(Types.DecimalType.of(9, 0)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.00"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.0000"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testFloatToDoubleConversion() {
    Literal<Float> lit = Literal.of(34.56F);
    Literal<Double> doubleLit = lit.to(Types.DoubleType.get());

    Assert.assertEquals("Value should match", 34.56D, doubleLit.value(), 0.001D);
  }

  @Test
  public void testFloatToDecimalConversion() {
    Literal<Float> lit = Literal.of(34.56F);

    Assert.assertEquals("Value should round using HALF_UP",
        new BigDecimal("34.6"), lit.to(Types.DecimalType.of(9, 1)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.56"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.5600"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testDoubleToFloatConversion() {
    Literal<Double> lit = Literal.of(34.56D);
    // FIX: local was misleadingly named doubleLit; it holds the Float conversion result
    Literal<Float> floatLit = lit.to(Types.FloatType.get());

    Assert.assertEquals("Value should match", 34.56F, floatLit.value(), 0.001D);

    // this adjusts Float.MAX_VALUE using multipliers because most integer adjustments are lost by
    // floating point precision.
    Assert.assertEquals("Values above Float.MAX_VALUE should be Literals.aboveMax()",
        Literals.aboveMax(), Literal.of(2 * ((double) Float.MAX_VALUE)).to(Types.FloatType.get()));
    // FIX: message said "below Float.MIN_VALUE"; the probe is below -Float.MAX_VALUE
    // (Float.MIN_VALUE is the smallest POSITIVE float, not the lower bound)
    Assert.assertEquals("Values below -Float.MAX_VALUE should be Literals.belowMin()",
        Literals.belowMin(), Literal.of(-2 * ((double) Float.MAX_VALUE)).to(Types.FloatType.get()));
  }

  @Test
  public void testDoubleToDecimalConversion() {
    Literal<Double> lit = Literal.of(34.56D);

    Assert.assertEquals("Value should round using HALF_UP",
        new BigDecimal("34.6"), lit.to(Types.DecimalType.of(9, 1)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.56"), lit.to(Types.DecimalType.of(9, 2)).value());
    Assert.assertEquals("Value should match",
        new BigDecimal("34.5600"), lit.to(Types.DecimalType.of(9, 4)).value());
  }

  @Test
  public void testDecimalToDecimalConversion() {
    Literal<BigDecimal> lit = Literal.of(new BigDecimal("34.11"));

    // same scale returns the same literal instance, regardless of precision
    Assert.assertSame("Should return identical object when converting to same scale",
        lit, lit.to(Types.DecimalType.of(9, 2)));
    Assert.assertSame("Should return identical object when converting to same scale",
        lit, lit.to(Types.DecimalType.of(11, 2)));

    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 0)));
    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 1)));
    Assert.assertNull("Changing decimal scale is not allowed",
        lit.to(Types.DecimalType.of(9, 3)));
  }
}
6,410
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionBinding.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.StructType; import org.junit.Assert; import org.junit.Test; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.alwaysTrue; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestExpressionBinding { private static final StructType STRUCT = StructType.of( required(0, "x", Types.IntegerType.get()), required(1, "y", Types.IntegerType.get()), required(2, "z", Types.IntegerType.get()) ); @Test public void testMissingReference() { Expression expr = and(equal("t", 5), equal("x", 7)); try { Binder.bind(STRUCT, expr); Assert.fail("Should not successfully bind to struct without field 't'"); } catch (ValidationException e) { Assert.assertTrue("Should complain about missing field", 
e.getMessage().contains("Cannot find field 't' in struct:")); } } @Test(expected = IllegalStateException.class) public void testBoundExpressionFails() { Expression expr = not(equal("x", 7)); Binder.bind(STRUCT, Binder.bind(STRUCT, expr)); } @Test public void testSingleReference() { Expression expr = not(equal("x", 7)); TestHelpers.assertAllReferencesBound("Single reference", Binder.bind(STRUCT, expr)); } @Test public void testMultipleReferences() { Expression expr = or(and(equal("x", 7), lessThan("y", 100)), greaterThan("z", -100)); TestHelpers.assertAllReferencesBound("Multiple references", Binder.bind(STRUCT, expr)); } @Test public void testAnd() { Expression expr = and(equal("x", 7), lessThan("y", 100)); Expression boundExpr = Binder.bind(STRUCT, expr); TestHelpers.assertAllReferencesBound("And", boundExpr); // make sure the result is an And And and = TestHelpers.assertAndUnwrap(boundExpr, And.class); // make sure the refs are for the right fields BoundPredicate<?> left = TestHelpers.assertAndUnwrap(and.left()); Assert.assertEquals("Should bind x correctly", 0, left.ref().fieldId()); BoundPredicate<?> right = TestHelpers.assertAndUnwrap(and.right()); Assert.assertEquals("Should bind y correctly", 1, right.ref().fieldId()); } @Test public void testOr() { Expression expr = or(greaterThan("z", -100), lessThan("y", 100)); Expression boundExpr = Binder.bind(STRUCT, expr); TestHelpers.assertAllReferencesBound("Or", boundExpr); // make sure the result is an Or Or or = TestHelpers.assertAndUnwrap(boundExpr, Or.class); // make sure the refs are for the right fields BoundPredicate<?> left = TestHelpers.assertAndUnwrap(or.left()); Assert.assertEquals("Should bind z correctly", 2, left.ref().fieldId()); BoundPredicate<?> right = TestHelpers.assertAndUnwrap(or.right()); Assert.assertEquals("Should bind y correctly", 1, right.ref().fieldId()); } @Test public void testNot() { Expression expr = not(equal("x", 7)); Expression boundExpr = Binder.bind(STRUCT, expr); 
TestHelpers.assertAllReferencesBound("Not", boundExpr); // make sure the result is a Not Not not = TestHelpers.assertAndUnwrap(boundExpr, Not.class); // make sure the refs are for the right fields BoundPredicate<?> child = TestHelpers.assertAndUnwrap(not.child()); Assert.assertEquals("Should bind x correctly", 0, child.ref().fieldId()); } @Test public void testAlwaysTrue() { Assert.assertEquals("Should not change alwaysTrue", alwaysTrue(), Binder.bind(STRUCT, alwaysTrue())); } @Test public void testAlwaysFalse() { Assert.assertEquals("Should not change alwaysFalse", alwaysFalse(), Binder.bind(STRUCT, alwaysFalse())); } @Test public void testBasicSimplification() { // this tests that a basic simplification is done by calling the helpers in Expressions. those // are more thoroughly tested in TestExpressionHelpers. // the second predicate is always true once it is bound because z is an integer and the literal // is less than any 32-bit integer value Assert.assertEquals("Should simplify or expression to alwaysTrue", alwaysTrue(), Binder.bind(STRUCT, or(lessThan("y", 100), greaterThan("z", -9999999999L)))); // similarly, the second predicate is always false Assert.assertEquals("Should simplify and expression to predicate", alwaysFalse(), Binder.bind(STRUCT, and(lessThan("y", 100), lessThan("z", -9999999999L)))); Expression bound = Binder.bind(STRUCT, not(not(lessThan("y", 100)))); BoundPredicate<?> pred = TestHelpers.assertAndUnwrap(bound); Assert.assertEquals("Should have the correct bound field", 1, pred.ref().fieldId()); } }
6,411
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestInclusiveManifestEvaluator.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package com.netflix.iceberg.expressions; import com.google.common.collect.ImmutableList; import com.netflix.iceberg.ManifestFile; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; import java.nio.ByteBuffer; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.isNull; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.notEqual; import static com.netflix.iceberg.expressions.Expressions.notNull; import static com.netflix.iceberg.expressions.Expressions.or; import static 
com.netflix.iceberg.types.Conversions.toByteBuffer; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestInclusiveManifestEvaluator { private static final Schema SCHEMA = new Schema( required(1, "id", Types.IntegerType.get()), optional(4, "all_nulls", Types.StringType.get()), optional(5, "some_nulls", Types.StringType.get()), optional(6, "no_nulls", Types.StringType.get()) ); private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA) .withSpecId(0) .identity("id") .identity("all_nulls") .identity("some_nulls") .identity("no_nulls") .build(); private static final ByteBuffer INT_MIN = toByteBuffer(Types.IntegerType.get(), 30); private static final ByteBuffer INT_MAX = toByteBuffer(Types.IntegerType.get(), 79); private static final ByteBuffer STRING_MIN = toByteBuffer(Types.StringType.get(), "a"); private static final ByteBuffer STRING_MAX = toByteBuffer(Types.StringType.get(), "z"); private static final ManifestFile NO_STATS = new TestHelpers.TestManifestFile( "manifest-list.avro", 1024, 0, System.currentTimeMillis(), null, null, null, null); private static final ManifestFile FILE = new TestHelpers.TestManifestFile("manifest-list.avro", 1024, 0, System.currentTimeMillis(), 5, 10, 0, ImmutableList.of( new TestHelpers.TestFieldSummary(false, INT_MIN, INT_MAX), new TestHelpers.TestFieldSummary(true, null, null), new TestHelpers.TestFieldSummary(true, STRING_MIN, STRING_MAX), new TestHelpers.TestFieldSummary(false, STRING_MIN, STRING_MAX))); @Test public void testAllNulls() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, notNull("all_nulls")).eval(FILE); Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notNull("some_nulls")).eval(FILE); Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead); shouldRead = new 
InclusiveManifestEvaluator(SPEC, notNull("no_nulls")).eval(FILE); Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead); } @Test public void testNoNulls() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("all_nulls")).eval(FILE); Assert.assertTrue("Should read: at least one null value in all null column", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("some_nulls")).eval(FILE); Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, isNull("no_nulls")).eval(FILE); Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead); } @Test public void testMissingColumn() { TestHelpers.assertThrows("Should complain about missing column in expression", ValidationException.class, "Cannot find field 'missing'", () -> new InclusiveManifestEvaluator(SPEC, lessThan("missing", 5)).eval(FILE)); } @Test public void testMissingStats() { Expression[] exprs = new Expression[] { lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78), greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("id"), notNull("id") }; for (Expression expr : exprs) { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, expr).eval(NO_STATS); Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead); } } @Test public void testNot() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new InclusiveManifestEvaluator(SPEC, not(lessThan("id", 5))).eval(FILE); Assert.assertTrue("Should read: not(false)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(greaterThan("id", 5))).eval(FILE); Assert.assertFalse("Should skip: not(true)", shouldRead); } @Test public void testAnd() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = 
new InclusiveManifestEvaluator( SPEC, and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE); Assert.assertFalse("Should skip: and(false, false)", shouldRead); shouldRead = new InclusiveManifestEvaluator( SPEC, and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE); Assert.assertTrue("Should read: and(true, true)", shouldRead); } @Test public void testOr() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new InclusiveManifestEvaluator( SPEC, or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE); Assert.assertFalse("Should skip: or(false, false)", shouldRead); shouldRead = new InclusiveManifestEvaluator( SPEC, or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE); Assert.assertTrue("Should read: or(false, true)", shouldRead); } @Test public void testIntegerLt() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 30)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 31)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, lessThan("id", 79)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerLtEq() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 29)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, 
lessThanOrEqual("id", 30)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, lessThanOrEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: many possible ids", shouldRead); } @Test public void testIntegerGt() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 79)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 78)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, greaterThan("id", 75)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerGtEq() { boolean shouldRead = new InclusiveManifestEvaluator( SPEC, greaterThanOrEqual("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new InclusiveManifestEvaluator( SPEC, greaterThanOrEqual("id", 80)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead); shouldRead = new InclusiveManifestEvaluator( SPEC, greaterThanOrEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveManifestEvaluator( SPEC, greaterThanOrEqual("id", 75)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerEq() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 29)).eval(FILE); Assert.assertFalse("Should not read: id 
below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 30)).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 75)).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 79)).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 80)).eval(FILE); Assert.assertFalse("Should not read: id above upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, equal("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id above upper bound", shouldRead); } @Test public void testIntegerNotEq() { boolean shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 5)).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 29)).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 30)).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 75)).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 80)).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, notEqual("id", 85)).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); } @Test public void testIntegerNotEqRewritten() { boolean shouldRead = new 
InclusiveManifestEvaluator(SPEC, not(equal("id", 5))).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 29))).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 30))).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 75))).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 79))).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 80))).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new InclusiveManifestEvaluator(SPEC, not(equal("id", 85))).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); } }
6,412
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestExpressionSerialization.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.expressions.Expression.Operation; import com.netflix.iceberg.types.Types; import org.junit.Assert; import org.junit.Test; public class TestExpressionSerialization { @Test public void testExpressions() throws Exception { Schema schema = new Schema( Types.NestedField.optional(34, "a", Types.IntegerType.get()) ); Expression[] expressions = new Expression[] { Expressions.alwaysFalse(), Expressions.alwaysTrue(), Expressions.lessThan("x", 5), Expressions.lessThanOrEqual("y", -3), Expressions.greaterThan("z", 0), Expressions.greaterThanOrEqual("t", 129), Expressions.equal("col", "data"), Expressions.notEqual("col", "abc"), Expressions.notNull("maybeNull"), Expressions.isNull("maybeNull2"), Expressions.not(Expressions.greaterThan("a", 10)), Expressions.and(Expressions.greaterThanOrEqual("a", 0), Expressions.lessThan("a", 3)), Expressions.or(Expressions.lessThan("a", 0), Expressions.greaterThan("a", 10)), Expressions.equal("a", 5).bind(schema.asStruct()) }; for (Expression expression : expressions) { Expression copy = TestHelpers.roundTripSerialize(expression); Assert.assertTrue( "Expression should equal the deserialized copy: " + expression + " != " + copy, equals(expression, copy)); } } // You may be wondering why this isn't implemented as Expression.equals. 
The reason is that // expression equality implies equivalence, which is wider than structural equality. For example, // lessThan("a", 3) is equivalent to not(greaterThanOrEqual("a", 4)). To avoid confusion, equals // only guarantees object identity. private static boolean equals(Expression left, Expression right) { if (left.op() != right.op()) { return false; } if (left instanceof Predicate) { if (!(left.getClass().isInstance(right))) { return false; } return equals((Predicate) left, (Predicate) right); } switch (left.op()) { case FALSE: case TRUE: return true; case NOT: return equals(((Not) left).child(), ((Not) right).child()); case AND: return ( equals(((And) left).left(), ((And) right).left()) && equals(((And) left).right(), ((And) right).right()) ); case OR: return ( equals(((Or) left).left(), ((Or) right).left()) && equals(((Or) left).right(), ((Or) right).right()) ); default: return false; } } @SuppressWarnings("unchecked") private static boolean equals(Predicate left, Predicate right) { if (left.op() != right.op()) { return false; } if (!equals(left.ref(), right.ref())) { return false; } if (left.op() == Operation.IS_NULL || left.op() == Operation.NOT_NULL) { return true; } return left.literal().comparator() .compare(left.literal().value(), right.literal().value()) == 0; } private static boolean equals(Reference left, Reference right) { if (left instanceof NamedReference) { if (!(right instanceof NamedReference)) { return false; } NamedReference lref = (NamedReference) left; NamedReference rref = (NamedReference) right; return lref.name.equals(rref.name); } else if (left instanceof BoundReference) { if (!(right instanceof BoundReference)) { return false; } BoundReference lref = (BoundReference) left; BoundReference rref = (BoundReference) right; return ( lref.fieldId() == rref.fieldId() && lref.type().equals(rref.type()) ); } return false; } }
6,413
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestInclusiveMetricsEvaluator.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.google.common.collect.ImmutableMap; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.Schema; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.TestHelpers.Row; import com.netflix.iceberg.TestHelpers.TestDataFile; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.IntegerType; import org.junit.Assert; import org.junit.Test; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.isNull; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.notEqual; import static com.netflix.iceberg.expressions.Expressions.notNull; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Conversions.toByteBuffer; import static com.netflix.iceberg.types.Types.NestedField.optional; import static 
com.netflix.iceberg.types.Types.NestedField.required; public class TestInclusiveMetricsEvaluator { private static final Schema SCHEMA = new Schema( required(1, "id", IntegerType.get()), optional(2, "no_stats", Types.IntegerType.get()), required(3, "required", Types.StringType.get()), optional(4, "all_nulls", Types.StringType.get()), optional(5, "some_nulls", Types.StringType.get()), optional(6, "no_nulls", Types.StringType.get()) ); private static final DataFile FILE = new TestDataFile("file.avro", Row.of(), 50, // any value counts, including nulls ImmutableMap.of( 4, 50L, 5, 50L, 6, 50L), // null value counts ImmutableMap.of( 4, 50L, 5, 10L, 6, 0L), // lower bounds ImmutableMap.of( 1, toByteBuffer(IntegerType.get(), 30)), // upper bounds ImmutableMap.of( 1, toByteBuffer(IntegerType.get(), 79))); @Test public void testAllNulls() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("all_nulls")).eval(FILE); Assert.assertFalse("Should skip: no non-null value in all null column", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("some_nulls")).eval(FILE); Assert.assertTrue("Should read: column with some nulls contains a non-null value", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("no_nulls")).eval(FILE); Assert.assertTrue("Should read: non-null column contains a non-null value", shouldRead); } @Test public void testNoNulls() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("all_nulls")).eval(FILE); Assert.assertTrue("Should read: at least one null value in all null column", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("some_nulls")).eval(FILE); Assert.assertTrue("Should read: column with some nulls contains a null value", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("no_nulls")).eval(FILE); Assert.assertFalse("Should skip: non-null column contains no null values", shouldRead); } @Test public void testRequiredColumn() { boolean 
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notNull("required")).eval(FILE); Assert.assertTrue("Should read: required columns are always non-null", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, isNull("required")).eval(FILE); Assert.assertFalse("Should skip: required columns are always non-null", shouldRead); } @Test public void testMissingColumn() { TestHelpers.assertThrows("Should complain about missing column in expression", ValidationException.class, "Cannot find field 'missing'", () -> new InclusiveMetricsEvaluator(SCHEMA, lessThan("missing", 5)).eval(FILE)); } @Test public void testMissingStats() { DataFile missingStats = new TestDataFile("file.parquet", Row.of(), 50); Expression[] exprs = new Expression[] { lessThan("no_stats", 5), lessThanOrEqual("no_stats", 30), equal("no_stats", 70), greaterThan("no_stats", 78), greaterThanOrEqual("no_stats", 90), notEqual("no_stats", 101), isNull("no_stats"), notNull("no_stats") }; for (Expression expr : exprs) { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, expr).eval(missingStats); Assert.assertTrue("Should read when missing stats for expr: " + expr, shouldRead); } } @Test public void testZeroRecordFile() { DataFile empty = new TestDataFile("file.parquet", Row.of(), 0); Expression[] exprs = new Expression[] { lessThan("id", 5), lessThanOrEqual("id", 30), equal("id", 70), greaterThan("id", 78), greaterThanOrEqual("id", 90), notEqual("id", 101), isNull("some_nulls"), notNull("some_nulls") }; for (Expression expr : exprs) { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, expr).eval(empty); Assert.assertFalse("Should never read 0-record file: " + expr, shouldRead); } } @Test public void testNot() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(lessThan("id", 5))).eval(FILE); Assert.assertTrue("Should read: not(false)", shouldRead); shouldRead = new 
InclusiveMetricsEvaluator(SCHEMA, not(greaterThan("id", 5))).eval(FILE); Assert.assertFalse("Should skip: not(true)", shouldRead); } @Test public void testAnd() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, and(lessThan("id", 5), greaterThanOrEqual("id", 0))).eval(FILE); Assert.assertFalse("Should skip: and(false, false)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, and(greaterThan("id", 5), lessThanOrEqual("id", 30))).eval(FILE); Assert.assertTrue("Should read: and(true, true)", shouldRead); } @Test public void testOr() { // this test case must use a real predicate, not alwaysTrue(), or binding will simplify it out boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 80))).eval(FILE); Assert.assertFalse("Should skip: or(false, false)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, or(lessThan("id", 5), greaterThanOrEqual("id", 60))).eval(FILE); Assert.assertTrue("Should read: or(false, true)", shouldRead); } @Test public void testIntegerLt() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (5 < 30)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 30)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (30 is not < 30)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 31)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThan("id", 79)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerLtEq() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id range below 
lower bound (5 < 30)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 29)).eval(FILE); Assert.assertFalse("Should not read: id range below lower bound (29 < 30)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 30)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, lessThanOrEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: many possible ids", shouldRead); } @Test public void testIntegerGt() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 79)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (79 is not > 79)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 78)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThan("id", 75)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerGtEq() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (85 < 79)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 80)).eval(FILE); Assert.assertFalse("Should not read: id range above upper bound (80 > 79)", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: one possible id", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", 75)).eval(FILE); Assert.assertTrue("Should read: may possible ids", shouldRead); } @Test public void testIntegerEq() { boolean 
shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 5)).eval(FILE); Assert.assertFalse("Should not read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 29)).eval(FILE); Assert.assertFalse("Should not read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 30)).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 75)).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 79)).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 80)).eval(FILE); Assert.assertFalse("Should not read: id above upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", 85)).eval(FILE); Assert.assertFalse("Should not read: id above upper bound", shouldRead); } @Test public void testIntegerNotEq() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 5)).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 29)).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 30)).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 75)).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 79)).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 80)).eval(FILE); 
Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, notEqual("id", 85)).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); } @Test public void testIntegerNotEqRewritten() { boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 5))).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 29))).eval(FILE); Assert.assertTrue("Should read: id below lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 30))).eval(FILE); Assert.assertTrue("Should read: id equal to lower bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 75))).eval(FILE); Assert.assertTrue("Should read: id between lower and upper bounds", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 79))).eval(FILE); Assert.assertTrue("Should read: id equal to upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 80))).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); shouldRead = new InclusiveMetricsEvaluator(SCHEMA, not(equal("id", 85))).eval(FILE); Assert.assertTrue("Should read: id above upper bound", shouldRead); } }
6,414
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestMiscLiteralConversions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;

/**
 * Tests {@link Literal#to(Type)} conversion rules that are not covered by the per-type
 * conversion tests: identity conversions, fixed/binary conversions, and the matrix of
 * disallowed conversions (where {@code to} returns null).
 * <p>
 * Note: a type missing from a testInvalid* list means the conversion IS allowed for that
 * source type (e.g. int -&gt; date), so do not add types to these lists without checking
 * the conversion rules.
 */
public class TestMiscLiteralConversions {
  @Test
  public void testIdentityConversions() {
    // one (literal, target type) pair per supported type; date/time/timestamp/decimal
    // literals start as strings and are converted to the target type first
    List<Pair<Literal<?>, Type>> pairs = Arrays.asList(
        Pair.of(Literal.of(true), Types.BooleanType.get()),
        Pair.of(Literal.of(34), Types.IntegerType.get()),
        Pair.of(Literal.of(34L), Types.LongType.get()),
        Pair.of(Literal.of(34.11F), Types.FloatType.get()),
        Pair.of(Literal.of(34.55D), Types.DoubleType.get()),
        Pair.of(Literal.of("34.55"), Types.DecimalType.of(9, 2)),
        Pair.of(Literal.of("2017-08-18"), Types.DateType.get()),
        Pair.of(Literal.of("14:21:01.919"), Types.TimeType.get()),
        Pair.of(Literal.of("2017-08-18T14:21:01.919"), Types.TimestampType.withoutZone()),
        Pair.of(Literal.of("abc"), Types.StringType.get()),
        Pair.of(Literal.of(UUID.randomUUID()), Types.UUIDType.get()),
        Pair.of(Literal.of(new byte[] {0, 1, 2}), Types.FixedType.ofLength(3)),
        Pair.of(Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2})), Types.BinaryType.get())
    );

    for (Pair<Literal<?>, Type> pair : pairs) {
      Literal<?> lit = pair.first();
      Type type = pair.second();

      // first, convert the literal to the target type (date/times start as strings)
      Literal<?> expected = lit.to(type);

      // then check that converting again to the same type results in an identical literal
      // (assertSame: identity conversion must return the same instance, not a copy)
      Assert.assertSame("Converting twice should produce identical values",
          expected, expected.to(type));
    }
  }

  @Test
  public void testBinaryToFixed() {
    // binary -> fixed is only allowed when the buffer length matches the fixed length exactly
    Literal<ByteBuffer> lit = Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2}));
    Literal<ByteBuffer> fixedLit = lit.to(Types.FixedType.ofLength(3));
    Assert.assertNotNull("Should allow conversion to correct fixed length", fixedLit);

    // duplicate() so comparing does not move the buffers' positions
    Assert.assertEquals("Conversion should not change value",
        lit.value().duplicate(), fixedLit.value().duplicate());

    Assert.assertNull("Should not allow conversion to different fixed length",
        lit.to(Types.FixedType.ofLength(4)));
    Assert.assertNull("Should not allow conversion to different fixed length",
        lit.to(Types.FixedType.ofLength(2)));
  }

  @Test
  public void testFixedToBinary() {
    // fixed -> binary is always allowed; only the type changes, not the value
    Literal<ByteBuffer> lit = Literal.of(new byte[] {0, 1, 2});
    Literal<ByteBuffer> binaryLit = lit.to(Types.BinaryType.get());
    Assert.assertNotNull("Should allow conversion to binary", binaryLit);

    Assert.assertEquals("Conversion should not change value",
        lit.value().duplicate(), binaryLit.value().duplicate());
  }

  @Test
  public void testInvalidBooleanConversions() {
    // booleans convert to no other type
    testInvalidConversions(Literal.of(true),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 2),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidIntegerConversions() {
    // int -> long/float/double/date/decimal are allowed and intentionally absent here
    testInvalidConversions(Literal.of(34),
        Types.BooleanType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidLongConversions() {
    // long -> int/float/double/time/timestamp/decimal are allowed and intentionally absent here
    testInvalidConversions(Literal.of(34L),
        Types.BooleanType.get(),
        Types.DateType.get(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidFloatConversions() {
    // float -> double/decimal are allowed and intentionally absent here
    testInvalidConversions(Literal.of(34.11F),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidDoubleConversions() {
    // double -> float/decimal are allowed and intentionally absent here
    testInvalidConversions(Literal.of(34.11D),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidDateConversions() {
    testInvalidConversions(Literal.of("2017-08-18").to(Types.DateType.get()),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 4),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidTimeConversions() {
    testInvalidConversions(
        Literal.of("14:21:01.919").to(Types.TimeType.get()),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 4),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidTimestampConversions() {
    // timestamp -> date is allowed and intentionally absent here
    testInvalidConversions(
        Literal.of("2017-08-18T14:21:01.919").to(Types.TimestampType.withoutZone()),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.TimeType.get(),
        Types.DecimalType.of(9, 4),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidDecimalConversions() {
    // note: decimal -> decimal with a DIFFERENT scale (9, 4) is also disallowed
    testInvalidConversions(Literal.of(new BigDecimal("34.11")),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 4),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidStringConversions() {
    // Strings can be used for types that are difficult to construct, like decimal or timestamp,
    // but are not intended to support parsing strings to any type
    testInvalidConversions(Literal.of("abc"),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidUUIDConversions() {
    // UUIDs convert to no other type
    testInvalidConversions(Literal.of(UUID.randomUUID()),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 2),
        Types.StringType.get(),
        Types.FixedType.ofLength(1),
        Types.BinaryType.get()
    );
  }

  @Test
  public void testInvalidFixedConversions() {
    // fixed -> binary is allowed; fixed -> fixed with a different length (1) is not
    testInvalidConversions(Literal.of(new byte[] {0, 1, 2}),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 2),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1)
    );
  }

  @Test
  public void testInvalidBinaryConversions() {
    // binary -> fixed with a MATCHING length (3) is allowed; length 1 here is a mismatch
    testInvalidConversions(Literal.of(ByteBuffer.wrap(new byte[] {0, 1, 2})),
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.DecimalType.of(9, 2),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(1)
    );
  }

  // asserts that lit.to(type) returns null (the "not allowed" signal) for each type
  private void testInvalidConversions(Literal<?> lit, Type... invalidTypes) {
    for (Type type : invalidTypes) {
      Assert.assertNull(
          lit.value().getClass().getName() + " literal to " + type + " is not allowed",
          lit.to(type));
    }
  }

  // minimal local pair; avoids pulling a pair type from a library into the test
  private static class Pair<X, Y> {
    public static <X, Y> Pair<X, Y> of(X x, Y y) {
      return new Pair<>(x, y);
    }

    private final X x;
    private final Y y;

    private Pair(X x, Y y) {
      this.x = x;
      this.y = y;
    }

    public X first() {
      return x;
    }

    public Y second() {
      return y;
    }
  }
}
6,415
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/expressions/TestEvaluatior.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.TestHelpers; import com.netflix.iceberg.types.Types; import com.netflix.iceberg.types.Types.StructType; import org.apache.avro.util.Utf8; import org.junit.Assert; import org.junit.Test; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.alwaysTrue; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.greaterThan; import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.isNull; import static com.netflix.iceberg.expressions.Expressions.lessThan; import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.notEqual; import static com.netflix.iceberg.expressions.Expressions.notNull; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestEvaluatior { private static final StructType STRUCT = StructType.of( required(13, "x", Types.IntegerType.get()), required(14, 
"y", Types.IntegerType.get()), optional(15, "z", Types.IntegerType.get()) ); @Test public void testLessThan() { Evaluator evaluator = new Evaluator(STRUCT, lessThan("x", 7)); Assert.assertFalse("7 < 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertTrue("6 < 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null))); } @Test public void testLessThanOrEqual() { Evaluator evaluator = new Evaluator(STRUCT, lessThanOrEqual("x", 7)); Assert.assertTrue("7 <= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertTrue("6 <= 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null))); Assert.assertFalse("8 <= 7 => false", evaluator.eval(TestHelpers.Row.of(8, 8, null))); } @Test public void testGreaterThan() { Evaluator evaluator = new Evaluator(STRUCT, greaterThan("x", 7)); Assert.assertFalse("7 > 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertFalse("6 > 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null))); Assert.assertTrue("8 > 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null))); } @Test public void testGreaterThanOrEqual() { Evaluator evaluator = new Evaluator(STRUCT, greaterThanOrEqual("x", 7)); Assert.assertTrue("7 >= 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertFalse("6 >= 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null))); Assert.assertTrue("8 >= 7 => true", evaluator.eval(TestHelpers.Row.of(8, 8, null))); } @Test public void testEqual() { Evaluator evaluator = new Evaluator(STRUCT, equal("x", 7)); Assert.assertTrue("7 == 7 => true", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertFalse("6 == 7 => false", evaluator.eval(TestHelpers.Row.of(6, 8, null))); } @Test public void testNotEqual() { Evaluator evaluator = new Evaluator(STRUCT, notEqual("x", 7)); Assert.assertFalse("7 != 7 => false", evaluator.eval(TestHelpers.Row.of(7, 8, null))); Assert.assertTrue("6 != 7 => true", evaluator.eval(TestHelpers.Row.of(6, 8, null))); } 
@Test public void testAlwaysTrue() { Evaluator evaluator = new Evaluator(STRUCT, alwaysTrue()); Assert.assertTrue("always true", evaluator.eval(TestHelpers.Row.of())); } @Test public void testAlwaysFalse() { Evaluator evaluator = new Evaluator(STRUCT, alwaysFalse()); Assert.assertFalse("always false", evaluator.eval(TestHelpers.Row.of())); } @Test public void testIsNull() { Evaluator evaluator = new Evaluator(STRUCT, isNull("z")); Assert.assertTrue("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null))); Assert.assertFalse("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3))); } @Test public void testNotNull() { Evaluator evaluator = new Evaluator(STRUCT, notNull("z")); Assert.assertFalse("null is null", evaluator.eval(TestHelpers.Row.of(1, 2, null))); Assert.assertTrue("3 is not null", evaluator.eval(TestHelpers.Row.of(1, 2, 3))); } @Test public void testAnd() { Evaluator evaluator = new Evaluator(STRUCT, and(equal("x", 7), notNull("z"))); Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3))); Assert.assertFalse("8, 3 => false", evaluator.eval(TestHelpers.Row.of(8, 0, 3))); Assert.assertFalse("7, null => false", evaluator.eval(TestHelpers.Row.of(7, 0, null))); Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null))); } @Test public void testOr() { Evaluator evaluator = new Evaluator(STRUCT, or(equal("x", 7), notNull("z"))); Assert.assertTrue("7, 3 => true", evaluator.eval(TestHelpers.Row.of(7, 0, 3))); Assert.assertTrue("8, 3 => true", evaluator.eval(TestHelpers.Row.of(8, 0, 3))); Assert.assertTrue("7, null => true", evaluator.eval(TestHelpers.Row.of(7, 0, null))); Assert.assertFalse("8, null => false", evaluator.eval(TestHelpers.Row.of(8, 0, null))); } @Test public void testNot() { Evaluator evaluator = new Evaluator(STRUCT, not(equal("x", 7))); Assert.assertFalse("not(7 == 7) => false", evaluator.eval(TestHelpers.Row.of(7))); Assert.assertTrue("not(8 == 7) => false", 
evaluator.eval(TestHelpers.Row.of(8))); } @Test public void testCharSeqValue() { StructType struct = StructType.of(required(34, "s", Types.StringType.get())); Evaluator evaluator = new Evaluator(struct, equal("s", "abc")); Assert.assertTrue("string(abc) == utf8(abc) => true", evaluator.eval(TestHelpers.Row.of(new Utf8("abc")))); Assert.assertFalse("string(abc) == utf8(abcd) => false", evaluator.eval(TestHelpers.Row.of(new Utf8("abcd")))); } }
6,416
0
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg
Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/events/TestListeners.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.events;

import org.junit.Assert;
import org.junit.Test;

/**
 * Tests that {@link Listeners#notifyAll(Object)} dispatches events to listeners registered
 * for the event's class, including multiple listeners for the same event type.
 */
public class TestListeners {
  static {
    // register once per JVM in a static initializer; Listeners registration is assumed to be
    // global and cumulative, so registering in each test would stack duplicate listeners
    Listeners.register(TestListener.get()::event1, Event1.class);
    Listeners.register(TestListener.get()::event2, Event2.class);
  }

  // two distinct event types, to verify dispatch is keyed by event class
  public static class Event1 {
  }

  public static class Event2 {
  }

  public static class TestListener {
    private static final TestListener INSTANCE = new TestListener();

    public static TestListener get() {
      return INSTANCE;
    }

    // last event of each type received by this listener; null until notified
    private Event1 e1 = null;
    private Event2 e2 = null;

    public void event1(Event1 e) {
      this.e1 = e;
    }

    public void event2(Event2 e) {
      this.e2 = e;
    }
  }

  @Test
  public void testEvent1() {
    Event1 e1 = new Event1();
    Listeners.notifyAll(e1);
    // Event1 has no equals override, so this checks the exact instance was delivered
    Assert.assertEquals(e1, TestListener.get().e1);
  }

  @Test
  public void testEvent2() {
    Event2 e2 = new Event2();
    Listeners.notifyAll(e2);
    Assert.assertEquals(e2, TestListener.get().e2);
  }

  @Test
  public void testMultipleListeners() {
    // a second listener for Event1; both it and the static INSTANCE should be notified
    TestListener other = new TestListener();
    Listeners.register(other::event1, Event1.class);

    Event1 e1 = new Event1();
    Listeners.notifyAll(e1);
    Assert.assertEquals(e1, TestListener.get().e1);
    Assert.assertEquals(e1, other.e1);
  }
}
6,417
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/FileFormat.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.types.Comparators;

/**
 * Enum of supported file formats.
 * <p>
 * Each format carries its dotted file extension (".orc", ".parquet", ".avro") and can
 * add that extension to a path or recognize it at the end of a file name.
 */
public enum FileFormat {
  ORC("orc"),
  PARQUET("parquet"),
  AVRO("avro");

  // the extension including the leading dot, e.g. ".parquet"
  private final String ext;

  FileFormat(String extension) {
    this.ext = "." + extension;
  }

  /**
   * Returns filename with this format's extension added, if necessary.
   *
   * @param filename a filename or path
   * @return if the ext is present, the filename, otherwise the filename with ext added
   */
  public String addExtension(String filename) {
    return filename.endsWith(ext) ? filename : filename + ext;
  }

  /**
   * Determines the format of a file from its name's extension.
   *
   * @param filename a filename or path as a CharSequence
   * @return the matching format, or null when there is no dot or the extension is unknown
   */
  public static FileFormat fromFileName(CharSequence filename) {
    int dotPos = lastIndexOf('.', filename);
    if (dotPos < 0) {
      // no extension at all
      return null;
    }

    // includes the dot, matching the stored ext values
    CharSequence candidate = filename.subSequence(dotPos, filename.length());
    for (FileFormat format : values()) {
      // CharSequence has no equals contract across implementations; compare char-by-char
      if (Comparators.charSequences().compare(format.ext, candidate) == 0) {
        return format;
      }
    }

    return null;
  }

  // scans from the end so the LAST occurrence wins; -1 when c is absent
  private static int lastIndexOf(char c, CharSequence seq) {
    int pos = seq.length() - 1;
    while (pos >= 0) {
      if (seq.charAt(pos) == c) {
        return pos;
      }
      pos -= 1;
    }
    return -1;
  }
}
6,418
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/CombinedScanTask.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.util.Collection;

/**
 * A scan task made of several ranges from files.
 */
public interface CombinedScanTask extends ScanTask {
  /**
   * Return the {@link FileScanTask tasks} in this combined task.
   *
   * @return a Collection of FileScanTask instances.
   */
  Collection<FileScanTask> files();

  /**
   * Returns this task; a combined task is trivially its own combined view, so no
   * wrapping or copying is needed.
   */
  @Override
  default CombinedScanTask asCombinedScanTask() {
    return this;
  }
}
6,419
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Filterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.google.common.collect.Lists;
import com.netflix.iceberg.expressions.Expression;
import java.util.Collection;

/**
 * Methods to filter files in a snapshot or manifest when reading.
 * <p>
 * Iterating a Filterable produces the {@link DataFile data files} that match the current
 * selection and filters.
 *
 * @param <T> Java class returned by filter methods, also filterable
 */
public interface Filterable<T extends Filterable<T>> extends Iterable<DataFile> {
  /**
   * Selects the columns of a file manifest to read.
   * <p>
   * If columns are set multiple times, the last set of columns will be read.
   * <p>
   * If the Filterable object has partition filters, they will be added to the returned partial.
   * <p>
   * For a list of column names, see the table format specification.
   *
   * @param columns String column names to load from the manifest file
   * @return a Filterable that will load only the given columns
   */
  default T select(String... columns) {
    // varargs convenience overload; delegates to the Collection form
    return select(Lists.newArrayList(columns));
  }

  /**
   * Selects the columns of a file manifest to read.
   * <p>
   * If columns are set multiple times, the last set of columns will be read.
   * <p>
   * If the Filterable object has partition filters, they will be added to the returned partial.
   * <p>
   * For a list of column names, see the table format specification.
   *
   * @param columns String column names to load from the manifest file
   * @return a Filterable that will load only the given columns
   */
  T select(Collection<String> columns);

  /**
   * Adds a filter expression on partition data for matching files.
   * <p>
   * If the Filterable object already has partition filters, the new filter will be added as an
   * additional requirement. The result filter expression will be the result of expr and any
   * existing filters.
   * <p>
   * If the Filterable object has columns selected, they will be added to the returned partial.
   *
   * @param expr An expression for filtering this Filterable using partition data
   * @return a Filterable that will load only rows that match expr
   */
  T filterPartitions(Expression expr);

  /**
   * Adds a filter expression on data rows for matching files.
   * <p>
   * Expressions passed to this function will be converted to partition expressions before they are
   * used to filter data files.
   * <p>
   * If the Filterable object already has partition filters, the new filter will be added as an
   * additional requirement. The result filter expression will be the result of expr and any
   * existing filters.
   * <p>
   * If the Filterable object has columns selected, they will be added to the returned partial.
   *
   * @param expr An expression for filtering this Filterable using row data
   * @return a Filterable that will load only rows that match expr
   */
  T filterRows(Expression expr);
}
6,420
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/UpdateProperties.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.util.Map;

/**
 * API for updating table properties.
 * <p>
 * Apply returns the updated table properties as a map for validation.
 * <p>
 * When committing, these changes will be applied to the current table metadata. Commit conflicts
 * will be resolved by applying the pending changes to the new table metadata.
 */
public interface UpdateProperties extends PendingUpdate<Map<String, String>> {
  /**
   * Add a key/value property to the table.
   *
   * @param key a String key
   * @param value a String value
   * @return this for method chaining
   * @throws NullPointerException If either the key or value is null
   */
  UpdateProperties set(String key, String value);

  /**
   * Remove the given property key from the table.
   *
   * @param key a String key
   * @return this for method chaining
   * @throws NullPointerException If the key is null
   */
  UpdateProperties remove(String key);

  /**
   * Set the default file format for the table.
   * <p>
   * Convenience for setting the corresponding table property.
   *
   * @param format a file format
   * @return this for method chaining
   */
  UpdateProperties defaultFormat(FileFormat format);
}
6,421
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ReplacePartitions.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

/**
 * Not recommended: API for overwriting files in a table by partition.
 * <p>
 * This is provided to implement SQL compatible with Hive table operations but is not recommended.
 * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
 * <p>
 * This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
 * all files in partitions with new data with the new additions. This operation is used to implement
 * dynamic partition replacement.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 * This has no requirements for the latest snapshot and will not fail based on other snapshot
 * changes.
 */
public interface ReplacePartitions extends PendingUpdate<Snapshot> {
  /**
   * Add a {@link DataFile} to the table.
   * <p>
   * The file's partition determines which existing files will be replaced at commit time.
   *
   * @param file a data file
   * @return this for method chaining
   */
  ReplacePartitions addFile(DataFile file);

  /**
   * Validate that no partitions will be replaced and the operation is append-only.
   *
   * @return this for method chaining
   */
  ReplacePartitions validateAppendOnly();
}
6,422
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Schema.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.BiMap;
import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.Sets;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.io.Serializable;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * The schema of a data table.
 * <p>
 * Wraps a {@link Types.StructType} of top-level columns and provides lookup by field id and
 * by (possibly nested) field name, plus optional alias mappings for schemas converted from
 * other formats (Avro, Parquet).
 */
public class Schema implements Serializable {
  private static final Joiner NEWLINE = Joiner.on('\n');
  private static final String ALL_COLUMNS = "*";

  private final Types.StructType struct;
  // Lookup indexes, built lazily from struct and rebuilt on demand after deserialization.
  // NOTE(review): aliasToId is set only in the constructor but is transient, so aliases
  // appear to be lost after serialization — confirm whether that is intended.
  private transient BiMap<String, Integer> aliasToId = null;
  private transient Map<Integer, Types.NestedField> idToField = null;
  private transient BiMap<String, Integer> nameToId = null;

  /**
   * Creates a schema from columns and an optional alias map.
   *
   * @param columns the top-level columns
   * @param aliases a map from external column names to field ids, or null for no aliases
   */
  public Schema(List<Types.NestedField> columns, Map<String, Integer> aliases) {
    this.struct = Types.StructType.of(columns);
    this.aliasToId = aliases != null ? ImmutableBiMap.copyOf(aliases) : null;
  }

  /**
   * Creates a schema from columns, with no alias map.
   *
   * @param columns the top-level columns
   */
  public Schema(List<Types.NestedField> columns) {
    // delegate so all construction goes through one place (original left aliasToId null)
    this(columns, null);
  }

  /**
   * Varargs convenience constructor; equivalent to {@link #Schema(List)}.
   *
   * @param columns the top-level columns
   */
  public Schema(Types.NestedField... columns) {
    this(Arrays.asList(columns));
  }

  // builds the id -> field index on first use
  private Map<Integer, Types.NestedField> lazyIdToField() {
    if (idToField == null) {
      this.idToField = TypeUtil.indexById(struct);
    }
    return idToField;
  }

  // builds the name -> id index on first use
  private BiMap<String, Integer> lazyNameToId() {
    if (nameToId == null) {
      this.nameToId = ImmutableBiMap.copyOf(TypeUtil.indexByName(struct));
    }
    return nameToId;
  }

  /**
   * Returns an alias map for this schema, if set.
   * <p>
   * Alias maps are created when translating an external schema, like an Avro Schema, to this
   * format. The original column names can be provided in a Map when constructing this Schema.
   *
   * @return a Map of column aliases to field ids, or null if no aliases were set
   */
  public Map<String, Integer> getAliases() {
    return aliasToId;
  }

  /**
   * Returns the underlying {@link Types.StructType struct type} for this schema.
   *
   * @return the StructType version of this schema.
   */
  public Types.StructType asStruct() {
    return struct;
  }

  /**
   * @return a List of the {@link Types.NestedField columns} in this Schema.
   */
  public List<Types.NestedField> columns() {
    return struct.fields();
  }

  /**
   * Returns the {@link Type} of a sub-field identified by the field name.
   *
   * @param name a field name, possibly nested
   * @return a Type for the sub-field or null if it is not found
   */
  public Type findType(String name) {
    Preconditions.checkArgument(!name.isEmpty(), "Invalid column name: (empty)");
    Integer id = lazyNameToId().get(name);
    if (id != null) {
      return findType(id);
    }
    // fix: previously an unknown name produced a null Integer that was unboxed in
    // findType(int), throwing NullPointerException; now matches findField(String)
    return null;
  }

  /**
   * Returns the {@link Type} of a sub-field identified by the field id.
   *
   * @param id a field id
   * @return a Type for the sub-field or null if it is not found
   */
  public Type findType(int id) {
    Types.NestedField field = lazyIdToField().get(id);
    if (field != null) {
      return field.type();
    }
    return null;
  }

  /**
   * Returns the sub-field identified by the field id as a {@link Types.NestedField}.
   *
   * @param id a field id
   * @return the sub-field or null if it is not found
   */
  public Types.NestedField findField(int id) {
    return lazyIdToField().get(id);
  }

  /**
   * Returns a sub-field field by name as a {@link Types.NestedField}.
   * <p>
   * The result may be a nested field.
   *
   * @param name a String name
   * @return a Type for the sub-field or null if it is not found
   */
  public Types.NestedField findField(String name) {
    Preconditions.checkArgument(!name.isEmpty(), "Invalid column name: (empty)");
    Integer id = lazyNameToId().get(name);
    if (id != null) {
      return lazyIdToField().get(id);
    }
    return null;
  }

  /**
   * Returns the full column name for the given id.
   *
   * @param id a field id
   * @return the full column name in this schema that resolves to the id
   */
  public String findColumnName(int id) {
    return lazyNameToId().inverse().get(id);
  }

  /**
   * Returns the column id for the given column alias. Column aliases are set
   * by conversions from Parquet or Avro to this Schema type.
   *
   * @param alias a full column name in the unconverted data schema
   * @return the column id in this schema, or null if the column wasn't found
   */
  public Integer aliasToId(String alias) {
    if (aliasToId != null) {
      return aliasToId.get(alias);
    }
    return null;
  }

  /**
   * Returns the full column name in the unconverted data schema for the given column id.
   * Column aliases are set by conversions from Parquet or Avro to this Schema type.
   *
   * @param fieldId a column id in this schema
   * @return the full column name in the unconverted data schema, or null if one wasn't found
   */
  public String idToAlias(Integer fieldId) {
    if (aliasToId != null) {
      return aliasToId.inverse().get(fieldId);
    }
    return null;
  }

  /**
   * Creates a projection schema for a subset of columns, selected by name.
   * <p>
   * Names that identify nested fields will select part or all of the field's top-level column.
   *
   * @param names String names for selected columns
   * @return a projection schema from this schema, by name
   */
  public Schema select(String... names) {
    return select(Arrays.asList(names));
  }

  /**
   * Creates a projection schema for a subset of columns, selected by name.
   * <p>
   * Names that identify nested fields will select part or all of the field's top-level column.
   *
   * @param names a List of String names for selected columns
   * @return a projection schema from this schema, by name
   */
  public Schema select(Collection<String> names) {
    if (names.contains(ALL_COLUMNS)) {
      // "*" short-circuits to the full schema
      return this;
    }

    // unknown names are silently ignored, matching the original behavior
    Set<Integer> selected = Sets.newHashSet();
    for (String name : names) {
      Integer id = lazyNameToId().get(name);
      if (id != null) {
        selected.add(id);
      }
    }

    return TypeUtil.select(this, selected);
  }

  @Override
  public String toString() {
    return String.format("table {\n%s\n}",
        NEWLINE.join(struct.fields().stream()
            .map(f -> "  " + f)
            .collect(Collectors.toList())));
  }
}
6,423
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PendingUpdate.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;

/**
 * API for table metadata changes.
 * <p>
 * Implementations accumulate pending changes; {@link #apply()} produces the uncommitted result
 * for validation and {@link #commit()} makes the changes permanent.
 *
 * @param <T> Java class of changes from this update; returned by {@link #apply} for validation.
 */
public interface PendingUpdate<T> {

  /**
   * Apply the pending changes and return the uncommitted changes for validation.
   * <p>
   * This does not result in a permanent update.
   *
   * @return the uncommitted changes that would be committed by calling {@link #commit()}
   * @throws ValidationException If the pending changes cannot be applied to the current metadata
   * @throws IllegalArgumentException If the pending changes are conflicting or invalid
   */
  T apply();

  /**
   * Apply the pending changes and commit.
   * <p>
   * Changes are committed by calling the underlying table's commit method.
   * <p>
   * Once the commit is successful, the updated table will be refreshed.
   *
   * @throws ValidationException If the update cannot be applied to the current table metadata.
   * @throws CommitFailedException If the update cannot be committed due to conflicts.
   */
  void commit();
}
6,424
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Table.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.util.Map;

/**
 * Represents a table.
 * <p>
 * This is the main entry point for reading table metadata (schema, spec, snapshots) and for
 * creating update operations (appends, rewrites, deletes, schema changes).
 */
public interface Table {

  /**
   * Refresh the current table metadata.
   */
  void refresh();

  /**
   * Create a new {@link TableScan scan} for this table.
   * <p>
   * Once a table scan is created, it can be refined to project columns and filter data.
   *
   * @return a table scan for this table
   */
  TableScan newScan();

  /**
   * Return the {@link Schema schema} for this table.
   *
   * @return this table's schema
   */
  Schema schema();

  /**
   * Return the {@link PartitionSpec partition spec} for this table.
   *
   * @return this table's partition spec
   */
  PartitionSpec spec();

  /**
   * Return a map of string properties for this table.
   *
   * @return this table's properties map
   */
  Map<String, String> properties();

  /**
   * Return the table's base location.
   *
   * @return this table's location
   */
  String location();

  /**
   * Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
   *
   * @return the current table Snapshot.
   */
  Snapshot currentSnapshot();

  /**
   * Get the {@link Snapshot snapshots} of this table.
   *
   * @return an Iterable of snapshots of this table.
   */
  Iterable<Snapshot> snapshots();

  /**
   * Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
   *
   * @return a new {@link UpdateSchema}
   */
  UpdateSchema updateSchema();

  /**
   * Create a new {@link UpdateProperties} to update table properties and commit the changes.
   *
   * @return a new {@link UpdateProperties}
   */
  UpdateProperties updateProperties();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table and commit.
   *
   * @return a new {@link AppendFiles}
   */
  AppendFiles newAppend();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table and commit.
   * <p>
   * Using this method signals to the underlying implementation that the append should not perform
   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
   * because the fast commit may cause split planning to slow down over time.
   * <p>
   * Implementations may not support fast appends, in which case this will return the same appender
   * as {@link #newAppend()}.
   *
   * @return a new {@link AppendFiles}
   */
  default AppendFiles newFastAppend() {
    return newAppend();
  }

  /**
   * Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
   *
   * @return a new {@link RewriteFiles}
   */
  RewriteFiles newRewrite();

  /**
   * Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
   *
   * @return a new {@link OverwriteFiles}
   */
  OverwriteFiles newOverwrite();

  /**
   * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
   * overwrite partitions in the table with new data.
   * <p>
   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
   *
   * @return a new {@link ReplacePartitions}
   */
  ReplacePartitions newReplacePartitions();

  /**
   * Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
   *
   * @return a new {@link DeleteFiles}
   */
  DeleteFiles newDelete();

  /**
   * Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
   *
   * @return a new {@link ExpireSnapshots}
   */
  ExpireSnapshots expireSnapshots();

  /**
   * Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
   *
   * @return a new {@link Rollback}
   */
  Rollback rollback();

  /**
   * Create a new {@link Transaction transaction API} to commit multiple table operations at once.
   *
   * @return a new {@link Transaction}
   */
  Transaction newTransaction();
}
6,425
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Files.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.io.PositionOutputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Paths;

/**
 * Factory methods for {@link InputFile} and {@link OutputFile} instances backed by the local
 * filesystem, primarily used for testing and local table operations.
 */
public class Files {

  /**
   * Returns an {@link OutputFile} that writes to the given local file.
   *
   * @param file a local File to write to
   * @return an OutputFile for the local file
   */
  public static OutputFile localOutput(File file) {
    return new LocalOutputFile(file);
  }

  /**
   * Returns an {@link OutputFile} that writes to the given local path.
   *
   * @param file a local path to write to
   * @return an OutputFile for the local path
   */
  public static OutputFile localOutput(String file) {
    return localOutput(Paths.get(file).toAbsolutePath().toFile());
  }

  /** OutputFile implementation backed by a {@link RandomAccessFile} on the local filesystem. */
  private static class LocalOutputFile implements OutputFile {
    private final File file;

    private LocalOutputFile(File file) {
      this.file = file;
    }

    @Override
    public PositionOutputStream create() {
      if (file.exists()) {
        throw new AlreadyExistsException("File already exists: %s", file);
      }

      // ensure the parent directory exists before trying to open the stream
      File parent = file.getParentFile();
      if (!parent.isDirectory() && !parent.mkdirs()) {
        throw new RuntimeIOException(
            String.format(
                "Failed to create the file's directory at %s.",
                parent.getAbsolutePath()));
      }

      try {
        return new PositionFileOutputStream(new RandomAccessFile(file, "rw"));
      } catch (FileNotFoundException e) {
        throw new RuntimeIOException(e, "Failed to create file: %s", file);
      }
    }

    @Override
    public PositionOutputStream createOrOverwrite() {
      // delete-then-create keeps create()'s existence check from failing
      if (file.exists() && !file.delete()) {
        throw new RuntimeIOException("Failed to delete: " + file);
      }
      return create();
    }

    @Override
    public String location() {
      return file.toString();
    }

    @Override
    public InputFile toInputFile() {
      return localInput(file);
    }

    @Override
    public String toString() {
      return location();
    }
  }

  /**
   * Returns an {@link InputFile} that reads the given local file.
   *
   * @param file a local File to read
   * @return an InputFile for the local file
   */
  public static InputFile localInput(File file) {
    return new LocalInputFile(file);
  }

  /**
   * Returns an {@link InputFile} that reads the given local path.
   * <p>
   * A leading "file:" scheme prefix is stripped before resolving the path.
   *
   * @param file a local path, optionally prefixed with "file:"
   * @return an InputFile for the local path
   */
  public static InputFile localInput(String file) {
    String path = file.startsWith("file:") ? file.replaceFirst("file:", "") : file;
    return localInput(new File(path));
  }

  /** InputFile implementation backed by a {@link RandomAccessFile} on the local filesystem. */
  private static class LocalInputFile implements InputFile {
    private final File file;

    private LocalInputFile(File file) {
      this.file = file;
    }

    @Override
    public long getLength() {
      return file.length();
    }

    @Override
    public SeekableInputStream newStream() {
      try {
        return new SeekableFileInputStream(new RandomAccessFile(file, "r"));
      } catch (FileNotFoundException e) {
        throw new RuntimeIOException(e, "Failed to read file: %s", file);
      }
    }

    @Override
    public String location() {
      return file.toString();
    }

    @Override
    public String toString() {
      return location();
    }
  }

  /** SeekableInputStream that delegates to a {@link RandomAccessFile}. */
  private static class SeekableFileInputStream extends SeekableInputStream {
    private final RandomAccessFile delegate;

    private SeekableFileInputStream(RandomAccessFile stream) {
      this.delegate = stream;
    }

    @Override
    public long getPos() throws IOException {
      return delegate.getFilePointer();
    }

    @Override
    public void seek(long newPos) throws IOException {
      delegate.seek(newPos);
    }

    @Override
    public int read() throws IOException {
      return delegate.read();
    }

    @Override
    public int read(byte[] b) throws IOException {
      return delegate.read(b);
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
      return delegate.read(b, off, len);
    }

    @Override
    public long skip(long n) throws IOException {
      // RandomAccessFile can only skip an int's worth of bytes per call
      int toSkip = n > Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) n;
      return delegate.skipBytes(toSkip);
    }

    @Override
    public void close() throws IOException {
      delegate.close();
    }
  }

  /** PositionOutputStream that delegates to a {@link RandomAccessFile}. */
  private static class PositionFileOutputStream extends PositionOutputStream {
    private final RandomAccessFile delegate;

    private PositionFileOutputStream(RandomAccessFile stream) {
      this.delegate = stream;
    }

    @Override
    public long getPos() throws IOException {
      return delegate.getFilePointer();
    }

    @Override
    public void write(int b) throws IOException {
      delegate.write(b);
    }

    @Override
    public void write(byte[] b) throws IOException {
      delegate.write(b);
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      delegate.write(b, off, len);
    }

    @Override
    public void close() throws IOException {
      delegate.close();
    }
  }
}
6,426
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/UpdateSchema.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Type;

/**
 * API for schema evolution.
 * <p>
 * When committing, these changes will be applied to the current table metadata. Commit conflicts
 * will not be resolved and will result in a {@link CommitFailedException}.
 */
public interface UpdateSchema extends PendingUpdate<Schema> {

  /**
   * Add a new top-level column.
   * <p>
   * Because "." may be interpreted as a column path separator or may be used in field names, it is
   * not allowed in names passed to this method. To add to nested structures or to add fields with
   * names that contain ".", use {@link #addColumn(String, String, Type)}.
   * <p>
   * If type is a nested type, its field IDs are reassigned when added to the existing schema.
   *
   * @param name name for the new column
   * @param type type for the new column
   * @return this for method chaining
   * @throws IllegalArgumentException If name contains "."
   */
  UpdateSchema addColumn(String name, Type type);

  /**
   * Add a new column to a nested struct.
   * <p>
   * The parent name is used to find the parent using {@link Schema#findField(String)}. If the
   * parent name is null, the new column will be added to the root as a top-level column. If parent
   * identifies a struct, a new column is added to that struct. If it identifies a list, the column
   * is added to the list element struct, and if it identifies a map, the new column is added to
   * the map's value struct.
   * <p>
   * The given name is used to name the new column and names containing "." are not handled
   * differently.
   * <p>
   * If type is a nested type, its field IDs are reassigned when added to the existing schema.
   *
   * @param parent name of the parent struct that the new column will be added to
   * @param name name for the new column
   * @param type type for the new column
   * @return this for method chaining
   * @throws IllegalArgumentException If parent doesn't identify a struct
   */
  UpdateSchema addColumn(String parent, String name, Type type);

  /**
   * Rename a column in the schema.
   * <p>
   * The name is used to find the column to rename using {@link Schema#findField(String)}.
   * <p>
   * The new name may contain "." and such names are not parsed or handled differently.
   * <p>
   * Columns may be updated and renamed in the same schema update.
   *
   * @param name name of the column to rename
   * @param newName replacement name for the column
   * @return this for method chaining
   * @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
   *                                  change conflicts with other additions, renames, or updates.
   */
  UpdateSchema renameColumn(String name, String newName);

  /**
   * Update a column in the schema to a new primitive type.
   * <p>
   * The name is used to find the column to update using {@link Schema#findField(String)}.
   * <p>
   * Only updates that widen types are allowed.
   * <p>
   * Columns may be updated and renamed in the same schema update.
   *
   * @param name name of the column to update
   * @param newType replacement type for the column
   * @return this for method chaining
   * @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
   *                                  change introduces a type incompatibility or if it conflicts
   *                                  with other additions, renames, or updates.
   */
  UpdateSchema updateColumn(String name, Type.PrimitiveType newType);

  /**
   * Delete a column in the schema.
   * <p>
   * The name is used to find the column to delete using {@link Schema#findField(String)}.
   *
   * @param name name of the column to delete
   * @return this for method chaining
   * @throws IllegalArgumentException If name doesn't identify a column in the schema or if this
   *                                  change conflicts with other additions, renames, or updates.
   */
  UpdateSchema deleteColumn(String name);
}
6,427
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Rollback.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.CommitFailedException;

/**
 * API for rolling table data back to the state at an older table {@link Snapshot snapshot}.
 * <p>
 * This API does not allow conflicting calls to {@link #toSnapshotId(long)} and
 * {@link #toSnapshotAtTime(long)}.
 * <p>
 * When committing, these changes will be applied to the current table metadata. Commit conflicts
 * will not be resolved and will result in a {@link CommitFailedException}.
 */
public interface Rollback extends PendingUpdate<Snapshot> {

  /**
   * Roll this table's data back to a specific {@link Snapshot} identified by id.
   *
   * @param snapshotId long id of the snapshot to roll back table data to
   * @return this for method chaining
   * @throws IllegalArgumentException If the table has no snapshot with the given id
   */
  Rollback toSnapshotId(long snapshotId);

  /**
   * Roll this table's data back to the last {@link Snapshot} before the given timestamp.
   *
   * @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
   * @return this for method chaining
   * @throws IllegalArgumentException If the table has no old snapshot before the given timestamp
   */
  Rollback toSnapshotAtTime(long timestampMillis);
}
6,428
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Snapshot.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.util.List;

/**
 * A snapshot of the data in a table at a point in time.
 * <p>
 * A snapshot consists of one or more file manifests, and the complete table contents is the union
 * of all the data files in those manifests.
 * <p>
 * Snapshots are created by table operations, like {@link AppendFiles} and {@link RewriteFiles}.
 */
public interface Snapshot {

  /**
   * Return this snapshot's ID.
   *
   * @return a long ID
   */
  long snapshotId();

  /**
   * Return this snapshot's parent ID or null.
   *
   * @return a long ID for this snapshot's parent, or null if it has no parent
   */
  Long parentId();

  /**
   * Return this snapshot's timestamp.
   * <p>
   * This timestamp is the same as those produced by {@link System#currentTimeMillis()}.
   *
   * @return a long timestamp in milliseconds
   */
  long timestampMillis();

  /**
   * Return the location of all manifests in this snapshot.
   * <p>
   * The current table is made of the union of the data files in these manifests.
   *
   * @return a list of fully-qualified manifest locations
   */
  List<ManifestFile> manifests();

  /**
   * Return all files added to the table in this snapshot.
   * <p>
   * The files returned include the following columns: file_path, file_format, partition,
   * record_count, and file_size_in_bytes. Other columns will be null.
   *
   * @return all files added to the table in this snapshot.
   */
  Iterable<DataFile> addedFiles();

  /**
   * Return all files deleted from the table in this snapshot.
   * <p>
   * The files returned include the following columns: file_path, file_format, partition,
   * record_count, and file_size_in_bytes. Other columns will be null.
   *
   * @return all files deleted from the table in this snapshot.
   */
  Iterable<DataFile> deletedFiles();

  /**
   * Return the location of this snapshot's manifest list, or null if it is not separate.
   *
   * @return the location of the manifest list for this Snapshot
   */
  String manifestListLocation();
}
6,429
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Tables.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.google.common.collect.ImmutableMap;
import java.util.Map;

/**
 * Generic interface for creating and loading a table implementation.
 *
 * The 'tableIdentifier' field should be interpreted by the underlying
 * implementation (e.g. database.table_name)
 */
public interface Tables {
  // convenience overload: unpartitioned table with no extra properties
  default Table create(Schema schema, String tableIdentifier) {
    return create(schema, PartitionSpec.unpartitioned(), ImmutableMap.of(), tableIdentifier);
  }

  // convenience overload: partitioned table with no extra properties
  default Table create(Schema schema, PartitionSpec spec, String tableIdentifier) {
    return create(schema, spec, ImmutableMap.of(), tableIdentifier);
  }

  /**
   * Create a new table with the given schema, partition spec, and properties.
   *
   * @param schema the table schema
   * @param spec the table partition spec
   * @param properties string properties for the new table
   * @param tableIdentifier an implementation-defined table identifier
   * @return the created Table
   */
  Table create(Schema schema,
               PartitionSpec spec,
               Map<String, String> properties,
               String tableIdentifier);

  /**
   * Load an existing table.
   *
   * @param tableIdentifier an implementation-defined table identifier
   * @return the loaded Table
   */
  Table load(String tableIdentifier);
}
6,430
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ManifestFile.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.types.Types;
import java.nio.ByteBuffer;
import java.util.List;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Represents a manifest file that can be scanned to find data files in a table.
 */
public interface ManifestFile {
  // schema used when a manifest file entry is itself stored as structured data
  Schema SCHEMA = new Schema(
      required(500, "manifest_path", Types.StringType.get()),
      required(501, "manifest_length", Types.LongType.get()),
      required(502, "partition_spec_id", Types.IntegerType.get()),
      optional(503, "added_snapshot_id", Types.LongType.get()),
      optional(504, "added_data_files_count", Types.IntegerType.get()),
      optional(505, "existing_data_files_count", Types.IntegerType.get()),
      optional(506, "deleted_data_files_count", Types.IntegerType.get()),
      optional(507, "partitions", Types.ListType.ofRequired(508, Types.StructType.of(
          required(509, "contains_null", Types.BooleanType.get()),
          optional(510, "lower_bound", Types.BinaryType.get()), // null if no non-null values
          optional(511, "upper_bound", Types.BinaryType.get())
      ))));

  static Schema schema() {
    return SCHEMA;
  }

  /**
   * @return fully qualified path to the file, suitable for constructing a Hadoop Path
   */
  String path();

  /**
   * @return length of the manifest file
   */
  long length();

  /**
   * @return ID of the {@link PartitionSpec} used to write the manifest file
   */
  int partitionSpecId();

  /**
   * @return ID of the snapshot that added the manifest file to table metadata
   */
  Long snapshotId();

  /**
   * @return the number of data files with status ADDED in the manifest file
   */
  Integer addedFilesCount();

  /**
   * @return the number of data files with status EXISTING in the manifest file
   */
  Integer existingFilesCount();

  /**
   * @return the number of data files with status DELETED in the manifest file
   */
  Integer deletedFilesCount();

  /**
   * Returns a list of {@link PartitionFieldSummary partition field summaries}.
   * <p>
   * Each summary corresponds to a field in the manifest file's partition spec, by ordinal. For
   * example, the partition spec [ ts_day=date(ts), type=identity(type) ] will have 2 summaries.
   * The first summary is for the ts_day partition field and the second is for the type partition
   * field.
   *
   * @return a list of partition field summaries, one for each field in the manifest's spec
   */
  List<PartitionFieldSummary> partitions();

  /**
   * Copies this {@link ManifestFile manifest file}. Readers can reuse manifest file instances; use
   * this method to make defensive copies.
   *
   * @return a copy of this manifest file
   */
  ManifestFile copy();

  /**
   * Summarizes the values of one partition field stored in a manifest file.
   */
  interface PartitionFieldSummary {
    // struct type of the "partitions" list element in SCHEMA above
    Types.StructType TYPE = ManifestFile.schema()
        .findType("partitions")
        .asListType()
        .elementType()
        .asStructType();

    static Types.StructType getType() {
      return TYPE;
    }

    /**
     * @return true if at least one data file in the manifest has a null value for the field
     */
    boolean containsNull();

    /**
     * @return a ByteBuffer that contains a serialized bound lower than all values of the field
     */
    ByteBuffer lowerBound();

    /**
     * @return a ByteBuffer that contains a serialized bound higher than all values of the field
     */
    ByteBuffer upperBound();

    /**
     * Copies this {@link PartitionFieldSummary summary}. Readers can reuse instances; use this
     * method to make defensive copies.
     *
     * @return a copy of this partition field summary
     */
    PartitionFieldSummary copy();
  }
}
6,431
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/OverwriteFiles.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;

/**
 * API for overwriting files in a table by filter expression.
 * <p>
 * This API accumulates file additions and produces a new {@link Snapshot} of the table by replacing
 * all the files that match the filter expression with the set of additions. This operation is used
 * to implement idempotent writes that always replace a section of a table with new data.
 * <p>
 * Overwrites can be validated; see {@link #validateAddedFiles()}.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 * This has no requirements for the latest snapshot and will not fail based on other snapshot
 * changes.
 */
public interface OverwriteFiles extends PendingUpdate<Snapshot> {
  /**
   * Delete files that match an {@link Expression} on data rows from the table.
   * <p>
   * A file is selected to be deleted by the expression if it could contain any rows that match the
   * expression (candidate files are selected using an
   * {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
   * deleted if all of the rows in the file must match the expression (the partition data matches
   * the expression's {@link Projections#strict(PartitionSpec) strict projection}). This guarantees
   * that files are deleted if and only if all rows in the file must match the expression.
   * <p>
   * Files that may contain some rows that match the expression and some rows that do not will
   * result in a {@link ValidationException}.
   *
   * @param expr an expression on rows in the table
   * @return this for method chaining
   * @throws ValidationException If a file can contain both rows that match and rows that do not
   */
  OverwriteFiles overwriteByRowFilter(Expression expr);

  /**
   * Add a {@link DataFile} to the table.
   *
   * @param file a data file
   * @return this for method chaining
   */
  OverwriteFiles addFile(DataFile file);

  /**
   * Signal that each file added to the table must match the overwrite expression.
   * <p>
   * If this method is called, each added file is validated on commit to ensure that it matches the
   * overwrite row filter. This is used to ensure that writes are idempotent: that files cannot
   * be added during a commit that would not be removed if the operation were run a second time.
   *
   * @return this for method chaining
   */
  OverwriteFiles validateAddedFiles();
}
6,432
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/FileScanTask.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.expressions.Expression;

/**
 * A scan task over a range of a single file.
 */
public interface FileScanTask extends ScanTask {
  /**
   * The {@link DataFile file} to scan.
   *
   * @return the file to scan
   */
  DataFile file();

  /**
   * The {@link PartitionSpec spec} used to store this file.
   *
   * @return the partition spec from this file's manifest
   */
  PartitionSpec spec();

  /**
   * The starting position of this scan range in the file.
   *
   * @return the start position of this scan range
   */
  long start();

  /**
   * The number of bytes to scan from the {@link #start()} position in the file.
   *
   * @return the length of this scan range in bytes
   */
  long length();

  /**
   * Returns the residual expression that should be applied to rows in this file scan.
   * <p>
   * The residual expression for a file is a filter expression created from the scan's filter, with
   * any predicates that are true or false for the entire file removed, based on the file's
   * partition data.
   *
   * @return a residual expression to apply to rows from this scan
   */
  Expression residual();

  @Override
  default boolean isFileScanTask() {
    return true;
  }

  @Override
  default FileScanTask asFileScanTask() {
    return this;
  }
}
6,433
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PartitionField.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.google.common.base.Objects;
import com.netflix.iceberg.transforms.Transform;
import java.io.Serializable;

/**
 * Represents a single field in a {@link PartitionSpec}.
 * <p>
 * A partition field binds a source column (by field id) to a named partition value produced by a
 * {@link Transform}. Instances are immutable.
 */
public class PartitionField implements Serializable {
  private final int sourceId;
  private final String name;
  private final Transform<?, ?> transform;

  PartitionField(int sourceId, String name, Transform<?, ?> transform) {
    this.sourceId = sourceId;
    this.name = name;
    this.transform = transform;
  }

  /**
   * @return the field id of the source field in the {@link PartitionSpec spec's} table schema
   */
  public int sourceId() {
    return sourceId;
  }

  /**
   * @return the name of this partition field
   */
  public String name() {
    return name;
  }

  /**
   * @return the transform used to produce partition values from source values
   */
  public Transform<?, ?> transform() {
    return transform;
  }

  @Override
  public boolean equals(Object other) {
    // identity short-circuit, then exact-class check to keep equals symmetric
    if (this == other) {
      return true;
    }
    if (other == null || other.getClass() != getClass()) {
      return false;
    }

    PartitionField field = (PartitionField) other;
    return sourceId == field.sourceId
        && name.equals(field.name)
        && transform.equals(field.transform);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(sourceId, name, transform);
  }
}
6,434
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/DeleteFiles.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;

/**
 * API for deleting files from a table.
 * <p>
 * This API accumulates file deletions, produces a new {@link Snapshot} of the table, and commits
 * that snapshot as the current.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 */
public interface DeleteFiles extends PendingUpdate<Snapshot> {
  /**
   * Delete a file path from the underlying table.
   * <p>
   * To remove a file from the table, this path must equal a path in the table's metadata. Paths
   * that are different but equivalent will not be removed. For example, file:/path/file.avro is
   * equivalent to file:///path/file.avro, but would not remove the latter path from the table.
   *
   * @param path a fully-qualified file path to remove from the table
   * @return this for method chaining
   */
  DeleteFiles deleteFile(CharSequence path);

  /**
   * Delete a file tracked by a {@link DataFile} from the underlying table.
   *
   * @param file a DataFile to remove from the table
   * @return this for method chaining
   */
  default DeleteFiles deleteFile(DataFile file) {
    deleteFile(file.path());
    return this;
  }

  /**
   * Delete files that match an {@link Expression} on data rows from the table.
   * <p>
   * A file is selected to be deleted by the expression if it could contain any rows that match the
   * expression (candidate files are selected using an
   * {@link Projections#inclusive(PartitionSpec) inclusive projection}). These candidate files are
   * deleted if all of the rows in the file must match the expression (the partition data matches
   * the expression's {@link Projections#strict(PartitionSpec) strict projection}). This guarantees
   * that files are deleted if and only if all rows in the file must match the expression.
   * <p>
   * Files that may contain some rows that match the expression and some rows that do not will
   * result in a {@link ValidationException}.
   *
   * @param expr an expression on rows in the table
   * @return this for method chaining
   * @throws ValidationException If a file can contain both rows that match and rows that do not
   */
  DeleteFiles deleteFromRowFilter(Expression expr);
}
6,435
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/PartitionSpec.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.transforms.Transforms; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.io.Serializable; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; /** * Represents how to produce partition data for a table. * <p> * Partition data is produced by transforming columns in a table. Each column transform is * represented by a named {@link PartitionField}. 
*/ public class PartitionSpec implements Serializable { // start assigning IDs for partition fields at 1000 private static final int PARTITION_DATA_ID_START = 1000; private final Schema schema; // this is ordered so that DataFile has a consistent schema private final int specId; private final PartitionField[] fields; private transient Map<Integer, PartitionField> fieldsBySourceId = null; private transient Map<String, PartitionField> fieldsByName = null; private transient Class<?>[] javaClasses = null; private transient List<PartitionField> fieldList = null; private PartitionSpec(Schema schema, int specId, List<PartitionField> fields) { this.schema = schema; this.specId = specId; this.fields = new PartitionField[fields.size()]; for (int i = 0; i < this.fields.length; i += 1) { this.fields[i] = fields.get(i); } } /** * @return the {@link Schema} for this spec. */ public Schema schema() { return schema; } /** * @return the ID of this spec */ public int specId() { return specId; } /** * @return the list of {@link PartitionField partition fields} for this spec. */ public List<PartitionField> fields() { return lazyFieldList(); } /** * @param fieldId a field id from the source schema * @return the {@link PartitionField field} that partitions the given source field */ public PartitionField getFieldBySourceId(int fieldId) { return lazyFieldsBySourceId().get(fieldId); } /** * @return a {@link Types.StructType} for partition data defined by this spec. 
*/ public Types.StructType partitionType() { List<Types.NestedField> structFields = Lists.newArrayListWithExpectedSize(fields.length); for (int i = 0; i < fields.length; i += 1) { PartitionField field = fields[i]; Type sourceType = schema.findType(field.sourceId()); Type resultType = field.transform().getResultType(sourceType); // assign ids for partition fields starting at 100 to leave room for data file's other fields structFields.add( Types.NestedField.optional(PARTITION_DATA_ID_START + i, field.name(), resultType)); } return Types.StructType.of(structFields); } public Class<?>[] javaClasses() { if (javaClasses == null) { this.javaClasses = new Class<?>[fields.length]; for (int i = 0; i < fields.length; i += 1) { PartitionField field = fields[i]; Type sourceType = schema.findType(field.sourceId()); Type result = field.transform().getResultType(sourceType); javaClasses[i] = result.typeId().javaClass(); } } return javaClasses; } @SuppressWarnings("unchecked") private <T> T get(StructLike data, int pos, Class<?> javaClass) { return data.get(pos, (Class<T>) javaClass); } private String escape(String string) { try { return URLEncoder.encode(string, "UTF-8"); } catch (UnsupportedEncodingException e) { throw new RuntimeException(e); } } public String partitionToPath(StructLike data) { StringBuilder sb = new StringBuilder(); Class<?>[] javaClasses = javaClasses(); for (int i = 0; i < javaClasses.length; i += 1) { PartitionField field = fields[i]; String valueString = field.transform().toHumanString(get(data, i, javaClasses[i])); if (i > 0) { sb.append("/"); } sb.append(field.name()).append("=").append(escape(valueString)); } return sb.toString(); } /** * Returns true if this spec is equivalent to the other, with field names ignored. That is, if * both specs have the same number of fields, field order, source columns, and transforms. * * @param other another PartitionSpec * @return true if the specs have the same fields, source columns, and transforms. 
*/ public boolean compatibleWith(PartitionSpec other) { if (equals(other)) { return true; } if (fields.length != other.fields.length) { return false; } for (int i = 0; i < fields.length; i += 1) { PartitionField thisField = fields[i]; PartitionField thatField = other.fields[i]; if (thisField.sourceId() != thatField.sourceId() || !thisField.transform().toString().equals(thatField.transform().toString())) { return false; } } return true; } @Override public boolean equals(Object other) { if (this == other) { return true; } if (other == null || getClass() != other.getClass()) { return false; } PartitionSpec that = (PartitionSpec) other; if (this.specId != that.specId) { return false; } return Arrays.equals(fields, that.fields); } @Override public int hashCode() { return Objects.hashCode(Arrays.hashCode(fields)); } private List<PartitionField> lazyFieldList() { if (fieldList == null) { this.fieldList = ImmutableList.copyOf(fields); } return fieldList; } private Map<String, PartitionField> lazyFieldsByName() { if (fieldsByName == null) { ImmutableMap.Builder<String, PartitionField> builder = ImmutableMap.builder(); for (PartitionField field : fields) { builder.put(field.name(), field); } this.fieldsByName = builder.build(); } return fieldsByName; } private Map<Integer, PartitionField> lazyFieldsBySourceId() { if (fieldsBySourceId == null) { ImmutableMap.Builder<Integer, PartitionField> byIdBuilder = ImmutableMap.builder(); for (PartitionField field : fields) { byIdBuilder.put(field.sourceId(), field); } this.fieldsBySourceId = byIdBuilder.build(); } return fieldsBySourceId; } /** * Returns the source field ids for identity partitions. * * @return a set of source ids for the identity partitions. 
*/ public Set<Integer> identitySourceIds() { Set<Integer> sourceIds = Sets.newHashSet(); List<PartitionField> fields = this.fields(); for (PartitionField field : fields) { if ("identity".equals(field.transform().toString())) { sourceIds.add(field.sourceId()); } } return sourceIds; } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("["); for (PartitionField field : fields) { sb.append("\n"); sb.append(" ").append(field.name()).append(": ").append(field.transform()) .append("(").append(field.sourceId()).append(")"); } if (fields.length > 0) { sb.append("\n"); } sb.append("]"); return sb.toString(); } private static final PartitionSpec UNPARTITIONED_SPEC = new PartitionSpec(new Schema(), 0, ImmutableList.of()); /** * Returns a spec for unpartitioned tables. * * @return a partition spec with no partitions */ public static PartitionSpec unpartitioned() { return UNPARTITIONED_SPEC; } /** * Creates a new {@link Builder partition spec builder} for the given {@link Schema}. * * @param schema a schema * @return a partition spec builder for the given schema */ public static Builder builderFor(Schema schema) { return new Builder(schema); } /** * Used to create valid {@link PartitionSpec partition specs}. * <p> * Call {@link #builderFor(Schema)} to create a new builder. 
*/ public static class Builder { private final Schema schema; private final List<PartitionField> fields = Lists.newArrayList(); private final Set<String> partitionNames = Sets.newHashSet(); private int specId = 0; private Builder(Schema schema) { this.schema = schema; } private void checkAndAddPartitionName(String name) { Preconditions.checkArgument(name != null && !name.isEmpty(), "Cannot use empty or null partition name: %s", name); Preconditions.checkArgument(!partitionNames.contains(name), "Cannot use partition name more than once: %s", name); partitionNames.add(name); } public Builder withSpecId(int specId) { this.specId = specId; return this; } private Types.NestedField findSourceColumn(String sourceName) { Types.NestedField sourceColumn = schema.findField(sourceName); Preconditions.checkNotNull(sourceColumn, "Cannot find source column: %s", sourceName); return sourceColumn; } public Builder identity(String sourceName) { checkAndAddPartitionName(sourceName); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), sourceName, Transforms.identity(sourceColumn.type()))); return this; } public Builder year(String sourceName) { String name = sourceName + "_year"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, Transforms.year(sourceColumn.type()))); return this; } public Builder month(String sourceName) { String name = sourceName + "_month"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, Transforms.month(sourceColumn.type()))); return this; } public Builder day(String sourceName) { String name = sourceName + "_day"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, 
Transforms.day(sourceColumn.type()))); return this; } public Builder hour(String sourceName) { String name = sourceName + "_hour"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, Transforms.hour(sourceColumn.type()))); return this; } public Builder bucket(String sourceName, int numBuckets) { String name = sourceName + "_bucket"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, Transforms.bucket(sourceColumn.type(), numBuckets))); return this; } public Builder truncate(String sourceName, int width) { String name = sourceName + "_trunc"; checkAndAddPartitionName(name); Types.NestedField sourceColumn = findSourceColumn(sourceName); fields.add(new PartitionField( sourceColumn.fieldId(), name, Transforms.truncate(sourceColumn.type(), width))); return this; } public Builder add(int sourceId, String name, String transform) { checkAndAddPartitionName(name); Types.NestedField column = schema.findField(sourceId); Preconditions.checkNotNull(column, "Cannot find source column: %d", sourceId); fields.add(new PartitionField( sourceId, name, Transforms.fromString(column.type(), transform))); return this; } public PartitionSpec build() { PartitionSpec spec = new PartitionSpec(schema, specId, fields); checkCompatibility(spec, schema); return spec; } } public static void checkCompatibility(PartitionSpec spec, Schema schema) { for (PartitionField field : spec.fields) { Type sourceType = schema.findType(field.sourceId()); ValidationException.check(sourceType.isPrimitiveType(), "Cannot partition by non-primitive source field: %s", sourceType); ValidationException.check( field.transform().canTransform(sourceType), "Invalid source type %s for transform: %s", sourceType, field.transform()); } } }
6,436
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/DataFile.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.types.Types.BinaryType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StringType;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Interface for files listed in a table manifest.
 */
public interface DataFile {
  /**
   * Returns the manifest schema for data file entries, parameterized by the table's partition
   * type. Field ids are fixed so manifests remain readable across schema evolution.
   *
   * @param partitionType the struct type of this table's partition data
   * @return the struct type used to store data file entries in manifests
   */
  static StructType getType(StructType partitionType) {
    // IDs start at 100 to leave room for changes to ManifestEntry
    return StructType.of(
        required(100, "file_path", StringType.get()),
        required(101, "file_format", StringType.get()),
        required(102, "partition", partitionType),
        required(103, "record_count", LongType.get()),
        required(104, "file_size_in_bytes", LongType.get()),
        required(105, "block_size_in_bytes", LongType.get()),
        optional(106, "file_ordinal", IntegerType.get()),
        optional(107, "sort_columns", ListType.ofRequired(112, IntegerType.get())),
        optional(108, "column_sizes", MapType.ofRequired(117, 118,
            IntegerType.get(), LongType.get())),
        optional(109, "value_counts", MapType.ofRequired(119, 120,
            IntegerType.get(), LongType.get())),
        optional(110, "null_value_counts", MapType.ofRequired(121, 122,
            IntegerType.get(), LongType.get())),
        optional(125, "lower_bounds", MapType.ofRequired(126, 127,
            IntegerType.get(), BinaryType.get())),
        optional(128, "upper_bounds", MapType.ofRequired(129, 130,
            IntegerType.get(), BinaryType.get()))
        // NEXT ID TO ASSIGN: 131
    );
  }

  /**
   * @return fully qualified path to the file, suitable for constructing a Hadoop Path
   */
  CharSequence path();

  /**
   * @return format of the data file
   */
  FileFormat format();

  /**
   * @return partition data for this file as a {@link StructLike}
   */
  StructLike partition();

  /**
   * @return the number of top-level records in the data file
   */
  long recordCount();

  /**
   * @return the data file size in bytes
   */
  long fileSizeInBytes();

  /**
   * @return the data file block size in bytes (for split planning)
   */
  long blockSizeInBytes();

  /**
   * @return file ordinal if written in a global ordering, or null
   */
  Integer fileOrdinal();

  /**
   * @return list of columns the file records are sorted by, or null
   */
  List<Integer> sortColumns();

  /**
   * @return if collected, map from column ID to the size of the column in bytes, null otherwise
   */
  Map<Integer, Long> columnSizes();

  /**
   * @return if collected, map from column ID to the count of its non-null values, null otherwise
   */
  Map<Integer, Long> valueCounts();

  /**
   * @return if collected, map from column ID to its null value count, null otherwise
   */
  Map<Integer, Long> nullValueCounts();

  /**
   * @return if collected, map from column ID to value lower bounds, null otherwise
   */
  Map<Integer, ByteBuffer> lowerBounds();

  /**
   * @return if collected, map from column ID to value upper bounds, null otherwise
   */
  Map<Integer, ByteBuffer> upperBounds();

  /**
   * Copies this {@link DataFile data file}. Manifest readers can reuse data file instances; use
   * this method to copy data when collecting files from tasks.
   *
   * @return a copy of this data file
   */
  DataFile copy();
}
6,437
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/AppendFiles.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

/**
 * API for appending new files in a table.
 * <p>
 * This API accumulates file additions, produces a new {@link Snapshot} of the table, and commits
 * that snapshot as the current.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 */
public interface AppendFiles extends PendingUpdate<Snapshot> {
  /**
   * Append a {@link DataFile} to the table.
   *
   * @param file a data file
   * @return this for method chaining
   */
  AppendFiles appendFile(DataFile file);
}
6,438
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/TableScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.google.common.collect.Lists;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.io.CloseableIterable;
import java.util.Collection;

/**
 * API for configuring a table scan.
 * <p>
 * TableScan objects are immutable and can be shared between threads. Refinement methods, like
 * {@link #select(Collection)} and {@link #filter(Expression)}, create new TableScan instances.
 */
public interface TableScan {
  /**
   * Returns the {@link Table} from which this scan loads data.
   *
   * @return this scan's table
   */
  Table table();

  /**
   * Create a new {@link TableScan} from this scan's configuration that will use the given snapshot
   * by ID.
   *
   * @param snapshotId a snapshot ID
   * @return a new scan based on this with the given snapshot ID
   * @throws IllegalArgumentException if the snapshot cannot be found
   */
  TableScan useSnapshot(long snapshotId);

  /**
   * Create a new {@link TableScan} from this scan's configuration that will use the most recent
   * snapshot as of the given time in milliseconds.
   *
   * @param timestampMillis a timestamp in milliseconds.
   * @return a new scan based on this with the current snapshot at the given time
   * @throws IllegalArgumentException if the snapshot cannot be found
   */
  TableScan asOfTime(long timestampMillis);

  /**
   * Create a new {@link TableScan} from this with the schema as its projection.
   *
   * @param schema a projection schema
   * @return a new scan based on this with the given projection
   */
  TableScan project(Schema schema);

  /**
   * Create a new {@link TableScan} from this that will read the given data columns. This produces
   * an expected schema that includes all fields that are either selected or used by this scan's
   * filter expression.
   *
   * @param columns column names from the table's schema
   * @return a new scan based on this with the given projection columns
   */
  default TableScan select(String... columns) {
    return select(Lists.newArrayList(columns));
  }

  /**
   * Create a new {@link TableScan} from this that will read the given data columns. This produces
   * an expected schema that includes all fields that are either selected or used by this scan's
   * filter expression.
   *
   * @param columns column names from the table's schema
   * @return a new scan based on this with the given projection columns
   */
  TableScan select(Collection<String> columns);

  /**
   * Create a new {@link TableScan} from the results of this filtered by the {@link Expression}.
   *
   * @param expr a filter expression
   * @return a new scan based on this with results filtered by the expression
   */
  TableScan filter(Expression expr);

  /**
   * Plan the {@link FileScanTask files} that will be read by this scan.
   * <p>
   * Each file has a residual expression that should be applied to filter the file's rows.
   * <p>
   * This simple plan returns file scans for each file from position 0 to the file's length. For
   * planning that will combine small files, split large files, and attempt to balance work, use
   * {@link #planTasks()} instead.
   *
   * @return an Iterable of file tasks that are required by this scan
   */
  CloseableIterable<FileScanTask> planFiles();

  /**
   * Plan the {@link CombinedScanTask tasks} for this scan.
   * <p>
   * Tasks created by this method may read partial input files, multiple input files, or both.
   *
   * @return an Iterable of tasks for this scan
   */
  CloseableIterable<CombinedScanTask> planTasks();

  /**
   * Returns this scan's projection {@link Schema}.
   * <p>
   * If the projection schema was set directly using {@link #project(Schema)}, returns that schema.
   * <p>
   * If the projection schema was set by calling {@link #select(Collection)}, returns a projection
   * schema that includes the selected data fields and any fields used in the filter expression.
   *
   * @return this scan's projection schema
   */
  Schema schema();

  /**
   * Returns this scan's filter {@link Expression}.
   *
   * @return this scan's filter expression
   */
  Expression filter();
}
6,439
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ExpireSnapshots.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.util.List;
import java.util.function.Consumer;

/**
 * API for removing old {@link Snapshot snapshots} from a table.
 * <p>
 * This API accumulates snapshot deletions and commits the new list to the table. This API does not
 * allow deleting the current snapshot.
 * <p>
 * When committing, these changes will be applied to the latest table metadata. Commit conflicts
 * will be resolved by applying the changes to the new latest metadata and reattempting the commit.
 * <p>
 * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
 * deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be used
 * to pass an alternative deletion method.
 *
 * {@link #apply()} returns a list of the snapshots that will be removed.
 */
public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
  /**
   * Expires a specific {@link Snapshot} identified by id.
   *
   * @param snapshotId long id of the snapshot to expire
   * @return this for method chaining
   */
  ExpireSnapshots expireSnapshotId(long snapshotId);

  /**
   * Expires all snapshots older than the given timestamp.
   *
   * @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
   * @return this for method chaining
   */
  ExpireSnapshots expireOlderThan(long timestampMillis);

  /**
   * Passes an alternative delete implementation that will be used for manifests and data files.
   * <p>
   * Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
   * deleted by snapshots that are expired will be deleted.
   * <p>
   * If this method is not called, unnecessary manifests and data files will still be deleted.
   *
   * @param deleteFunc a function that will be called to delete manifests and data files
   * @return this for method chaining
   */
  ExpireSnapshots deleteWith(Consumer<String> deleteFunc);
}
6,440
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/RewriteFiles.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.ValidationException;
import java.util.Set;

/**
 * API for replacing files in a table.
 * <p>
 * This API accumulates file additions and deletions, produces a new {@link Snapshot} of the
 * changes, and commits that snapshot as the current.
 * <p>
 * When committing, these changes will be applied to the latest table snapshot. Commit conflicts
 * will be resolved by applying the changes to the new latest snapshot and reattempting the commit.
 * If any of the deleted files are no longer in the latest snapshot when reattempting, the commit
 * will throw a {@link ValidationException}.
 */
public interface RewriteFiles extends PendingUpdate<Snapshot> {
  /**
   * Add a rewrite that replaces one set of files with another set that contains the same data.
   *
   * @param filesToDelete files that will be replaced (deleted), cannot be null or empty.
   * @param filesToAdd files that will be added, cannot be null or empty.
   * @return this for method chaining
   */
  RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd);
}
6,441
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/StructLike.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg; /** * Interface for accessing data by position in a schema. * <p> * This interface supports accessing data in top-level fields, not in nested fields. */ public interface StructLike { int size(); <T> T get(int pos, Class<T> javaClass); <T> void set(int pos, T value); }
6,442
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/ScanTask.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.io.Serializable;

/**
 * A scan task.
 */
public interface ScanTask extends Serializable {
  /**
   * @return true if this is a {@link FileScanTask}, false otherwise.
   */
  default boolean isFileScanTask() {
    return false;
  }

  /**
   * @return this cast to {@link FileScanTask} if it is one
   * @throws IllegalStateException if this is not a {@link FileScanTask}
   */
  default FileScanTask asFileScanTask() {
    throw new IllegalStateException("Not a FileScanTask: " + this);
  }

  /**
   * @return this cast to {@link CombinedScanTask} if it is one
   * @throws IllegalStateException if this is not a {@link CombinedScanTask}
   */
  default CombinedScanTask asCombinedScanTask() {
    throw new IllegalStateException("Not a CombinedScanTask: " + this);
  }
}
6,443
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Metrics.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Map;

/**
 * A container for metrics: a record count plus per-column maps keyed by an
 * integer column ID (sizes, value counts, null counts, and serialized
 * lower/upper bounds).
 * <p>
 * All values are nullable; a null map or count means that metric was not
 * collected. Instances are {@link Serializable}.
 */
public class Metrics implements Serializable {
  private Long rowCount = null;
  private Map<Integer, Long> columnSizes = null;
  private Map<Integer, Long> valueCounts = null;
  private Map<Integer, Long> nullValueCounts = null;
  private Map<Integer, ByteBuffer> lowerBounds = null;
  private Map<Integer, ByteBuffer> upperBounds = null;

  /**
   * Constructor for Java serialization; leaves all metrics null.
   */
  public Metrics() {
  }

  /**
   * Creates metrics without bounds.
   *
   * @param rowCount total record count
   * @param columnSizes map from column ID to on-disk size
   * @param valueCounts map from column ID to number of values
   * @param nullValueCounts map from column ID to number of null values
   */
  public Metrics(Long rowCount,
                 Map<Integer, Long> columnSizes,
                 Map<Integer, Long> valueCounts,
                 Map<Integer, Long> nullValueCounts) {
    // delegate to the full constructor instead of duplicating assignments
    this(rowCount, columnSizes, valueCounts, nullValueCounts, null, null);
  }

  /**
   * Creates metrics with bounds.
   *
   * @param rowCount total record count
   * @param columnSizes map from column ID to on-disk size
   * @param valueCounts map from column ID to number of values
   * @param nullValueCounts map from column ID to number of null values
   * @param lowerBounds map from column ID to serialized lower bound
   * @param upperBounds map from column ID to serialized upper bound
   */
  public Metrics(Long rowCount,
                 Map<Integer, Long> columnSizes,
                 Map<Integer, Long> valueCounts,
                 Map<Integer, Long> nullValueCounts,
                 Map<Integer, ByteBuffer> lowerBounds,
                 Map<Integer, ByteBuffer> upperBounds) {
    this.rowCount = rowCount;
    this.columnSizes = columnSizes;
    this.valueCounts = valueCounts;
    this.nullValueCounts = nullValueCounts;
    this.lowerBounds = lowerBounds;
    this.upperBounds = upperBounds;
  }

  /** @return the total record count, or null if not collected */
  public Long recordCount() {
    return rowCount;
  }

  /** @return map from column ID to on-disk size, or null if not collected */
  public Map<Integer, Long> columnSizes() {
    return columnSizes;
  }

  /** @return map from column ID to number of values, or null if not collected */
  public Map<Integer, Long> valueCounts() {
    return valueCounts;
  }

  /** @return map from column ID to number of null values, or null if not collected */
  public Map<Integer, Long> nullValueCounts() {
    return nullValueCounts;
  }

  /** @return map from column ID to serialized lower bound, or null if not collected */
  public Map<Integer, ByteBuffer> lowerBounds() {
    return lowerBounds;
  }

  /** @return map from column ID to serialized upper bound, or null if not collected */
  public Map<Integer, ByteBuffer> upperBounds() {
    return upperBounds;
  }
}
6,444
0
Create_ds/iceberg/api/src/main/java/com/netflix
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/Transaction.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg;

import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;

/**
 * A transaction for performing multiple updates to a table.
 * <p>
 * Updates created through this interface accumulate in the transaction and are
 * applied together by {@link #commitTransaction()}.
 */
public interface Transaction {
  /**
   * Return the {@link Table} that this transaction will update.
   *
   * @return this transaction's table
   */
  Table table();

  /**
   * Create a new {@link UpdateProperties} to update table properties.
   *
   * @return a new {@link UpdateProperties}
   */
  UpdateProperties updateProperties();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table.
   *
   * @return a new {@link AppendFiles}
   */
  AppendFiles newAppend();

  /**
   * Create a new {@link AppendFiles append API} to add files to this table.
   * <p>
   * Using this method signals to the underlying implementation that the append should not perform
   * extra work in order to commit quickly. Fast appends are not recommended for normal writes
   * because the fast commit may cause split planning to slow down over time.
   * <p>
   * Implementations may not support fast appends, in which case this will return the same appender
   * as {@link #newAppend()}.
   *
   * @return a new {@link AppendFiles}
   */
  default AppendFiles newFastAppend() {
    // default: fall back to a normal append when fast appends are unsupported
    return newAppend();
  }

  /**
   * Create a new {@link RewriteFiles rewrite API} to replace files in this table.
   *
   * @return a new {@link RewriteFiles}
   */
  RewriteFiles newRewrite();

  /**
   * Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
   *
   * @return a new {@link OverwriteFiles}
   */
  OverwriteFiles newOverwrite();

  /**
   * Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
   * overwrite partitions in the table with new data.
   * <p>
   * This is provided to implement SQL compatible with Hive table operations but is not recommended.
   * Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
   *
   * @return a new {@link ReplacePartitions}
   */
  ReplacePartitions newReplacePartitions();

  /**
   * Create a new {@link DeleteFiles delete API} to delete files in this table.
   *
   * @return a new {@link DeleteFiles}
   */
  DeleteFiles newDelete();

  /**
   * Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table.
   *
   * @return a new {@link ExpireSnapshots}
   */
  ExpireSnapshots expireSnapshots();

  /**
   * Apply the pending changes from all actions and commit.
   *
   * @throws ValidationException If any update cannot be applied to the current table metadata.
   * @throws CommitFailedException If the updates cannot be committed due to conflicts.
   */
  void commitTransaction();
}
6,445
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/IndexByName.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.base.Joiner;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Map;

/**
 * Schema visitor that builds an index from dotted field name
 * (e.g. {@code "a.b.c"}) to field ID for every field in a schema, including
 * list element and map key/value pseudo-fields.
 */
public class IndexByName extends TypeUtil.SchemaVisitor<Map<String, Integer>> {
  private static final Joiner DOT = Joiner.on(".");

  // accumulated mapping of full dotted name -> field ID
  private final Map<String, Integer> nameToId = Maps.newHashMap();

  @Override
  public Map<String, Integer> schema(Schema schema, Map<String, Integer> structResult) {
    // the accumulated index is the final result
    return nameToId;
  }

  @Override
  public Map<String, Integer> struct(Types.StructType struct, List<Map<String, Integer>> fieldResults) {
    return nameToId;
  }

  @Override
  public Map<String, Integer> field(Types.NestedField field, Map<String, Integer> fieldResult) {
    addField(field.name(), field.fieldId());
    return null;
  }

  @Override
  public Map<String, Integer> list(Types.ListType list, Map<String, Integer> elementResult) {
    // index the list's element pseudo-field(s) under their own names
    for (Types.NestedField field : list.fields()) {
      addField(field.name(), field.fieldId());
    }
    return null;
  }

  @Override
  public Map<String, Integer> map(Types.MapType map, Map<String, Integer> keyResult,
                                  Map<String, Integer> valueResult) {
    // index the map's key and value pseudo-fields under their own names
    for (Types.NestedField field : map.fields()) {
      addField(field.name(), field.fieldId());
    }
    return null;
  }

  /**
   * Records a field under its full dotted name.
   * <p>
   * NOTE(review): {@code fieldNames} is inherited state from
   * {@code TypeUtil.SchemaVisitor} — presumably a stack of enclosing field
   * names maintained during traversal, walked outermost-first via
   * {@code descendingIterator()}; confirm in TypeUtil.
   */
  private void addField(String name, int fieldId) {
    String fullName = name;
    if (!fieldNames.isEmpty()) {
      fullName = DOT.join(DOT.join(fieldNames.descendingIterator()), name);
    }
    nameToId.put(fullName, fieldId);
  }
}
6,446
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Type.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import java.io.ObjectStreamException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.List;

/**
 * Base interface of all Iceberg schema types.
 * <p>
 * Types are either primitive ({@link PrimitiveType}) or nested
 * ({@link NestedType}: struct, list, map). The {@code is*}/{@code as*} default
 * methods implement a checked-downcast protocol: the {@code as*} methods throw
 * {@link IllegalArgumentException} unless overridden by a matching subtype.
 */
public interface Type extends Serializable {
  /**
   * Enumeration of type categories, each paired with the Java class used to
   * represent single values of that type (nested types use {@code Void.class}
   * because they have no single-value representation).
   */
  enum TypeID {
    BOOLEAN(Boolean.class),
    INTEGER(Integer.class),
    LONG(Long.class),
    FLOAT(Float.class),
    DOUBLE(Double.class),
    DATE(Integer.class),
    TIME(Long.class),
    TIMESTAMP(Long.class),
    STRING(CharSequence.class),
    UUID(java.util.UUID.class),
    FIXED(ByteBuffer.class),
    BINARY(ByteBuffer.class),
    DECIMAL(BigDecimal.class),
    STRUCT(Void.class),
    LIST(Void.class),
    MAP(Void.class);

    private final Class<?> javaClass;

    TypeID(Class<?> javaClass) {
      this.javaClass = javaClass;
    }

    /** @return the Java class used to represent values of this type */
    public Class<?> javaClass() {
      return javaClass;
    }
  }

  /** @return the {@link TypeID} category of this type */
  TypeID typeId();

  default boolean isPrimitiveType() {
    return false;
  }

  default PrimitiveType asPrimitiveType() {
    throw new IllegalArgumentException("Not a primitive type: " + this);
  }

  default Types.StructType asStructType() {
    throw new IllegalArgumentException("Not a struct type: " + this);
  }

  default Types.ListType asListType() {
    throw new IllegalArgumentException("Not a list type: " + this);
  }

  default Types.MapType asMapType() {
    throw new IllegalArgumentException("Not a map type: " + this);
  }

  default boolean isNestedType() {
    return false;
  }

  default boolean isStructType() {
    return false;
  }

  default boolean isListType() {
    return false;
  }

  default boolean isMapType() {
    return false;
  }

  default NestedType asNestedType() {
    throw new IllegalArgumentException("Not a nested type: " + this);
  }

  /**
   * Base class for primitive types.
   */
  abstract class PrimitiveType implements Type {
    public boolean isPrimitiveType() {
      return true;
    }

    public PrimitiveType asPrimitiveType() {
      return this;
    }

    // Java serialization proxy: primitives serialize as their string form and
    // are resolved back through PrimitiveHolder.readResolve.
    Object writeReplace() throws ObjectStreamException {
      return new PrimitiveHolder(toString());
    }
  }

  /**
   * Base class for nested types (struct, list, map), which contain named
   * {@link Types.NestedField fields}.
   */
  abstract class NestedType implements Type {
    public boolean isNestedType() {
      return true;
    }

    public NestedType asNestedType() {
      return this;
    }

    /** @return the fields of this nested type */
    public abstract List<Types.NestedField> fields();

    /** @return the type of the field with the given name */
    public abstract Type fieldType(String name);

    /** @return the field with the given field ID */
    public abstract Types.NestedField field(int id);
  }
}
6,447
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/AssignFreshIds.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.Iterator;
import java.util.List;
import java.util.function.Supplier;

/**
 * Schema visitor that rebuilds a schema with fresh IDs for every struct field,
 * list element, and map key/value, drawn in order from a {@link TypeUtil.NextID}
 * counter.
 * <p>
 * ID assignment order matters: a struct reserves IDs for all of its direct
 * fields before visiting any field's type, so sibling fields get consecutive
 * IDs ahead of their nested content.
 */
class AssignFreshIds extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final TypeUtil.NextID nextId;

  AssignFreshIds(TypeUtil.NextID nextId) {
    this.nextId = nextId;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> future) {
    return future.get();
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> futures) {
    List<Types.NestedField> fields = struct.fields();
    int length = struct.fields().size();

    List<Integer> newIds = Lists.newArrayListWithExpectedSize(length);
    for (int i = 0; i < length; i += 1) {
      newIds.add(nextId.get()); // assign IDs for this struct's fields first
    }

    // consuming the futures here triggers the (deferred) visits of the field
    // types, which allocate their IDs after this struct's field IDs
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(length);
    Iterator<Type> types = futures.iterator();
    for (int i = 0; i < length; i += 1) {
      Types.NestedField field = fields.get(i);
      Type type = types.next();
      if (field.isOptional()) {
        newFields.add(Types.NestedField.optional(newIds.get(i), field.name(), type));
      } else {
        newFields.add(Types.NestedField.required(newIds.get(i), field.name(), type));
      }
    }

    return Types.StructType.of(newFields);
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> future) {
    return future.get();
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> future) {
    // reserve the element ID before visiting the element type
    int newId = nextId.get();
    if (list.isElementOptional()) {
      return Types.ListType.ofOptional(newId, future.get());
    } else {
      return Types.ListType.ofRequired(newId, future.get());
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyFuture, Supplier<Type> valuefuture) {
    // reserve key and value IDs before visiting either type
    int newKeyId = nextId.get();
    int newValueId = nextId.get();
    if (map.isValueOptional()) {
      return Types.MapType.ofOptional(newKeyId, newValueId, keyFuture.get(), valuefuture.get());
    } else {
      return Types.MapType.ofRequired(newKeyId, newValueId, keyFuture.get(), valuefuture.get());
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    // primitives carry no IDs; return as-is
    return primitive;
  }
}
6,448
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/GetProjectedIds.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Set;

/**
 * Schema visitor that accumulates the set of field IDs in a schema.
 * <p>
 * A field's ID is recorded only when visiting its content produced no result
 * ({@code null}); list element and map key/value pseudo-field IDs are recorded
 * the same way.
 */
class GetProjectedIds extends TypeUtil.SchemaVisitor<Set<Integer>> {
  private final Set<Integer> fieldIds = Sets.newHashSet();

  // records the IDs of every field in the given list
  private void collectIds(List<Types.NestedField> nestedFields) {
    for (Types.NestedField nested : nestedFields) {
      fieldIds.add(nested.fieldId());
    }
  }

  @Override
  public Set<Integer> schema(Schema schema, Set<Integer> structResult) {
    return fieldIds;
  }

  @Override
  public Set<Integer> struct(Types.StructType struct, List<Set<Integer>> fieldResults) {
    return fieldIds;
  }

  @Override
  public Set<Integer> field(Types.NestedField field, Set<Integer> fieldResult) {
    if (fieldResult == null) {
      fieldIds.add(field.fieldId());
    }
    return fieldIds;
  }

  @Override
  public Set<Integer> list(Types.ListType list, Set<Integer> elementResult) {
    if (elementResult == null) {
      collectIds(list.fields());
    }
    return fieldIds;
  }

  @Override
  public Set<Integer> map(Types.MapType map, Set<Integer> keyResult, Set<Integer> valueResult) {
    if (valueResult == null) {
      collectIds(map.fields());
    }
    return fieldIds;
  }
}
6,449
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/ReassignIds.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.function.Supplier;

/**
 * Schema visitor that rebuilds a schema using the field/element/key/value IDs
 * of a structurally-matching source schema, looking fields up by name.
 * <p>
 * Implementation note: {@code sourceType} is a mutable cursor into the source
 * schema. Each visit method positions the cursor on the matching source type
 * before recursing and restores it in a {@code finally} block, so evaluation
 * must start from {@link #schema(Schema, Supplier)}.
 */
class ReassignIds extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final Schema sourceSchema;
  // cursor: the source type that corresponds to the node currently visited
  private Type sourceType;

  ReassignIds(Schema sourceSchema) {
    this.sourceSchema = sourceSchema;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> future) {
    // initialize the cursor at the source schema's root struct
    this.sourceType = sourceSchema.asStruct();
    try {
      return future.get();
    } finally {
      this.sourceType = null;
    }
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> fieldTypes) {
    Preconditions.checkNotNull(sourceType, "Evaluation must start with a schema.");
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);

    Types.StructType sourceStruct = sourceType.asStructType();
    List<Types.NestedField> fields = struct.fields();
    int length = fields.size();

    // materialize the lazily-visited field types before rebuilding
    List<Type> types = Lists.newArrayList(fieldTypes);
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(length);
    for (int i = 0; i < length; i += 1) {
      Types.NestedField field = fields.get(i);
      // take the ID of the source field with the same name
      int sourceFieldId = sourceStruct.field(field.name()).fieldId();
      if (field.isRequired()) {
        newFields.add(Types.NestedField.required(sourceFieldId, field.name(), types.get(i)));
      } else {
        newFields.add(Types.NestedField.optional(sourceFieldId, field.name(), types.get(i)));
      }
    }

    return Types.StructType.of(newFields);
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> future) {
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);

    Types.StructType sourceStruct = sourceType.asStructType();
    Types.NestedField sourceField = sourceStruct.field(field.name());

    // descend the cursor into the matching source field's type
    this.sourceType = sourceField.type();
    try {
      return future.get();
    } finally {
      // restore the cursor to the enclosing struct
      sourceType = sourceStruct;
    }
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> elementTypeFuture) {
    Preconditions.checkArgument(sourceType.isListType(), "Not a list: " + sourceType);

    Types.ListType sourceList = sourceType.asListType();
    int sourceElementId = sourceList.elementId();

    this.sourceType = sourceList.elementType();
    try {
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(sourceElementId, elementTypeFuture.get());
      } else {
        return Types.ListType.ofRequired(sourceElementId, elementTypeFuture.get());
      }
    } finally {
      this.sourceType = sourceList;
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyTypeFuture, Supplier<Type> valueTypeFuture) {
    Preconditions.checkArgument(sourceType.isMapType(), "Not a map: " + sourceType);

    Types.MapType sourceMap = sourceType.asMapType();
    int sourceKeyId = sourceMap.keyId();
    int sourceValueId = sourceMap.valueId();

    try {
      // visit the key, then the value, repositioning the cursor for each
      this.sourceType = sourceMap.keyType();
      Type keyType = keyTypeFuture.get();

      this.sourceType = sourceMap.valueType();
      Type valueType = valueTypeFuture.get();

      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(sourceKeyId, sourceValueId, keyType, valueType);
      } else {
        return Types.MapType.ofRequired(sourceKeyId, sourceValueId, keyType, valueType);
      }
    } finally {
      this.sourceType = sourceMap;
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    return primitive; // nothing to reassign
  }
}
6,450
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/PruneColumns.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Set;

/**
 * Schema visitor that projects a schema down to a set of selected field IDs.
 * <p>
 * Returns a type containing only selected content, the original type when the
 * selection covers it entirely (reusing the instance where possible), or
 * {@code null} when nothing under a node is selected. Selecting a field keeps
 * its entire subtree.
 */
class PruneColumns extends TypeUtil.SchemaVisitor<Type> {
  // IDs of the fields to keep
  private final Set<Integer> selected;

  public PruneColumns(Set<Integer> selected) {
    this.selected = selected;
  }

  @Override
  public Type schema(Schema schema, Type structResult) {
    return structResult;
  }

  @Override
  public Type struct(Types.StructType struct, List<Type> fieldResults) {
    List<Types.NestedField> fields = struct.fields();
    List<Types.NestedField> selectedFields = Lists.newArrayListWithExpectedSize(fields.size());
    boolean sameTypes = true;

    for (int i = 0; i < fieldResults.size(); i += 1) {
      Types.NestedField field = fields.get(i);
      Type projectedType = fieldResults.get(i);
      if (field.type() == projectedType) {
        // uses identity because there is no need to check structure. if identity doesn't match
        // then structure should not either.
        selectedFields.add(field);
      } else if (projectedType != null) {
        sameTypes = false; // signal that some types were altered
        if (field.isOptional()) {
          selectedFields.add(
              Types.NestedField.optional(field.fieldId(), field.name(), projectedType));
        } else {
          selectedFields.add(
              Types.NestedField.required(field.fieldId(), field.name(), projectedType));
        }
      }
      // projectedType == null: the field (and everything under it) is dropped
    }

    if (!selectedFields.isEmpty()) {
      if (selectedFields.size() == fields.size() && sameTypes) {
        // everything kept unchanged: reuse the original struct instance
        return struct;
      } else {
        return Types.StructType.of(selectedFields);
      }
    }

    // nothing selected under this struct
    return null;
  }

  @Override
  public Type field(Types.NestedField field, Type fieldResult) {
    if (selected.contains(field.fieldId())) {
      // selecting a field keeps its full type
      return field.type();
    } else if (fieldResult != null) {
      // this isn't necessarily the same as field.type() because a struct may not have all
      // fields selected.
      return fieldResult;
    }
    return null;
  }

  @Override
  public Type list(Types.ListType list, Type elementResult) {
    if (selected.contains(list.elementId())) {
      return list;
    } else if (elementResult != null) {
      if (list.elementType() == elementResult) {
        return list;
      } else if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementResult);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementResult);
      }
    }
    return null;
  }

  @Override
  public Type map(Types.MapType map, Type ignored, Type valueResult) {
    if (selected.contains(map.valueId())) {
      return map;
    } else if (valueResult != null) {
      if (map.valueType() == valueResult) {
        return map;
      } else if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueResult);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueResult);
      }
    } else if (selected.contains(map.keyId())) {
      // right now, maps can't be selected without values
      return map;
    }
    return null;
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    // a primitive is kept only via its enclosing field's selection
    return null;
  }
}
6,451
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/PrimitiveHolder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import java.io.ObjectStreamException;
import java.io.Serializable;

/**
 * Replacement for primitive types in Java Serialization.
 * <p>
 * Serialization proxy: {@code Type.PrimitiveType.writeReplace()} serializes a
 * primitive as its string form; on deserialization, {@link #readResolve()}
 * parses it back so canonical singleton instances are restored.
 */
class PrimitiveHolder implements Serializable {
  // string form of the primitive type, e.g. "int" or "decimal(9, 2)"
  private String typeAsString = null;

  /**
   * Constructor for Java serialization.
   */
  PrimitiveHolder() {
  }

  PrimitiveHolder(String typeAsString) {
    this.typeAsString = typeAsString;
  }

  Object readResolve() throws ObjectStreamException {
    // replace this holder with the parsed primitive type instance
    return Types.fromPrimitiveString(typeAsString);
  }
}
6,452
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/CheckCompatibility.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Supplier;

import static com.netflix.iceberg.types.TypeUtil.isPromotionAllowed;

/**
 * Schema visitor that checks whether data written with one schema can be read
 * with another, producing human-readable error strings.
 * <p>
 * The visitor walks the read schema while {@code currentType} tracks the
 * corresponding position in the other (write/file) schema; each visit method
 * repositions the cursor before recursing and restores it in {@code finally}.
 * Error strings starting with ":" are completed with field names as they
 * propagate up through {@link #field(Types.NestedField, Supplier)}.
 */
public class CheckCompatibility extends TypeUtil.CustomOrderSchemaVisitor<List<String>> {
  /**
   * Returns a list of compatibility errors for writing with the given write schema.
   *
   * @param readSchema a read schema
   * @param writeSchema a write schema
   * @return a list of error details, or an empty list if there are no compatibility problems
   */
  public static List<String> writeCompatibilityErrors(Schema readSchema, Schema writeSchema) {
    return TypeUtil.visit(readSchema, new CheckCompatibility(writeSchema, true));
  }

  /**
   * Returns a list of compatibility errors for reading with the given read schema.
   *
   * @param readSchema a read schema
   * @param writeSchema a write schema
   * @return a list of error details, or an empty list if there are no compatibility problems
   */
  public static List<String> readCompatibilityErrors(Schema readSchema, Schema writeSchema) {
    return TypeUtil.visit(readSchema, new CheckCompatibility(writeSchema, false));
  }

  private static final List<String> NO_ERRORS = ImmutableList.of();

  private final Schema schema;
  private final boolean checkOrdering;

  // the current file schema, maintained while traversing a write schema
  private Type currentType;

  private CheckCompatibility(Schema schema, boolean checkOrdering) {
    this.schema = schema;
    this.checkOrdering = checkOrdering;
  }

  @Override
  public List<String> schema(Schema readSchema, Supplier<List<String>> structErrors) {
    this.currentType = this.schema.asStruct();
    try {
      return structErrors.get();
    } finally {
      this.currentType = null;
    }
  }

  @Override
  public List<String> struct(Types.StructType readStruct, Iterable<List<String>> fieldErrorLists) {
    // BUG FIX: the original checked readStruct (the struct being visited, which
    // is never null) instead of currentType, which is null unless traversal
    // started at schema() and is dereferenced on the next line. The message and
    // the parallel check in ReassignIds.struct() show currentType is intended.
    Preconditions.checkNotNull(currentType, "Evaluation must start with a schema.");

    if (!currentType.isStructType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a struct", currentType));
    }

    List<String> errors = Lists.newArrayList();
    for (List<String> fieldErrors : fieldErrorLists) {
      errors.addAll(fieldErrors);
    }

    // detect reordered fields
    if (checkOrdering) {
      Types.StructType struct = currentType.asStructType();
      List<Types.NestedField> fields = struct.fields();
      // map each write-schema field ID to its position in the write struct
      Map<Integer, Integer> idToOrdinal = Maps.newHashMap();
      for (int i = 0; i < fields.size(); i += 1) {
        idToOrdinal.put(fields.get(i).fieldId(), i);
      }

      // read fields must appear in non-decreasing write-schema ordinal order
      int lastOrdinal = -1;
      for (Types.NestedField readField : readStruct.fields()) {
        int id = readField.fieldId();
        Types.NestedField field = struct.field(id);
        if (field != null) {
          int ordinal = idToOrdinal.get(id);
          if (lastOrdinal >= ordinal) {
            errors.add(
                readField.name() + " is out of order, before " + fields.get(lastOrdinal).name());
          }
          lastOrdinal = ordinal;
        }
      }
    }

    return errors;
  }

  @Override
  public List<String> field(Types.NestedField readField, Supplier<List<String>> fieldErrors) {
    Types.StructType struct = currentType.asStructType();
    Types.NestedField field = struct.field(readField.fieldId());
    List<String> errors = Lists.newArrayList();

    if (field == null) {
      if (readField.isRequired()) {
        return ImmutableList.of(readField.name() + " is required, but is missing");
      }
      // if the field is optional, it will be read as nulls
      return NO_ERRORS;
    }

    this.currentType = field.type();
    try {
      if (readField.isRequired() && field.isOptional()) {
        errors.add(readField.name() + " should be required, but is optional");
      }

      // prefix nested errors with this field's name
      for (String error : fieldErrors.get()) {
        if (error.startsWith(":")) {
          // this is the last field name before the error message
          errors.add(readField.name() + error);
        } else {
          // this has a nested field, add '.' for nesting
          errors.add(readField.name() + "." + error);
        }
      }

      return errors;
    } finally {
      this.currentType = struct;
    }
  }

  @Override
  public List<String> list(Types.ListType readList, Supplier<List<String>> elementErrors) {
    if (!currentType.isListType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a list", currentType));
    }

    Types.ListType list = currentType.asNestedType().asListType();
    List<String> errors = Lists.newArrayList();

    this.currentType = list.elementType();
    try {
      if (readList.isElementRequired() && list.isElementOptional()) {
        errors.add(": elements should be required, but are optional");
      }

      errors.addAll(elementErrors.get());

      return errors;
    } finally {
      this.currentType = list;
    }
  }

  @Override
  public List<String> map(Types.MapType readMap, Supplier<List<String>> keyErrors,
                          Supplier<List<String>> valueErrors) {
    if (!currentType.isMapType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a map", currentType));
    }

    Types.MapType map = currentType.asNestedType().asMapType();
    List<String> errors = Lists.newArrayList();

    try {
      if (readMap.isValueRequired() && map.isValueOptional()) {
        errors.add(": values should be required, but are optional");
      }

      // check the key, then the value, repositioning the cursor for each
      this.currentType = map.keyType();
      errors.addAll(keyErrors.get());

      this.currentType = map.valueType();
      errors.addAll(valueErrors.get());

      return errors;
    } finally {
      this.currentType = map;
    }
  }

  @Override
  public List<String> primitive(Type.PrimitiveType readPrimitive) {
    if (currentType.equals(readPrimitive)) {
      return NO_ERRORS;
    }

    if (!currentType.isPrimitiveType()) {
      return ImmutableList.of(String.format(": %s cannot be read as a %s",
          currentType.typeId().toString().toLowerCase(Locale.ENGLISH), readPrimitive));
    }

    if (!isPromotionAllowed(currentType.asPrimitiveType(), readPrimitive)) {
      return ImmutableList.of(String.format(": %s cannot be promoted to %s",
          currentType, readPrimitive));
    }

    // both are primitives and promotion is allowed to the read type
    return NO_ERRORS;
  }
}
6,453
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Types.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.types; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.netflix.iceberg.types.Type.NestedType; import com.netflix.iceberg.types.Type.PrimitiveType; import java.io.Serializable; import java.util.Arrays; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.regex.Matcher; import java.util.regex.Pattern; public class Types { private static final ImmutableMap<String, PrimitiveType> TYPES = ImmutableMap .<String, PrimitiveType>builder() .put(BooleanType.get().toString(), BooleanType.get()) .put(IntegerType.get().toString(), IntegerType.get()) .put(LongType.get().toString(), LongType.get()) .put(FloatType.get().toString(), FloatType.get()) .put(DoubleType.get().toString(), DoubleType.get()) .put(DateType.get().toString(), DateType.get()) .put(TimeType.get().toString(), TimeType.get()) .put(TimestampType.withZone().toString(), TimestampType.withZone()) .put(TimestampType.withoutZone().toString(), TimestampType.withoutZone()) .put(StringType.get().toString(), StringType.get()) .put(UUIDType.get().toString(), UUIDType.get()) .put(BinaryType.get().toString(), BinaryType.get()) .build(); private static final Pattern FIXED = Pattern.compile("fixed\\[(\\d+)\\]"); private static final 
Pattern DECIMAL = Pattern.compile("decimal\\((\\d+),\\s+(\\d+)\\)"); public static PrimitiveType fromPrimitiveString(String typeString) { String lowerTypeString = typeString.toLowerCase(Locale.ENGLISH); if (TYPES.containsKey(lowerTypeString)) { return TYPES.get(lowerTypeString); } Matcher fixed = FIXED.matcher(lowerTypeString); if (fixed.matches()) { return FixedType.ofLength(Integer.parseInt(fixed.group(1))); } Matcher decimal = DECIMAL.matcher(lowerTypeString); if (decimal.matches()) { return DecimalType.of( Integer.parseInt(decimal.group(1)), Integer.parseInt(decimal.group(2))); } throw new IllegalArgumentException("Cannot parse type string to primitive: " + typeString); } public static class BooleanType extends PrimitiveType { private static final BooleanType INSTANCE = new BooleanType(); public static BooleanType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.BOOLEAN; } @Override public String toString() { return "boolean"; } } public static class IntegerType extends PrimitiveType { private static final IntegerType INSTANCE = new IntegerType(); public static IntegerType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.INTEGER; } @Override public String toString() { return "int"; } } public static class LongType extends PrimitiveType { private static final LongType INSTANCE = new LongType(); public static LongType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.LONG; } @Override public String toString() { return "long"; } } public static class FloatType extends PrimitiveType { private static final FloatType INSTANCE = new FloatType(); public static FloatType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.FLOAT; } @Override public String toString() { return "float"; } } public static class DoubleType extends PrimitiveType { private static final DoubleType INSTANCE = new DoubleType(); public static DoubleType get() { return INSTANCE; } @Override public 
TypeID typeId() { return TypeID.DOUBLE; } @Override public String toString() { return "double"; } } public static class DateType extends PrimitiveType { private static final DateType INSTANCE = new DateType(); public static DateType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.DATE; } @Override public String toString() { return "date"; } } public static class TimeType extends PrimitiveType { private static final TimeType INSTANCE = new TimeType(); public static TimeType get() { return INSTANCE; } private TimeType() { } @Override public TypeID typeId() { return TypeID.TIME; } @Override public String toString() { return "time"; } } public static class TimestampType extends PrimitiveType { private static final TimestampType INSTANCE_WITH_ZONE = new TimestampType(true); private static final TimestampType INSTANCE_WITHOUT_ZONE = new TimestampType(false); public static TimestampType withZone() { return INSTANCE_WITH_ZONE; } public static TimestampType withoutZone() { return INSTANCE_WITHOUT_ZONE; } private final boolean adjustToUTC; private TimestampType(boolean adjustToUTC) { this.adjustToUTC = adjustToUTC; } public boolean shouldAdjustToUTC() { return adjustToUTC; } @Override public TypeID typeId() { return TypeID.TIMESTAMP; } @Override public String toString() { if (shouldAdjustToUTC()) { return "timestamptz"; } else { return "timestamp"; } } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } TimestampType timestampType = (TimestampType) o; return adjustToUTC == timestampType.adjustToUTC; } @Override public int hashCode() { return Objects.hash(TimestampType.class, adjustToUTC); } } public static class StringType extends PrimitiveType { private static final StringType INSTANCE = new StringType(); public static StringType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.STRING; } @Override public String toString() { 
return "string"; } } public static class UUIDType extends PrimitiveType { private static final UUIDType INSTANCE = new UUIDType(); public static UUIDType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.UUID; } @Override public String toString() { return "uuid"; } } public static class FixedType extends PrimitiveType { public static FixedType ofLength(int length) { return new FixedType(length); } private final int length; private FixedType(int length) { this.length = length; } public int length() { return length; } @Override public TypeID typeId() { return TypeID.FIXED; } @Override public String toString() { return String.format("fixed[%d]", length); } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } FixedType fixedType = (FixedType) o; return length == fixedType.length; } @Override public int hashCode() { return Objects.hash(FixedType.class, length); } } public static class BinaryType extends PrimitiveType { private static final BinaryType INSTANCE = new BinaryType(); public static BinaryType get() { return INSTANCE; } @Override public TypeID typeId() { return TypeID.BINARY; } @Override public String toString() { return "binary"; } } public static class DecimalType extends PrimitiveType { public static DecimalType of(int precision, int scale) { return new DecimalType(precision, scale); } private final int scale; private final int precision; private DecimalType(int precision, int scale) { Preconditions.checkArgument(precision <= 38, "Decimals with precision larger than 38 are not supported: %s", precision); this.scale = scale; this.precision = precision; } public int scale() { return scale; } public int precision() { return precision; } @Override public TypeID typeId() { return TypeID.DECIMAL; } @Override public String toString() { return String.format("decimal(%d, %d)", precision, scale); } @Override public boolean equals(Object o) { if (this 
== o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } DecimalType that = (DecimalType) o; if (scale != that.scale) { return false; } return precision == that.precision; } @Override public int hashCode() { return Objects.hash(DecimalType.class, scale, precision); } } public static class NestedField implements Serializable { public static NestedField optional(int id, String name, Type type) { return new NestedField(true, id, name, type); } public static NestedField required(int id, String name, Type type) { return new NestedField(false, id, name, type); } private final boolean isOptional; private final int id; private final String name; private final Type type; private NestedField(boolean isOptional, int id, String name, Type type) { Preconditions.checkNotNull(name, "Name cannot be null"); Preconditions.checkNotNull(type, "Type cannot be null"); this.isOptional = isOptional; this.id = id; this.name = name; this.type = type; } public boolean isOptional() { return isOptional; } public boolean isRequired() { return !isOptional; } public int fieldId() { return id; } public String name() { return name; } public Type type() { return type; } @Override public String toString() { return String.format("%d: %s: %s %s", id, name, isOptional ? "optional" : "required", type); } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } NestedField that = (NestedField) o; if (isOptional != that.isOptional) { return false; } else if (id != that.id) { return false; } else if (!name.equals(that.name)) { return false; } return type.equals(that.type); } @Override public int hashCode() { return Objects.hash(NestedField.class, id, isOptional, name, type); } } public static class StructType extends NestedType { private static final Joiner FIELD_SEP = Joiner.on(", "); public static StructType of(NestedField... 
fields) { return of(Arrays.asList(fields)); } public static StructType of(List<NestedField> fields) { return new StructType(fields); } private final NestedField[] fields; // lazy values private transient List<NestedField> fieldList = null; private transient Map<String, NestedField> fieldsByName = null; private transient Map<Integer, NestedField> fieldsById = null; private StructType(List<NestedField> fields) { Preconditions.checkNotNull(fields, "Field list cannot be null"); this.fields = new NestedField[fields.size()]; for (int i = 0; i < this.fields.length; i += 1) { this.fields[i] = fields.get(i); } } @Override public List<NestedField> fields() { return lazyFieldList(); } public NestedField field(String name) { return lazyFieldsByName().get(name); } @Override public Type fieldType(String name) { NestedField field = field(name); if (field != null) { return field.type(); } return null; } @Override public NestedField field(int id) { return lazyFieldsById().get(id); } @Override public TypeID typeId() { return TypeID.STRUCT; } @Override public boolean isStructType() { return true; } @Override public Types.StructType asStructType() { return this; } @Override public String toString() { return String.format("struct<%s>", FIELD_SEP.join(fields)); } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } StructType that = (StructType) o; return Arrays.equals(fields, that.fields); } @Override public int hashCode() { return Objects.hash(NestedField.class, Arrays.hashCode(fields)); } private List<NestedField> lazyFieldList() { if (fieldList == null) { this.fieldList = ImmutableList.copyOf(fields); } return fieldList; } private Map<String, NestedField> lazyFieldsByName() { if (fieldsByName == null) { indexFields(); } return fieldsByName; } private Map<Integer, NestedField> lazyFieldsById() { if (fieldsById == null) { indexFields(); } return fieldsById; } private void indexFields() { 
ImmutableMap.Builder<String, NestedField> byNameBuilder = ImmutableMap.builder(); ImmutableMap.Builder<Integer, NestedField> byIdBuilder = ImmutableMap.builder(); for (NestedField field : fields) { byNameBuilder.put(field.name(), field); byIdBuilder.put(field.fieldId(), field); } this.fieldsByName = byNameBuilder.build(); this.fieldsById = byIdBuilder.build(); } } public static class ListType extends NestedType { public static ListType ofOptional(int elementId, Type elementType) { Preconditions.checkNotNull(elementType, "Element type cannot be null"); return new ListType(NestedField.optional(elementId, "element", elementType)); } public static ListType ofRequired(int elementId, Type elementType) { Preconditions.checkNotNull(elementType, "Element type cannot be null"); return new ListType(NestedField.required(elementId, "element", elementType)); } private final NestedField elementField; private transient List<NestedField> fields = null; private ListType(NestedField elementField) { this.elementField = elementField; } public Type elementType() { return elementField.type(); } @Override public Type fieldType(String name) { if ("element".equals(name)) { return elementType(); } return null; } @Override public NestedField field(int id) { if (elementField.fieldId() == id) { return elementField; } return null; } @Override public List<NestedField> fields() { return lazyFieldList(); } public int elementId() { return elementField.fieldId(); } public boolean isElementRequired() { return !elementField.isOptional; } public boolean isElementOptional() { return elementField.isOptional; } @Override public TypeID typeId() { return TypeID.LIST; } @Override public boolean isListType() { return true; } @Override public Types.ListType asListType() { return this; } @Override public String toString() { return String.format("list<%s>", elementField.type()); } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return 
false; } ListType listType = (ListType) o; return elementField.equals(listType.elementField); } @Override public int hashCode() { return Objects.hash(ListType.class, elementField); } private List<NestedField> lazyFieldList() { if (fields == null) { this.fields = ImmutableList.of(elementField); } return fields; } } public static class MapType extends NestedType { public static MapType ofOptional(int keyId, int valueId, Type keyType, Type valueType) { Preconditions.checkNotNull(valueType, "Value type cannot be null"); return new MapType( NestedField.required(keyId, "key", keyType), NestedField.optional(valueId, "value", valueType)); } public static MapType ofRequired(int keyId, int valueId, Type keyType, Type valueType) { Preconditions.checkNotNull(valueType, "Value type cannot be null"); return new MapType( NestedField.required(keyId, "key", keyType), NestedField.required(valueId, "value", valueType)); } private final NestedField keyField; private final NestedField valueField; private transient List<NestedField> fields = null; private MapType(NestedField keyField, NestedField valueField) { this.keyField = keyField; this.valueField = valueField; } public Type keyType() { return keyField.type(); } public Type valueType() { return valueField.type(); } @Override public Type fieldType(String name) { if ("key".equals(name)) { return keyField.type(); } else if ("value".equals(name)) { return valueField.type(); } return null; } @Override public NestedField field(int id) { if (keyField.fieldId() == id) { return keyField; } else if (valueField.fieldId() == id) { return valueField; } return null; } @Override public List<NestedField> fields() { return lazyFieldList(); } public int keyId() { return keyField.fieldId(); } public int valueId() { return valueField.fieldId(); } public boolean isValueRequired() { return !valueField.isOptional; } public boolean isValueOptional() { return valueField.isOptional; } @Override public TypeID typeId() { return TypeID.MAP; } @Override public 
boolean isMapType() { return true; } @Override public Types.MapType asMapType() { return this; } @Override public String toString() { return String.format("map<%s, %s>", keyField.type(), valueField.type()); } @Override public boolean equals(Object o) { if (this == o) { return true; } else if (o == null || getClass() != o.getClass()) { return false; } MapType mapType = (MapType) o; if (!keyField.equals(mapType.keyField)) { return false; } return valueField.equals(mapType.valueField); } @Override public int hashCode() { return Objects.hash(MapType.class, keyField, valueField); } private List<NestedField> lazyFieldList() { if (fields == null) { this.fields = ImmutableList.of(keyField, valueField); } return fields; } } }
6,454
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/TypeUtil.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.types; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.netflix.iceberg.Schema; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; import java.util.function.Supplier; public class TypeUtil { public static Schema select(Schema schema, Set<Integer> fieldIds) { Preconditions.checkNotNull(schema, "Schema cannot be null"); Preconditions.checkNotNull(fieldIds, "Field ids cannot be null"); Type result = visit(schema, new PruneColumns(fieldIds)); if (schema.asStruct() == result) { return schema; } else if (result != null) { if (schema.getAliases() != null) { return new Schema(result.asNestedType().fields(), schema.getAliases()); } else { return new Schema(result.asNestedType().fields()); } } return new Schema(ImmutableList.of(), schema.getAliases()); } public static Set<Integer> getProjectedIds(Schema schema) { return visit(schema, new GetProjectedIds()); } public static Set<Integer> getProjectedIds(Type schema) { if (schema.isPrimitiveType()) { return ImmutableSet.of(); } return ImmutableSet.copyOf(visit(schema, new GetProjectedIds())); } public static Schema selectNot(Schema schema, Set<Integer> fieldIds) { 
Set<Integer> projectedIds = getProjectedIds(schema); projectedIds.removeAll(fieldIds); return select(schema, projectedIds); } public static Schema join(Schema left, Schema right) { List<Types.NestedField> joinedColumns = Lists.newArrayList(); joinedColumns.addAll(left.columns()); joinedColumns.addAll(right.columns()); return new Schema(joinedColumns); } public static Map<String, Integer> indexByName(Types.StructType struct) { return visit(struct, new IndexByName()); } public static Map<Integer, Types.NestedField> indexById(Types.StructType struct) { return visit(struct, new IndexById()); } /** * Assigns fresh ids from the {@link NextID nextId function} for all fields in a type. * * @param type a type * @param nextId an id assignment function * @return an structurally identical type with new ids assigned by the nextId function */ public static Type assignFreshIds(Type type, NextID nextId) { return TypeUtil.visit(type, new AssignFreshIds(nextId)); } /** * Assigns fresh ids from the {@link NextID nextId function} for all fields in a schema. * * @param schema a schema * @param nextId an id assignment function * @return an structurally identical schema with new ids assigned by the nextId function */ public static Schema assignFreshIds(Schema schema, NextID nextId) { return new Schema(TypeUtil .visit(schema.asStruct(), new AssignFreshIds(nextId)) .asNestedType() .fields()); } /** * Reassigns ids in a schema from another schema. * <p> * Ids are determined by field names. If a field in the schema cannot be found in the source * schema, this will throw IllegalArgumentException. * <p> * This will not alter a schema's structure, nullability, or types. 
* * @param schema the schema to have ids reassigned * @param idSourceSchema the schema from which field ids will be used * @return an structurally identical schema with field ids matching the source schema * @throws IllegalArgumentException if a field cannot be found (by name) in the source schema */ public static Schema reassignIds(Schema schema, Schema idSourceSchema) { Types.StructType struct = visit(schema, new ReassignIds(idSourceSchema)).asStructType(); return new Schema(struct.fields()); } public static Type find(Schema schema, Predicate<Type> predicate) { return visit(schema, new FindTypeVisitor(predicate)); } public static boolean isPromotionAllowed(Type from, Type.PrimitiveType to) { // Warning! Before changing this function, make sure that the type change doesn't introduce // compatibility problems in partitioning. if (from.equals(to)) { return true; } switch (from.typeId()) { case INTEGER: return to == Types.LongType.get(); case FLOAT: return to == Types.DoubleType.get(); case DECIMAL: Types.DecimalType fromDecimal = (Types.DecimalType) from; if (to.typeId() != Type.TypeID.DECIMAL) { return false; } Types.DecimalType toDecimal = (Types.DecimalType) to; return (fromDecimal.scale() == toDecimal.scale() && fromDecimal.precision() <= toDecimal.precision()); } return false; } /** * Interface for passing a function that assigns column IDs. 
*/ public interface NextID { int get(); } public static class SchemaVisitor<T> { protected LinkedList<String> fieldNames = Lists.newLinkedList(); protected LinkedList<Integer> fieldIds = Lists.newLinkedList(); public T schema(Schema schema, T structResult) { return null; } public T struct(Types.StructType struct, List<T> fieldResults) { return null; } public T field(Types.NestedField field, T fieldResult) { return null; } public T list(Types.ListType list, T elementResult) { return null; } public T map(Types.MapType map, T keyResult, T valueResult) { return null; } public T primitive(Type.PrimitiveType primitive) { return null; } } public static <T> T visit(Schema schema, SchemaVisitor<T> visitor) { return visitor.schema(schema, visit(schema.asStruct(), visitor)); } public static <T> T visit(Type type, SchemaVisitor<T> visitor) { switch (type.typeId()) { case STRUCT: Types.StructType struct = type.asNestedType().asStructType(); List<T> results = Lists.newArrayListWithExpectedSize(struct.fields().size()); for (Types.NestedField field : struct.fields()) { visitor.fieldIds.push(field.fieldId()); visitor.fieldNames.push(field.name()); T result; try { result = visit(field.type(), visitor); } finally { visitor.fieldIds.pop(); visitor.fieldNames.pop(); } results.add(visitor.field(field, result)); } return visitor.struct(struct, results); case LIST: Types.ListType list = type.asNestedType().asListType(); T elementResult; visitor.fieldIds.push(list.elementId()); try { elementResult = visit(list.elementType(), visitor); } finally { visitor.fieldIds.pop(); } return visitor.list(list, elementResult); case MAP: Types.MapType map = type.asNestedType().asMapType(); T keyResult; T valueResult; visitor.fieldIds.push(map.keyId()); try { keyResult = visit(map.keyType(), visitor); } finally { visitor.fieldIds.pop(); } visitor.fieldIds.push(map.valueId()); try { valueResult = visit(map.valueType(), visitor); } finally { visitor.fieldIds.pop(); } return visitor.map(map, keyResult, 
valueResult); default: return visitor.primitive(type.asPrimitiveType()); } } public static class CustomOrderSchemaVisitor<T> { public T schema(Schema schema, Supplier<T> structResult) { return null; } public T struct(Types.StructType struct, Iterable<T> fieldResults) { return null; } public T field(Types.NestedField field, Supplier<T> fieldResult) { return null; } public T list(Types.ListType list, Supplier<T> elementResult) { return null; } public T map(Types.MapType map, Supplier<T> keyResult, Supplier<T> valueResult) { return null; } public T primitive(Type.PrimitiveType primitive) { return null; } } private static class VisitFuture<T> implements Supplier<T> { private final Type type; private final CustomOrderSchemaVisitor<T> visitor; private VisitFuture(Type type, CustomOrderSchemaVisitor<T> visitor) { this.type = type; this.visitor = visitor; } @Override public T get() { return visit(type, visitor); } } private static class VisitFieldFuture<T> implements Supplier<T> { private final Types.NestedField field; private final CustomOrderSchemaVisitor<T> visitor; private VisitFieldFuture(Types.NestedField field, CustomOrderSchemaVisitor<T> visitor) { this.field = field; this.visitor = visitor; } @Override public T get() { return visitor.field(field, new VisitFuture<>(field.type(), visitor)); } } public static <T> T visit(Schema schema, CustomOrderSchemaVisitor<T> visitor) { return visitor.schema(schema, new VisitFuture<>(schema.asStruct(), visitor)); } /** * Used to traverse types with traversals other than pre-order. * <p> * This passes a {@link Supplier} to each {@link CustomOrderSchemaVisitor visitor} method that * returns the result of traversing child types. Structs are passed an {@link Iterable} that * traverses child fields during iteration. * <p> * An example use is assigning column IDs, which should be done with a post-order traversal. 
* * @param type a type to traverse with a visitor * @param visitor a custom order visitor * @param <T> the type returned by the visitor * @return the result of traversing the given type with the visitor */ public static <T> T visit(Type type, CustomOrderSchemaVisitor<T> visitor) { switch (type.typeId()) { case STRUCT: Types.StructType struct = type.asNestedType().asStructType(); List<VisitFieldFuture<T>> results = Lists .newArrayListWithExpectedSize(struct.fields().size()); for (Types.NestedField field : struct.fields()) { results.add( new VisitFieldFuture<>(field, visitor)); } return visitor.struct(struct, Iterables.transform(results, VisitFieldFuture::get)); case LIST: Types.ListType list = type.asNestedType().asListType(); return visitor.list(list, new VisitFuture<>(list.elementType(), visitor)); case MAP: Types.MapType map = type.asNestedType().asMapType(); return visitor.map(map, new VisitFuture<>(map.keyType(), visitor), new VisitFuture<>(map.valueType(), visitor)); default: return visitor.primitive(type.asPrimitiveType()); } } static int decimalMaxPrecision(int numBytes) { Preconditions.checkArgument(numBytes >= 0 && numBytes < 24, "Unsupported decimal length: " + numBytes); return MAX_PRECISION[numBytes]; } public static int decimalRequriedBytes(int precision) { Preconditions.checkArgument(precision >= 0 && precision < 40, "Unsupported decimal precision: " + precision); return REQUIRED_LENGTH[precision]; } private static int[] MAX_PRECISION = new int[24]; private static int[] REQUIRED_LENGTH = new int[40]; static { // for each length, calculate the max precision for (int len = 0; len < MAX_PRECISION.length; len += 1) { MAX_PRECISION[len] = (int) Math.floor(Math.log10(Math.pow(2, 8*len - 1) - 1)); } // for each precision, find the first length that can hold it for (int precision = 0; precision < REQUIRED_LENGTH.length; precision += 1) { REQUIRED_LENGTH[precision] = -1; for (int len = 0; len < MAX_PRECISION.length; len += 1) { // find the first length that 
can hold the precision if (precision <= MAX_PRECISION[len]) { REQUIRED_LENGTH[precision] = len; break; } } if (REQUIRED_LENGTH[precision] < 0) { throw new IllegalStateException( "Could not find required length for precision " + precision); } } } }
6,455
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Conversions.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.types; import com.google.common.base.Charsets; import com.netflix.iceberg.exceptions.RuntimeIOException; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.CharBuffer; import java.nio.charset.CharacterCodingException; import java.nio.charset.CharsetDecoder; import java.nio.charset.CharsetEncoder; import java.util.Arrays; import java.util.UUID; public class Conversions { private static final String HIVE_NULL = "__HIVE_DEFAULT_PARTITION__"; public static Object fromPartitionString(Type type, String asString) { if (asString == null || HIVE_NULL.equals(asString)) { return null; } switch (type.typeId()) { case BOOLEAN: return Boolean.valueOf(asString); case INTEGER: return Integer.valueOf(asString); case LONG: return Long.valueOf(asString); case FLOAT: return Long.valueOf(asString); case DOUBLE: return Double.valueOf(asString); case STRING: return asString; case UUID: return UUID.fromString(asString); case FIXED: Types.FixedType fixed = (Types.FixedType) type; return Arrays.copyOf( asString.getBytes(Charsets.UTF_8), fixed.length()); case BINARY: return asString.getBytes(Charsets.UTF_8); case DECIMAL: return new BigDecimal(asString); default: throw new UnsupportedOperationException( "Unsupported type for fromPartitionString: " + type); } } private static final ThreadLocal<CharsetEncoder> ENCODER = 
ThreadLocal.withInitial(Charsets.UTF_8::newEncoder); private static final ThreadLocal<CharsetDecoder> DECODER = ThreadLocal.withInitial(Charsets.UTF_8::newDecoder); public static ByteBuffer toByteBuffer(Type type, Object value) { switch (type.typeId()) { case BOOLEAN: return ByteBuffer.allocate(1).put(0, (Boolean) value ? (byte) 0x01 : (byte) 0x00); case INTEGER: case DATE: return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putInt(0, (int) value); case LONG: case TIME: case TIMESTAMP: return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, (long) value); case FLOAT: return ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN).putFloat(0, (float) value); case DOUBLE: return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putDouble(0, (double) value); case STRING: CharBuffer buffer = CharBuffer.wrap((CharSequence) value); try { return ENCODER.get().encode(buffer); } catch (CharacterCodingException e) { throw new RuntimeIOException(e, "Failed to encode value as UTF-8: " + value); } case UUID: UUID uuid = (UUID) value; return ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN) .putLong(0, uuid.getMostSignificantBits()) .putLong(1, uuid.getLeastSignificantBits()); case FIXED: case BINARY: return (ByteBuffer) value; case DECIMAL: return ByteBuffer.wrap(((BigDecimal) value).unscaledValue().toByteArray()); default: throw new UnsupportedOperationException("Cannot serialize type: " + type); } } @SuppressWarnings("unchecked") public static <T> T fromByteBuffer(Type type, ByteBuffer buffer) { return (T) internalFromByteBuffer(type, buffer); } private static Object internalFromByteBuffer(Type type, ByteBuffer buffer) { ByteBuffer tmp = buffer.duplicate().order(ByteOrder.LITTLE_ENDIAN); switch (type.typeId()) { case BOOLEAN: return (tmp.get() != 0x00); case INTEGER: case DATE: return tmp.getInt(); case LONG: case TIME: case TIMESTAMP: if (tmp.remaining() < 8) { // type was later promoted to long return (long) tmp.getInt(); } return 
tmp.getLong(); case FLOAT: return tmp.getFloat(); case DOUBLE: if (tmp.remaining() < 8) { // type was later promoted to long return (double) tmp.getFloat(); } return tmp.getDouble(); case STRING: try { return DECODER.get().decode(tmp); } catch (CharacterCodingException e) { throw new RuntimeIOException(e, "Failed to decode value as UTF-8: " + buffer); } case UUID: long mostSigBits = tmp.getLong(); long leastSigBits = tmp.getLong(); return new UUID(mostSigBits, leastSigBits); case FIXED: case BINARY: return tmp; case DECIMAL: Types.DecimalType decimal = (Types.DecimalType) type; byte[] unscaledBytes = new byte[buffer.remaining()]; tmp.get(unscaledBytes); return new BigDecimal(new BigInteger(unscaledBytes), decimal.scale()); default: throw new UnsupportedOperationException("Cannot deserialize type: " + type); } } }
6,456
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/Comparators.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.types;

import com.google.common.collect.ImmutableMap;
import java.nio.ByteBuffer;
import java.util.Comparator;

/**
 * Factory for {@link Comparator} instances that order Iceberg values.
 * <p>
 * Most primitive types use their natural ordering; strings are compared as char sequences and
 * binary values are compared as unsigned bytes.
 */
public class Comparators {

  private static final ImmutableMap<Type.PrimitiveType, Comparator<?>> COMPARATORS = ImmutableMap
      .<Type.PrimitiveType, Comparator<?>>builder()
      .put(Types.BooleanType.get(), Comparator.naturalOrder())
      .put(Types.IntegerType.get(), Comparator.naturalOrder())
      .put(Types.LongType.get(), Comparator.naturalOrder())
      .put(Types.FloatType.get(), Comparator.naturalOrder())
      .put(Types.DoubleType.get(), Comparator.naturalOrder())
      .put(Types.DateType.get(), Comparator.naturalOrder())
      .put(Types.TimeType.get(), Comparator.naturalOrder())
      .put(Types.TimestampType.withZone(), Comparator.naturalOrder())
      .put(Types.TimestampType.withoutZone(), Comparator.naturalOrder())
      .put(Types.StringType.get(), Comparators.charSequences())
      .put(Types.UUIDType.get(), Comparator.naturalOrder())
      .put(Types.BinaryType.get(), Comparators.unsignedBytes())
      .build();

  /**
   * Returns a comparator for values of the given primitive type.
   *
   * @param type a primitive type
   * @param <T> the Java class of values of the type
   * @return a comparator for values of the type
   * @throws UnsupportedOperationException if no comparator is known for the type
   */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> forType(Type.PrimitiveType type) {
    Comparator<?> cmp = COMPARATORS.get(type);
    if (cmp != null) {
      return (Comparator<T>) cmp;
    }

    // fixed and decimal are parameterized (length, precision/scale) and so cannot be
    // looked up in the map by a single instance
    if (type instanceof Types.FixedType) {
      return (Comparator<T>) Comparators.unsignedBytes();
    } else if (type instanceof Types.DecimalType) {
      return (Comparator<T>) Comparator.naturalOrder();
    }

    throw new UnsupportedOperationException("Cannot determine comparator for type: " + type);
  }

  /** Returns a comparator that orders byte buffers lexicographically by unsigned byte value. */
  public static Comparator<ByteBuffer> unsignedBytes() {
    return UnsignedByteBufComparator.INSTANCE;
  }

  /** Returns a comparator that orders byte buffers by their natural (signed) ordering. */
  public static Comparator<ByteBuffer> signedBytes() {
    return Comparator.naturalOrder();
  }

  /** Returns a comparator that orders null before non-null and ties all other pairs. */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> nullsFirst() {
    return (Comparator<T>) NullsFirst.INSTANCE;
  }

  /** Returns a comparator that orders null after non-null and ties all other pairs. */
  @SuppressWarnings("unchecked")
  public static <T> Comparator<T> nullsLast() {
    return (Comparator<T>) NullsLast.INSTANCE;
  }

  /** Returns a comparator that orders char sequences by char value, shorter first on a tie. */
  public static Comparator<CharSequence> charSequences() {
    return CharSeqComparator.INSTANCE;
  }

  /**
   * Orders null before non-null and considers any two non-null (or two null) values equal.
   * Chain a value comparator with {@link #thenComparing(Comparator)} to break the non-null ties.
   */
  private static class NullsFirst<T> implements Comparator<T> {
    private static final NullsFirst<?> INSTANCE = new NullsFirst<>();

    private NullsFirst() {
    }

    @Override
    public int compare(T o1, T o2) {
      boolean leftIsNull = o1 == null;
      boolean rightIsNull = o2 == null;
      if (leftIsNull == rightIsNull) {
        return 0;
      }
      return leftIsNull ? -1 : 1;
    }

    @Override
    public Comparator<T> thenComparing(Comparator<? super T> other) {
      // the default thenComparing would pass nulls to other; use a null-safe chain instead
      return new NullSafeChainedComparator<>(this, other);
    }
  }

  /** Mirror image of {@link NullsFirst}: null sorts after any non-null value. */
  private static class NullsLast<T> implements Comparator<T> {
    private static final NullsLast<?> INSTANCE = new NullsLast<>();

    private NullsLast() {
    }

    @Override
    public int compare(T o1, T o2) {
      boolean leftIsNull = o1 == null;
      boolean rightIsNull = o2 == null;
      if (leftIsNull == rightIsNull) {
        return 0;
      }
      return leftIsNull ? 1 : -1;
    }

    @Override
    public Comparator<T> thenComparing(Comparator<? super T> other) {
      return new NullSafeChainedComparator<>(this, other);
    }
  }

  /** Applies {@code second} only when {@code first} ties two non-null values. */
  private static class NullSafeChainedComparator<T> implements Comparator<T> {
    private final Comparator<T> first;
    private final Comparator<? super T> second;

    NullSafeChainedComparator(Comparator<T> first, Comparator<? super T> second) {
      this.first = first;
      this.second = second;
    }

    @Override
    public int compare(T o1, T o2) {
      int cmp = first.compare(o1, o2);
      if (cmp == 0 && o1 != null) {
        // a non-null tie from the null-handling comparator; defer to the value comparator
        return second.compare(o1, o2);
      }
      return cmp;
    }
  }

  /** Lexicographic unsigned-byte ordering over the remaining bytes of two buffers. */
  private static class UnsignedByteBufComparator implements Comparator<ByteBuffer> {
    private static final UnsignedByteBufComparator INSTANCE = new UnsignedByteBufComparator();

    private UnsignedByteBufComparator() {
    }

    @Override
    public int compare(ByteBuffer buf1, ByteBuffer buf2) {
      int len = Math.min(buf1.remaining(), buf2.remaining());
      int pos1 = buf1.position();
      int pos2 = buf2.position();

      // absolute gets so that comparing does not disturb either buffer's position
      for (int i = 0; i < len; i += 1) {
        int left = buf1.get(pos1 + i) & 0xff;   // masking yields the unsigned byte value
        int right = buf2.get(pos2 + i) & 0xff;
        if (left != right) {
          return Integer.compare(left, right);
        }
      }

      // the shared prefix is identical: the shorter buffer sorts first
      return Integer.compare(buf1.remaining(), buf2.remaining());
    }
  }

  /** Orders char sequences by char value; on a common prefix, the shorter sequence sorts first. */
  private static class CharSeqComparator implements Comparator<CharSequence> {
    private static final CharSeqComparator INSTANCE = new CharSeqComparator();

    private CharSeqComparator() {
    }

    @Override
    public int compare(CharSequence s1, CharSequence s2) {
      int len = Math.min(s1.length(), s2.length());

      for (int i = 0; i < len; i += 1) {
        int cmp = Character.compare(s1.charAt(i), s2.charAt(i));
        if (cmp != 0) {
          return cmp;
        }
      }

      // the shared prefix is identical: the shorter sequence sorts first
      return Integer.compare(s1.length(), s2.length());
    }
  }
}
6,457
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/IndexById.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.types; import com.google.common.collect.Maps; import com.netflix.iceberg.Schema; import java.util.List; import java.util.Map; class IndexById extends TypeUtil.SchemaVisitor<Map<Integer, Types.NestedField>> { private final Map<Integer, Types.NestedField> index = Maps.newHashMap(); @Override public Map<Integer, Types.NestedField> schema(Schema schema, Map<Integer, Types.NestedField> structResult) { return index; } @Override public Map<Integer, Types.NestedField> struct(Types.StructType struct, List<Map<Integer, Types.NestedField>> fieldResults) { return index; } @Override public Map<Integer, Types.NestedField> field(Types.NestedField field, Map<Integer, Types.NestedField> fieldResult) { index.put(field.fieldId(), field); return null; } @Override public Map<Integer, Types.NestedField> list(Types.ListType list, Map<Integer, Types.NestedField> elementResult) { for (Types.NestedField field : list.fields()) { index.put(field.fieldId(), field); } return null; } @Override public Map<Integer, Types.NestedField> map(Types.MapType map, Map<Integer, Types.NestedField> keyResult, Map<Integer, Types.NestedField> valueResult) { for (Types.NestedField field : map.fields()) { index.put(field.fieldId(), field); } return null; } }
6,458
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/types/FindTypeVisitor.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.types; import com.netflix.iceberg.Schema; import java.util.List; import java.util.function.Predicate; class FindTypeVisitor extends TypeUtil.SchemaVisitor<Type> { private final Predicate<Type> predicate; FindTypeVisitor(Predicate<Type> predicate) { this.predicate = predicate; } @Override public Type schema(Schema schema, Type structResult) { return structResult; } @Override public Type struct(Types.StructType struct, List<Type> fieldResults) { if (predicate.test(struct)) { return struct; } for (Type fieldType : fieldResults) { if (fieldType != null) { return fieldType; } } return null; } @Override public Type field(Types.NestedField field, Type fieldResult) { return fieldResult; } @Override public Type list(Types.ListType list, Type elementResult) { if (predicate.test(list)) { return list; } return elementResult; } @Override public Type map(Types.MapType map, Type keyResult, Type valueResult) { if (predicate.test(map)) { return map; } if (keyResult != null) { return keyResult; } return valueResult; } @Override public Type primitive(Type.PrimitiveType primitive) { if (predicate.test(primitive)) { return primitive; } return null; } }
6,459
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/OutputFile.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import com.netflix.iceberg.exceptions.AlreadyExistsException; import com.netflix.iceberg.exceptions.RuntimeIOException; import java.io.IOException; /** * An interface used to create output files using {@link PositionOutputStream} instances. * <p> * This class is based on Parquet's InputFile. */ public interface OutputFile { /** * Create a new file and return a {@link PositionOutputStream} to it. * <p> * If the file already exists, this will throw an exception. * * @return an output stream that can report its position * @throws AlreadyExistsException If the path already exists * @throws RuntimeIOException If the implementation throws an {@link IOException} */ PositionOutputStream create(); /** * Create a new file and return a {@link PositionOutputStream} to it. * <p> * If the file already exists, this will not throw an exception and will replace the file. * * @return an output stream that can report its position * @throws RuntimeIOException If the implementation throws an {@link IOException} */ PositionOutputStream createOrOverwrite(); /** * Return the location this output file will create. * * @return the location of this output file */ String location(); /** * Return an {@link InputFile} for the location of this output file. * * @return an input file for the location of this output file */ InputFile toInputFile(); }
6,460
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/PositionOutputStream.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import java.io.IOException; import java.io.OutputStream; public abstract class PositionOutputStream extends OutputStream { /** * Return the current position in the OutputStream. * * @return current position in bytes from the start of the stream * @throws IOException If the underlying stream throws IOException */ public abstract long getPos() throws IOException; }
6,461
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/SeekableInputStream.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import java.io.IOException; import java.io.InputStream; /** * {@code SeekableInputStream} is an interface with the methods needed to read data from a file or * Hadoop data stream. * * This class is based on Parquet's SeekableInputStream. */ public abstract class SeekableInputStream extends InputStream { /** * Return the current position in the InputStream. * * @return current position in bytes from the start of the stream * @throws IOException If the underlying stream throws IOException */ public abstract long getPos() throws IOException; /** * Seek to a new position in the InputStream. * * @param newPos the new position to seek to * @throws IOException If the underlying stream throws IOException */ public abstract void seek(long newPos) throws IOException; }
6,462
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/FileAppender.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import com.netflix.iceberg.Metrics; import java.io.Closeable; import java.util.Iterator; public interface FileAppender<D> extends Closeable { void add(D datum); default void addAll(Iterator<D> values) { while (values.hasNext()) { add(values.next()); } } default void addAll(Iterable<D> values) { addAll(values.iterator()); } /** * @return {@link Metrics} for this file. Only valid after the file is closed. */ Metrics metrics(); }
6,463
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/CloseableIterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.io;

import com.google.common.base.Preconditions;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.function.Function;

/**
 * An {@link Iterable} that holds resources and must be closed when iteration is finished.
 *
 * @param <T> the Java class of values produced by this iterable
 */
public interface CloseableIterable<T> extends Iterable<T>, Closeable {

  /** Wraps a plain iterable as a {@link CloseableIterable} whose close is a no-op. */
  static <E> CloseableIterable<E> withNoopClose(Iterable<E> iterable) {
    return new CloseableIterable<E>() {
      @Override
      public void close() {
      }

      @Override
      public Iterator<E> iterator() {
        return iterable.iterator();
      }
    };
  }

  /** Returns an empty {@link CloseableIterable} whose close is a no-op. */
  static <E> CloseableIterable<E> empty() {
    return new CloseableIterable<E>() {
      @Override
      public void close() {
      }

      @Override
      public Iterator<E> iterator() {
        return Collections.emptyIterator();
      }
    };
  }

  /**
   * Combines an iterable with a group of closeables that will all be closed when the result is
   * closed.
   */
  static <E> CloseableIterable<E> combine(Iterable<E> iterable, Iterable<Closeable> closeables) {
    return new CloseableGroup.ClosingIterable<>(iterable, closeables);
  }

  /**
   * Applies an iterable-level wrapper function while preserving the original iterable's close.
   */
  static <I, O> CloseableIterable<O> wrap(CloseableIterable<I> iterable,
                                          Function<Iterable<I>, Iterable<O>> wrap) {
    Iterable<O> wrappedIterable = wrap.apply(iterable);
    return new CloseableIterable<O>() {
      @Override
      public void close() throws IOException {
        iterable.close();
      }

      @Override
      public Iterator<O> iterator() {
        return wrappedIterable.iterator();
      }
    };
  }

  /**
   * Lazily applies an element-level transform while preserving the original iterable's close.
   */
  static <I, O> CloseableIterable<O> transform(CloseableIterable<I> iterable,
                                               Function<I, O> transform) {
    Preconditions.checkNotNull(transform, "Cannot apply a null transform");

    return new CloseableIterable<O>() {
      @Override
      public void close() throws IOException {
        iterable.close();
      }

      @Override
      public Iterator<O> iterator() {
        // a fresh source iterator per call, transformed element by element
        Iterator<I> source = iterable.iterator();
        return new Iterator<O>() {
          @Override
          public boolean hasNext() {
            return source.hasNext();
          }

          @Override
          public O next() {
            return transform.apply(source.next());
          }
        };
      }
    };
  }
}
6,464
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/CloseableGroup.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import com.google.common.collect.Lists; import java.io.Closeable; import java.io.IOException; import java.util.Iterator; import java.util.LinkedList; public abstract class CloseableGroup implements Closeable { private final LinkedList<Closeable> closeables = Lists.newLinkedList(); protected void addCloseable(Closeable closeable) { closeables.add(closeable); } @Override public void close() throws IOException { while (!closeables.isEmpty()) { Closeable toClose = closeables.removeFirst(); if (toClose != null) { toClose.close(); } } } static class ClosingIterable<T> extends CloseableGroup implements CloseableIterable<T> { private final Iterable<T> iterable; public ClosingIterable(Iterable<T> iterable, Iterable<Closeable> closeables) { this.iterable = iterable; for (Closeable closeable : closeables) { addCloseable(closeable); } } @Override public Iterator<T> iterator() { return iterable.iterator(); } } }
6,465
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/DelegatingInputStream.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import java.io.InputStream; public interface DelegatingInputStream { InputStream getDelegate(); }
6,466
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/DelegatingOutputStream.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import java.io.OutputStream; public interface DelegatingOutputStream { OutputStream getDelegate(); }
6,467
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/io/InputFile.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.io; import com.netflix.iceberg.exceptions.RuntimeIOException; import java.io.IOException; /** * An interface used to read input files using {@link SeekableInputStream} instances. * <p> * This class is based on Parquet's InputFile. */ public interface InputFile { /** * @return the total length of the file, in bytes * @throws RuntimeIOException If the implementation throws an {@link IOException} */ long getLength(); /** * Opens a new {@link SeekableInputStream} for the underlying data file * * @return a seekable stream for reading the file * @throws RuntimeIOException If the implementation throws an {@link IOException} */ SeekableInputStream newStream(); /** * The fully-qualified location of the input file as a String. * * @return the input file location */ String location(); }
6,468
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/ValidationException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; /** * Exception raised when validation checks fail. * <p> * For example, this is thrown when attempting to create a table with a {@link PartitionSpec} that * is not compatible with the table {@link Schema} */ public class ValidationException extends RuntimeException { public ValidationException(String message, Object... args) { super(String.format(message, args)); } public ValidationException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } public static void check(boolean test, String message, Object... args) { if (!test) { throw new ValidationException(message, args); } } }
6,469
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/AlreadyExistsException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when attempting to create a table that already exists. */ public class AlreadyExistsException extends RuntimeException { public AlreadyExistsException(String message, Object... args) { super(String.format(message, args)); } public AlreadyExistsException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
6,470
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/RuntimeIOException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; import java.io.IOException; /** * Exception used to wrap {@link IOException} as a {@link RuntimeException} and add context. */ public class RuntimeIOException extends RuntimeException { public RuntimeIOException(IOException e) { super(e); } public RuntimeIOException(IOException e, String message, Object... args) { super(String.format(message, args), e); } public RuntimeIOException(String message, Object...args) { super(String.format(message, args)); } }
6,471
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/NoSuchTableException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when attempting to load a table that does not exist. */ public class NoSuchTableException extends RuntimeException { public NoSuchTableException(String message, Object... args) { super(String.format(message, args)); } public NoSuchTableException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
6,472
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/exceptions/CommitFailedException.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.exceptions; /** * Exception raised when a commit fails because of out of date metadata. */ public class CommitFailedException extends RuntimeException { public CommitFailedException(String message, Object... args) { super(String.format(message, args)); } public CommitFailedException(Throwable cause, String message, Object... args) { super(String.format(message, args), cause); } }
6,473
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Transform.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import java.io.Serializable;

/**
 * A transform function used for partitioning.
 * <p>
 * Implementations of this interface can be used to transform values, check or types, and project
 * {@link BoundPredicate predicates} to predicates on partition values.
 *
 * @param <S> Java class of source values
 * @param <T> Java class of transformed values
 */
public interface Transform<S, T> extends Serializable {

  /**
   * Transforms a value to its corresponding partition value.
   *
   * @param value a source value
   * @return a transformed partition value
   */
  T apply(S value);

  /**
   * Checks whether this function can be applied to the give {@link Type}.
   *
   * @param type a type
   * @return true if this transform can be applied to the type, false otherwise
   */
  boolean canTransform(Type type);

  /**
   * Returns the {@link Type} produced by this transform given a source type.
   *
   * @param sourceType a type
   * @return the result type created by the apply method for the given type
   */
  Type getResultType(Type sourceType);

  /**
   * Transforms a {@link BoundPredicate predicate} to an inclusive predicate on the partition
   * values produced by {@link #apply(Object)}.
   * <p>
   * This inclusive transform guarantees that if pred(v) is true, then projected(apply(v)) is true.
   *
   * @param name the field name for partition values
   * @param predicate a predicate for source values
   * @return an inclusive predicate on partition values
   */
  UnboundPredicate<T> project(String name, BoundPredicate<S> predicate);

  /**
   * Transforms a {@link BoundPredicate predicate} to a strict predicate on the partition values
   * produced by {@link #apply(Object)}.
   * <p>
   * This strict transform guarantees that if strict(apply(v)) is true, then pred(v) is also true.
   *
   * @param name the field name for partition values
   * @param predicate a predicate for source values
   * @return an inclusive predicate on partition values
   */
  UnboundPredicate<T> projectStrict(String name, BoundPredicate<S> predicate);

  /**
   * Returns a human-readable String representation of a transformed value.
   * <p>
   * null values will return "null"
   *
   * @param value a transformed value
   * @return a human-readable String representation of the value
   */
  default String toHumanString(T value) {
    return value == null ? "null" : value.toString();
  }
}
6,474
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/ProjectionUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.UnboundPredicate;
import java.math.BigDecimal;
import java.math.BigInteger;

import static com.netflix.iceberg.expressions.Expressions.predicate;

/**
 * Helpers that project comparison predicates through truncation-style transforms.
 * <p>
 * Open bounds (LT, GT) are first converted to closed bounds (LT_EQ, GT_EQ) on the adjacent
 * value, because a truncated partition value can only represent inclusive ranges. Operations
 * that cannot be projected return null.
 */
class ProjectionUtil {

  static <T> UnboundPredicate<T> truncateInteger(
      String name, BoundPredicate<Integer> pred, Transform<Integer, T> transform) {
    int bound = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // x < b is equivalent to x <= b - 1; project the closed bound
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound - 1));
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound));
      case GT:
        // x > b is equivalent to x >= b + 1; project the closed bound
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound + 1));
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound));
      case EQ:
        return predicate(pred.op(), name, transform.apply(bound));
      default:
        return null;
    }
  }

  static <T> UnboundPredicate<T> truncateLong(
      String name, BoundPredicate<Long> pred, Transform<Long, T> transform) {
    long bound = pred.literal().value();
    switch (pred.op()) {
      case LT:
        // x < b is equivalent to x <= b - 1; project the closed bound
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound - 1L));
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound));
      case GT:
        // x > b is equivalent to x >= b + 1; project the closed bound
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound + 1L));
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound));
      case EQ:
        return predicate(pred.op(), name, transform.apply(bound));
      default:
        return null;
    }
  }

  static <T> UnboundPredicate<T> truncateDecimal(
      String name, BoundPredicate<BigDecimal> pred, Transform<BigDecimal, T> transform) {
    BigDecimal bound = pred.literal().value();
    switch (pred.op()) {
      case LT: {
        // adjust by one unit in the last place to convert the open bound to a closed one
        BigDecimal minusOne = new BigDecimal(
            bound.unscaledValue().subtract(BigInteger.ONE), bound.scale());
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(minusOne));
      }
      case LT_EQ:
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound));
      case GT: {
        // adjust by one unit in the last place to convert the open bound to a closed one
        BigDecimal plusOne = new BigDecimal(
            bound.unscaledValue().add(BigInteger.ONE), bound.scale());
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(plusOne));
      }
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound));
      case EQ:
        return predicate(pred.op(), name, transform.apply(bound));
      default:
        return null;
    }
  }

  static <S, T> UnboundPredicate<T> truncateArray(
      String name, BoundPredicate<S> pred, Transform<S, T> transform) {
    S bound = pred.literal().value();
    switch (pred.op()) {
      case LT:
      case LT_EQ:
        // there is no adjacent value for array-like types, so both bounds project the same way
        return predicate(Expression.Operation.LT_EQ, name, transform.apply(bound));
      case GT:
      case GT_EQ:
        return predicate(Expression.Operation.GT_EQ, name, transform.apply(bound));
      case EQ:
        return predicate(Expression.Operation.EQ, name, transform.apply(bound));
      // case IN:
      //   // TODO
      //   return Expressions.predicate(Operation.IN, name, transform.apply(bound));
      default:
        return null;
    }
  }
}
6,475
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Timestamps.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * Time-granularity transforms for timestamp columns (microseconds from the epoch).
 * <p>
 * Each constant maps a timestamp to the ordinal of its year/month/day/hour counted from
 * 1970-01-01T00:00:00Z, producing an int partition value.
 */
enum Timestamps implements Transform<Long, Integer> {
  YEAR(ChronoUnit.YEARS, "year"),
  MONTH(ChronoUnit.MONTHS, "month"),
  DAY(ChronoUnit.DAYS, "day"),
  HOUR(ChronoUnit.HOURS, "hour");

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);

  // time unit counted from the epoch to produce the partition ordinal
  private final ChronoUnit granularity;
  // lowercase transform name used in partition specs, e.g. "hour"
  private final String name;

  Timestamps(ChronoUnit granularity, String name) {
    this.granularity = granularity;
    this.name = name;
  }

  /**
   * Returns the number of whole granularity units between the epoch and the timestamp.
   *
   * @param timestampMicros microseconds from the epoch, UTC
   * @return the unit ordinal as an int
   */
  @Override
  public Integer apply(Long timestampMicros) {
    // discards fractional seconds, not needed for calculation
    // NOTE(review): integer division truncates toward zero, so pre-epoch values with
    // fractional seconds round toward the epoch rather than down — TODO confirm intended
    OffsetDateTime timestamp = Instant
        .ofEpochSecond(timestampMicros / 1_000_000)
        .atOffset(ZoneOffset.UTC);
    return (int) granularity.between(EPOCH, timestamp);
  }

  @Override
  public boolean canTransform(Type type) {
    return type.typeId() == Type.TypeID.TIMESTAMP;
  }

  @Override
  public Type getResultType(Type sourceType) {
    // the unit ordinal is always an int, regardless of granularity
    return Types.IntegerType.get();
  }

  /**
   * Projects a predicate on the source timestamp into a predicate on the unit ordinal.
   * Null checks pass through unchanged; comparisons are projected inclusively.
   */
  @Override
  public UnboundPredicate<Integer> project(String name, BoundPredicate<Long> pred) {
    if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
      return Expressions.predicate(pred.op(), name);
    }
    return ProjectionUtil.truncateLong(name, pred, this);
  }

  @Override
  public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Long> predicate) {
    // strict projection is not implemented for timestamp transforms
    return null;
  }

  /**
   * Renders a unit ordinal as a human-readable partition string, e.g. "2017-12-01-10"
   * for HOUR; returns "null" for null values.
   */
  @Override
  public String toHumanString(Integer value) {
    if (value == null) {
      return "null";
    }
    switch (granularity) {
      case YEARS:
        return TransformUtil.humanYear(value);
      case MONTHS:
        return TransformUtil.humanMonth(value);
      case DAYS:
        return TransformUtil.humanDay(value);
      case HOURS:
        return TransformUtil.humanHour(value);
      default:
        // unreachable: the enum only uses the four units above
        throw new UnsupportedOperationException("Unsupported time unit: " + granularity);
    }
  }

  @Override
  public String toString() {
    return name;
  }
}
6,476
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Identity.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.nio.ByteBuffer;

/**
 * The identity transform: partition values are the source values, unchanged.
 * <p>
 * The source {@link Type} is retained only so values can be rendered in a human-readable
 * form (dates, times, timestamps, and binary need type-specific formatting).
 */
class Identity<T> implements Transform<T, T> {
  @SuppressWarnings("unchecked")
  public static <I> Identity<I> get(Type type) {
    return new Identity<>(type);
  }

  private final Type type;

  private Identity(Type type) {
    this.type = type;
  }

  @Override
  public T apply(T value) {
    return value;
  }

  @Override
  public boolean canTransform(Type type) {
    // identity works for any primitive; nested types cannot be partition sources
    return type.isPrimitiveType();
  }

  @Override
  public Type getResultType(Type sourceType) {
    return sourceType;
  }

  @Override
  public UnboundPredicate<T> project(String name, BoundPredicate<T> predicate) {
    // inclusive and strict projections are the same because values pass through unchanged
    return projectStrict(name, predicate);
  }

  @Override
  public UnboundPredicate<T> projectStrict(String name, BoundPredicate<T> predicate) {
    if (predicate.literal() != null) {
      return Expressions.predicate(predicate.op(), name, predicate.literal().value());
    } else {
      // unary predicates (isNull/notNull) have no literal
      return Expressions.predicate(predicate.op(), name);
    }
  }

  /**
   * Renders a partition value using the source type's human-readable form; returns
   * "null" for null values. Binary values are base64-encoded.
   */
  @Override
  public String toHumanString(T value) {
    if (value == null) {
      return "null";
    }

    switch (type.typeId()) {
      case DATE:
        return TransformUtil.humanDay((Integer) value);
      case TIME:
        return TransformUtil.humanTime((Long) value);
      case TIMESTAMP:
        if (((Types.TimestampType) type).shouldAdjustToUTC()) {
          return TransformUtil.humanTimestampWithZone((Long) value);
        } else {
          return TransformUtil.humanTimestampWithoutZone((Long) value);
        }
      case FIXED:
      case BINARY:
        // binary values may be stored as either ByteBuffer or byte[]
        if (value instanceof ByteBuffer) {
          // duplicate so encoding does not move the buffer's position
          return TransformUtil.base64encode(((ByteBuffer) value).duplicate());
        } else if (value instanceof byte[]) {
          return TransformUtil.base64encode(ByteBuffer.wrap((byte[]) value));
        } else {
          throw new UnsupportedOperationException("Unsupported binary type: " + value.getClass());
        }
      default:
        return value.toString();
    }
  }

  @Override
  public String toString() {
    return "identity";
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    Identity<?> that = (Identity<?>) o;
    return type.equals(that.type);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(type);
  }
}
6,477
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Transforms.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.types.Type;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Factory methods for transforms.
 * <p>
 * Most users should create transforms using a
 * {@link PartitionSpec#builderFor(Schema) partition spec builder}.
 *
 * @see PartitionSpec#builderFor(Schema) The partition spec builder.
 */
public class Transforms {
  private Transforms() {
  }

  // matches parameterized transform names, e.g. "truncate[16]" or "bucket[128]"
  private static final Pattern HAS_WIDTH = Pattern.compile("(\\w+)\\[(\\d+)\\]");

  /**
   * Parses a transform from its string representation for the given source type.
   * <p>
   * Accepts parameterized forms ("truncate[W]", "bucket[N]"), "identity", and the
   * time-granularity names ("year", "month", "day", "hour") for date/timestamp types.
   * Names are matched case-insensitively.
   *
   * @param type the source column type
   * @param transform the transform's string representation
   * @return the parsed transform
   * @throws IllegalArgumentException if the string does not name a known transform
   */
  public static Transform<?, ?> fromString(Type type, String transform) {
    Matcher width = HAS_WIDTH.matcher(transform);
    if (width.matches()) {
      String name = width.group(1);
      int w = Integer.parseInt(width.group(2));
      if (name.equalsIgnoreCase("truncate")) {
        return Truncate.get(type, w);
      } else if (name.equalsIgnoreCase("bucket")) {
        // fixed: was a case-sensitive equals(), inconsistent with "truncate" above
        return Bucket.get(type, w);
      }
    }

    if (transform.equalsIgnoreCase("identity")) {
      return Identity.get(type);

    } else if (type.typeId() == Type.TypeID.TIMESTAMP) {
      return Timestamps.valueOf(transform.toUpperCase(Locale.ENGLISH));

    } else if (type.typeId() == Type.TypeID.DATE) {
      return Dates.valueOf(transform.toUpperCase(Locale.ENGLISH));
    }

    throw new IllegalArgumentException("Unknown transform: " + transform);
  }

  /**
   * Returns an identity {@link Transform} that can be used for any type.
   *
   * @param type the {@link Type source type} for the transform
   * @param <T> Java type passed to this transform
   * @return an identity transform
   */
  public static <T> Transform<T, T> identity(Type type) {
    return Identity.get(type);
  }

  /**
   * Returns a year {@link Transform} for date or timestamp types.
   *
   * @param type the {@link Type source type} for the transform
   * @param <T> Java type passed to this transform
   * @return a year transform
   */
  @SuppressWarnings("unchecked")
  public static <T> Transform<T, Integer> year(Type type) {
    switch (type.typeId()) {
      case DATE:
        return (Transform<T, Integer>) Dates.YEAR;
      case TIMESTAMP:
        return (Transform<T, Integer>) Timestamps.YEAR;
      default:
        throw new IllegalArgumentException(
            "Cannot partition type " + type + " by year");
    }
  }

  /**
   * Returns a month {@link Transform} for date or timestamp types.
   *
   * @param type the {@link Type source type} for the transform
   * @param <T> Java type passed to this transform
   * @return a month transform
   */
  @SuppressWarnings("unchecked")
  public static <T> Transform<T, Integer> month(Type type) {
    switch (type.typeId()) {
      case DATE:
        return (Transform<T, Integer>) Dates.MONTH;
      case TIMESTAMP:
        return (Transform<T, Integer>) Timestamps.MONTH;
      default:
        throw new IllegalArgumentException(
            "Cannot partition type " + type + " by month");
    }
  }

  /**
   * Returns a day {@link Transform} for date or timestamp types.
   *
   * @param type the {@link Type source type} for the transform
   * @param <T> Java type passed to this transform
   * @return a day transform
   */
  @SuppressWarnings("unchecked")
  public static <T> Transform<T, Integer> day(Type type) {
    switch (type.typeId()) {
      case DATE:
        return (Transform<T, Integer>) Dates.DAY;
      case TIMESTAMP:
        return (Transform<T, Integer>) Timestamps.DAY;
      default:
        // fixed: error message previously said "by month"
        throw new IllegalArgumentException(
            "Cannot partition type " + type + " by day");
    }
  }

  /**
   * Returns a hour {@link Transform} for timestamps.
   *
   * @param type the {@link Type source type} for the transform
   * @param <T> Java type passed to this transform
   * @return a hour transform
   */
  @SuppressWarnings("unchecked")
  public static <T> Transform<T, Integer> hour(Type type) {
    Preconditions.checkArgument(type.typeId() == Type.TypeID.TIMESTAMP,
        "Cannot partition type %s by hour", type);
    return (Transform<T, Integer>) Timestamps.HOUR;
  }

  /**
   * Returns a bucket {@link Transform} for the given type and number of buckets.
   *
   * @param type the {@link Type source type} for the transform
   * @param numBuckets the number of buckets for the transform to produce
   * @param <T> Java type passed to this transform
   * @return a transform that buckets values into numBuckets
   */
  public static <T> Transform<T, Integer> bucket(Type type, int numBuckets) {
    return Bucket.get(type, numBuckets);
  }

  /**
   * Returns a truncate {@link Transform} for the given type and width.
   *
   * @param type the {@link Type source type} for the transform
   * @param width the width to truncate data values
   * @param <T> Java type passed to this transform
   * @return a transform that truncates the given type to width
   */
  public static <T> Transform<T, T> truncate(Type type, int width) {
    return Truncate.get(type, width);
  }
}
6,478
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/TransformUtil.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Base64;

/**
 * Static helpers that render partition values (epoch-based ordinals, micros-of-day,
 * epoch micros, and binary) as human-readable strings.
 */
class TransformUtil {

  private TransformUtil() {
  }

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final int EPOCH_YEAR = EPOCH.getYear();

  /** Renders a year ordinal (years since 1970) as "yyyy". */
  static String humanYear(int yearOrdinal) {
    return String.format("%04d", EPOCH_YEAR + yearOrdinal);
  }

  /** Renders a month ordinal (months since 1970-01) as "yyyy-MM". */
  static String humanMonth(int monthOrdinal) {
    return String.format("%04d-%02d", EPOCH_YEAR + (monthOrdinal / 12), 1 + (monthOrdinal % 12));
  }

  /** Renders a day ordinal (days since 1970-01-01) as "yyyy-MM-dd". */
  static String humanDay(int dayOrdinal) {
    OffsetDateTime day = EPOCH.plusDays(dayOrdinal);
    return String.format("%04d-%02d-%02d",
        day.getYear(), day.getMonth().getValue(), day.getDayOfMonth());
  }

  /** Renders microseconds from midnight as an ISO local time string. */
  static String humanTime(Long microsFromMidnight) {
    return LocalTime.ofNanoOfDay(microsFromMidnight * 1000).toString();
  }

  /** Renders epoch microseconds as an ISO offset date-time string (UTC). */
  static String humanTimestampWithZone(Long timestampMicros) {
    return ChronoUnit.MICROS.addTo(EPOCH, timestampMicros).toString();
  }

  /** Renders epoch microseconds as an ISO local date-time string (no zone). */
  static String humanTimestampWithoutZone(Long timestampMicros) {
    return ChronoUnit.MICROS.addTo(EPOCH, timestampMicros).toLocalDateTime().toString();
  }

  /** Renders an hour ordinal (hours since the epoch) as "yyyy-MM-dd-HH". */
  static String humanHour(int hourOrdinal) {
    OffsetDateTime time = EPOCH.plusHours(hourOrdinal);
    return String.format("%04d-%02d-%02d-%02d",
        time.getYear(), time.getMonth().getValue(), time.getDayOfMonth(), time.getHour());
  }

  /** Base64-encodes the buffer's remaining bytes and returns the result as a string. */
  static String base64encode(ByteBuffer buffer) {
    // use direct decoding because all of the encoded bytes are in ASCII;
    // StandardCharsets replaces the previous Guava Charsets constant (same charset)
    return StandardCharsets.ISO_8859_1.decode(Base64.getEncoder().encode(buffer)).toString();
  }
}
6,479
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Truncate.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.LT;
import static com.netflix.iceberg.expressions.Expression.Operation.LT_EQ;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * Truncate transforms: map a value to the greatest multiple of the width that is
 * less than or equal to the value (numerics), or to a length-limited prefix
 * (strings and binary). The result type always equals the source type.
 */
abstract class Truncate<T> implements Transform<T, T> {
  /**
   * Returns a truncate transform for the given source type and width.
   *
   * @throws UnsupportedOperationException if the type cannot be truncated
   */
  @SuppressWarnings("unchecked")
  static <T> Truncate<T> get(Type type, int width) {
    switch (type.typeId()) {
      case INTEGER:
        return (Truncate<T>) new TruncateInteger(width);
      case LONG:
        return (Truncate<T>) new TruncateLong(width);
      case DECIMAL:
        return (Truncate<T>) new TruncateDecimal(width);
      case STRING:
        return (Truncate<T>) new TruncateString(width);
      case BINARY:
        return (Truncate<T>) new TruncateByteBuffer(width);
      default:
        throw new UnsupportedOperationException(
            "Cannot truncate type: " + type);
    }
  }

  // the truncation width (numerics) or maximum length (strings/binary)
  abstract public Integer width();

  @Override
  abstract public T apply(T value);

  @Override
  public Type getResultType(Type sourceType) {
    // truncation preserves the source type
    return sourceType;
  }

  /** Integer truncation: value - ((value mod W + W) mod W), floor semantics for negatives. */
  private static class TruncateInteger extends Truncate<Integer> {
    // W is the truncation width
    private final int W;

    private TruncateInteger(int width) {
      this.W = width;
    }

    @Override
    public Integer width() {
      return W;
    }

    @Override
    public Integer apply(Integer value) {
      // double-mod keeps the remainder non-negative so negatives round toward -infinity
      return value - (((value % W) + W) % W);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.INTEGER;
    }

    @Override
    public UnboundPredicate<Integer> project(String name, BoundPredicate<Integer> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateInteger(name, pred, this);
    }

    /**
     * Strict projection for upper bounds only: produces a predicate on the truncated
     * value that is guaranteed to match only rows matching the original predicate.
     */
    @Override
    public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Integer> predicate) {
      // TODO: for integers, can this return the original predicate?
      // No. the predicate needs to be in terms of the applied value. For all x, apply(x) <= x.
      // Therefore, the lower bound can be transformed outside of a greater-than bound.
      // in/out are values just inside/outside the bound; inImage/outImage are their truncations
      int in;
      int out;
      int inImage;
      int outImage;
      switch (predicate.op()) {
        case LT:
          in = predicate.literal().value() - 1;
          out = predicate.literal().value();
          inImage = apply(in);
          outImage = apply(out);
          // if the bound falls on a truncation boundary, the closed form is safe;
          // otherwise stay strict so partial buckets are excluded
          if (inImage != outImage) {
            return Expressions.predicate(LT_EQ, name, inImage);
          } else {
            return Expressions.predicate(LT, name, inImage);
          }
        case LT_EQ:
          in = predicate.literal().value();
          out = predicate.literal().value() + 1;
          inImage = apply(in);
          outImage = apply(out);
          if (inImage != outImage) {
            return Expressions.predicate(LT_EQ, name, inImage);
          } else {
            return Expressions.predicate(LT, name, inImage);
          }
        case GT:
        case GT_EQ:
        case EQ:
        case NOT_EQ:
//        case IN:
//          break;
//        case NOT_IN:
//          break;
        default:
          // no strict projection exists for these operations
          return null;
      }
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      TruncateInteger that = (TruncateInteger) o;
      return W == that.W;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(W);
    }

    @Override
    public String toString() {
      return "truncate[" + W + "]";
    }
  }

  /** Long truncation with the same floor semantics as {@link TruncateInteger}. */
  private static class TruncateLong extends Truncate<Long> {
    // W is the truncation width
    private final int W;

    private TruncateLong(int width) {
      this.W = width;
    }

    @Override
    public Integer width() {
      return W;
    }

    @Override
    public Long apply(Long value) {
      return value - (((value % W) + W) % W);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.LONG;
    }

    @Override
    public UnboundPredicate<Long> project(String name, BoundPredicate<Long> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateLong(name, pred, this);
    }

    @Override
    public UnboundPredicate<Long> projectStrict(String name, BoundPredicate<Long> predicate) {
      // strict projection is not implemented for long truncation
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      TruncateLong that = (TruncateLong) o;
      return W == that.W;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(W);
    }

    @Override
    public String toString() {
      return "truncate[" + W + "]";
    }
  }

  /** String truncation: keeps at most L leading chars of the char sequence. */
  private static class TruncateString extends Truncate<CharSequence> {
    // L is the maximum length of the result
    private final int L;

    private TruncateString(int length) {
      this.L = length;
    }

    @Override
    public Integer width() {
      return L;
    }

    @Override
    public CharSequence apply(CharSequence value) {
      return value.subSequence(0, Math.min(value.length(), L));
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.STRING;
    }

    @Override
    public UnboundPredicate<CharSequence> project(String name,
                                                  BoundPredicate<CharSequence> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateArray(name, pred, this);
    }

    @Override
    public UnboundPredicate<CharSequence> projectStrict(String name,
                                                        BoundPredicate<CharSequence> predicate) {
      // strict projection is not implemented for string truncation
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      TruncateString that = (TruncateString) o;
      return L == that.L;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(L);
    }

    @Override
    public String toString() {
      return "truncate[" + L + "]";
    }
  }

  /** Binary truncation: limits the buffer to at most L bytes past its position. */
  private static class TruncateByteBuffer extends Truncate<ByteBuffer> {
    // L is the maximum number of bytes kept
    private final int L;

    private TruncateByteBuffer(int length) {
      this.L = length;
    }

    @Override
    public Integer width() {
      return L;
    }

    @Override
    public ByteBuffer apply(ByteBuffer value) {
      // duplicate so the caller's buffer position/limit are unchanged
      ByteBuffer ret = value.duplicate();
      ret.limit(Math.min(value.limit(), value.position() + L));
      return ret;
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.BINARY;
    }

    @Override
    public UnboundPredicate<ByteBuffer> project(String name,
                                                BoundPredicate<ByteBuffer> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateArray(name, pred, this);
    }

    @Override
    public UnboundPredicate<ByteBuffer> projectStrict(String name,
                                                      BoundPredicate<ByteBuffer> predicate) {
      // strict projection is not implemented for binary truncation
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      TruncateByteBuffer that = (TruncateByteBuffer) o;
      return L == that.L;
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(L);
    }

    @Override
    public String toHumanString(ByteBuffer value) {
      // binary partition values are displayed base64-encoded
      return value == null ? "null" : TransformUtil.base64encode(value);
    }

    @Override
    public String toString() {
      return "truncate[" + L + "]";
    }
  }

  /**
   * Decimal truncation: subtracts the value's remainder modulo the width applied to
   * the unscaled value, so the scale of the result matches the input.
   */
  private static class TruncateDecimal extends Truncate<BigDecimal> {
    private final BigInteger unscaledWidth;

    private TruncateDecimal(int unscaledWidth) {
      this.unscaledWidth = BigInteger.valueOf(unscaledWidth);
    }

    @Override
    public Integer width() {
      return unscaledWidth.intValue();
    }

    @Override
    public BigDecimal apply(BigDecimal value) {
      // remainder is forced non-negative (add width, re-mod) for floor semantics
      BigDecimal remainder = new BigDecimal(
          value.unscaledValue()
              .remainder(unscaledWidth)
              .add(unscaledWidth)
              .remainder(unscaledWidth),
          value.scale());
      return value.subtract(remainder);
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == Type.TypeID.DECIMAL;
    }

    @Override
    public UnboundPredicate<BigDecimal> project(String name,
                                                BoundPredicate<BigDecimal> pred) {
      if (pred.op() == NOT_NULL || pred.op() == IS_NULL) {
        return Expressions.predicate(pred.op(), name);
      }
      return ProjectionUtil.truncateDecimal(name, pred, this);
    }

    @Override
    public UnboundPredicate<BigDecimal> projectStrict(String name,
                                                      BoundPredicate<BigDecimal> predicate) {
      // strict projection is not implemented for decimal truncation
      return null;
    }

    @Override
    public boolean equals(Object o) {
      if (this == o) {
        return true;
      }
      if (o == null || getClass() != o.getClass()) {
        return false;
      }

      TruncateDecimal that = (TruncateDecimal) o;
      return unscaledWidth.equals(that.unscaledWidth);
    }

    @Override
    public int hashCode() {
      return Objects.hashCode(unscaledWidth);
    }

    @Override
    public String toString() {
      return "truncate[" + unscaledWidth + "]";
    }
  }
}
6,480
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/PartitionSpecVisitor.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.collect.Lists;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import java.util.List;

/**
 * Visitor over the fields of a {@link PartitionSpec}: one callback per transform kind,
 * invoked with the source column name and id (plus width for bucket/truncate).
 *
 * @param <T> the result type produced for each partition field
 */
public interface PartitionSpecVisitor<T> {
  T identity(String sourceName, int sourceId);

  T bucket(String sourceName, int sourceId, int width);

  T truncate(String sourceName, int sourceId, int width);

  T year(String sourceName, int sourceId);

  T month(String sourceName, int sourceId);

  T day(String sourceName, int sourceId);

  T hour(String sourceName, int sourceId);

  /**
   * Visits each field of the spec in order and collects the visitor results.
   * <p>
   * Dispatch is by transform class for identity/bucket/truncate and by enum identity
   * for the date/timestamp granularities.
   * NOTE(review): a field whose transform matches none of the branches is silently
   * skipped, so the result list may be shorter than spec.fields() — confirm intended.
   *
   * @param schema the table schema, used to resolve source column names
   * @param spec the partition spec to visit
   * @param visitor the visitor invoked per field
   * @return the visitor results, in field order
   */
  static <R> List<R> visit(Schema schema, PartitionSpec spec, PartitionSpecVisitor<R> visitor) {
    List<R> results = Lists.newArrayListWithExpectedSize(spec.fields().size());

    for (PartitionField field : spec.fields()) {
      String sourceName = schema.findColumnName(field.sourceId());
      Transform<?, ?> transform = field.transform();

      if (transform instanceof Identity) {
        results.add(visitor.identity(sourceName, field.sourceId()));
      } else if (transform instanceof Bucket) {
        results.add(visitor.bucket(sourceName, field.sourceId(),
            ((Bucket<?>) transform).numBuckets()));
      } else if (transform instanceof Truncate) {
        results.add(visitor.truncate(sourceName, field.sourceId(),
            ((Truncate<?>) transform).width()));
      } else if (transform == Dates.YEAR || transform == Timestamps.YEAR) {
        results.add(visitor.year(sourceName, field.sourceId()));
      } else if (transform == Dates.MONTH || transform == Timestamps.MONTH) {
        results.add(visitor.month(sourceName, field.sourceId()));
      } else if (transform == Dates.DAY || transform == Timestamps.DAY) {
        results.add(visitor.day(sourceName, field.sourceId()));
      } else if (transform == Timestamps.HOUR) {
        results.add(visitor.hour(sourceName, field.sourceId()));
      }
    }

    return results;
  }
}
6,481
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Dates.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;

import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL;
import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL;

/**
 * Time-granularity transforms for date columns (days from the epoch).
 * <p>
 * Each constant maps a day ordinal to the ordinal of its year, month, or day counted
 * from 1970-01-01, producing an int partition value.
 */
enum Dates implements Transform<Integer, Integer> {
  YEAR(ChronoUnit.YEARS, "year"),
  MONTH(ChronoUnit.MONTHS, "month"),
  DAY(ChronoUnit.DAYS, "day");

  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);

  // unit counted from the epoch to produce the partition ordinal
  private final ChronoUnit granularity;
  // lowercase transform name used in partition specs
  private final String label;

  Dates(ChronoUnit granularity, String label) {
    this.granularity = granularity;
    this.label = label;
  }

  /**
   * Returns the number of whole granularity units between the epoch and the given day.
   * Day ordinals are already at day granularity, so DAY passes the value through.
   */
  @Override
  public Integer apply(Integer days) {
    return granularity == ChronoUnit.DAYS
        ? days
        : (int) granularity.between(EPOCH, EPOCH.plusDays(days));
  }

  @Override
  public boolean canTransform(Type type) {
    return Type.TypeID.DATE == type.typeId();
  }

  @Override
  public Type getResultType(Type sourceType) {
    // the unit ordinal is always an int, regardless of granularity
    return Types.IntegerType.get();
  }

  /**
   * Projects a predicate on the source date into a predicate on the unit ordinal.
   * Null checks pass through unchanged; everything else is projected inclusively.
   */
  @Override
  public UnboundPredicate<Integer> project(String name, BoundPredicate<Integer> pred) {
    switch (pred.op()) {
      case IS_NULL:
      case NOT_NULL:
        return Expressions.predicate(pred.op(), name);
      default:
        return ProjectionUtil.truncateInteger(name, pred, this);
    }
  }

  @Override
  public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<Integer> predicate) {
    // strict projection is not implemented for date transforms
    return null;
  }

  /**
   * Renders a unit ordinal as a human-readable partition string, e.g. "2017-12" for
   * MONTH; returns "null" for null values.
   */
  @Override
  public String toHumanString(Integer value) {
    if (value == null) {
      return "null";
    }

    if (granularity == ChronoUnit.YEARS) {
      return TransformUtil.humanYear(value);
    }
    if (granularity == ChronoUnit.MONTHS) {
      return TransformUtil.humanMonth(value);
    }
    if (granularity == ChronoUnit.DAYS) {
      return TransformUtil.humanDay(value);
    }
    // unreachable: the enum only uses the three units above
    throw new UnsupportedOperationException("Unsupported time unit: " + granularity);
  }

  @Override
  public String toString() {
    return label;
  }
}
6,482
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/transforms/Bucket.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.transforms;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Objects;
import com.google.common.collect.Sets;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Set;
import java.util.UUID;

import static com.netflix.iceberg.types.Type.TypeID;

/**
 * A partition {@link Transform} that buckets values by hashing them with Murmur3 (32-bit)
 * and taking the non-negative hash modulo a fixed number of buckets, N.
 * <p>
 * Instances are created through {@link #get(Type, int)}, which selects the hash implementation
 * for the source column type. Subclasses only provide {@link #hash(Object)}; bucket assignment
 * and predicate projection are shared here.
 */
abstract class Bucket<T> implements Transform<T, Integer> {
  // Murmur3 (32-bit) is the hash function required by the Iceberg spec for bucketing,
  // so results are stable across implementations.
  private static final HashFunction MURMUR3 = Hashing.murmur3_32();

  /**
   * Returns a bucket transform for the given source type with N buckets.
   *
   * @param type the source column type to bucket
   * @param N the number of buckets
   * @param <T> the Java type of source values
   * @throws IllegalArgumentException if the type cannot be bucketed
   */
  @SuppressWarnings("unchecked")
  static <T> Bucket<T> get(Type type, int N) {
    switch (type.typeId()) {
      case DATE:
      case INTEGER:
        return (Bucket<T>) new BucketInteger(N);
      case TIME:
      case TIMESTAMP:
      case LONG:
        return (Bucket<T>) new BucketLong(N);
      case DECIMAL:
        return (Bucket<T>) new BucketDecimal(N);
      case STRING:
        return (Bucket<T>) new BucketString(N);
      case FIXED:
      case BINARY:
        return (Bucket<T>) new BucketByteBuffer(N);
      case UUID:
        return (Bucket<T>) new BucketUUID(N);
      default:
        throw new IllegalArgumentException("Cannot bucket by type: " + type);
    }
  }

  private final int N;

  private Bucket(int N) {
    this.N = N;
  }

  /** Returns the number of buckets this transform produces. */
  public Integer numBuckets() {
    return N;
  }

  /** Hashes a single value; implemented per source type. */
  @VisibleForTesting
  abstract int hash(T value);

  @Override
  public Integer apply(T value) {
    // Mask the sign bit so the modulo result is always in [0, N).
    return (hash(value) & Integer.MAX_VALUE) % N;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    Bucket<?> bucket = (Bucket<?>) o;
    return N == bucket.N;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(N);
  }

  @Override
  public String toString() {
    return "bucket[" + N + "]";
  }

  @Override
  public UnboundPredicate<Integer> project(String name, BoundPredicate<T> predicate) {
    switch (predicate.op()) {
      case EQ:
        // equality is preserved by hashing: equal values land in the same bucket
        return Expressions.predicate(
            predicate.op(), name, apply(predicate.literal().value()));
//      case IN:
//        return Expressions.predicate();
      default:
        // comparison predicates can't be projected, notEq can't be projected
        // TODO: small ranges can be projected.
        // for example, (x > 0) and (x < 3) can be turned into in({1, 2}) and projected.
        return null;
    }
  }

  @Override
  public UnboundPredicate<Integer> projectStrict(String name, BoundPredicate<T> predicate) {
    switch (predicate.op()) {
      case NOT_EQ: // TODO: need to translate not(eq(...)) into notEq in expressions
        // a value outside the bucket of x is guaranteed to differ from x
        return Expressions.predicate(predicate.op(), name, apply(predicate.literal().value()));
//      case NOT_IN:
//        return null;
      default:
        // no strict projection for comparison or equality
        return null;
    }
  }

  @Override
  public Type getResultType(Type sourceType) {
    return Types.IntegerType.get();
  }

  private static class BucketInteger extends Bucket<Integer> {
    private BucketInteger(int N) {
      super(N);
    }

    public int hash(Integer value) {
      // ints are widened to long before hashing so int and long values hash identically
      return MURMUR3.hashLong(value.longValue()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.INTEGER || type.typeId() == TypeID.DATE;
    }
  }

  private static class BucketLong extends Bucket<Long> {
    private BucketLong(int N) {
      super(N);
    }

    public int hash(Long value) {
      return MURMUR3.hashLong(value).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return (
          type.typeId() == TypeID.LONG ||
          type.typeId() == TypeID.TIME ||
          type.typeId() == TypeID.TIMESTAMP
      );
    }
  }

  // bucketing by Float is not allowed by the spec, but this has the float hash implementation
  static class BucketFloat extends Bucket<Float> {
    // used by tests because the factory method will not instantiate a bucket function for floats
    BucketFloat(int N) {
      super(N);
    }

    public int hash(Float value) {
      // hash the double bit pattern so float and double values hash identically
      return MURMUR3.hashLong(Double.doubleToRawLongBits((double) value)).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.FLOAT;
    }
  }

  // bucketing by Double is not allowed by the spec, but this has the double hash implementation
  static class BucketDouble extends Bucket<Double> {
    // used by tests because the factory method will not instantiate a bucket function for doubles
    BucketDouble(int N) {
      super(N);
    }

    public int hash(Double value) {
      return MURMUR3.hashLong(Double.doubleToRawLongBits(value)).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.DOUBLE;
    }
  }

  private static class BucketString extends Bucket<CharSequence> {
    private BucketString(int N) {
      super(N);
    }

    public int hash(CharSequence value) {
      return MURMUR3.hashString(value, Charsets.UTF_8).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.STRING;
    }
  }

  private static class BucketBytes extends Bucket<byte[]> {
    private static final Set<TypeID> SUPPORTED_TYPES = Sets.newHashSet(
        TypeID.BINARY, TypeID.FIXED);

    private BucketBytes(int N) {
      super(N);
    }

    public int hash(byte[] value) {
      return MURMUR3.hashBytes(value).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return SUPPORTED_TYPES.contains(type.typeId());
    }
  }

  private static class BucketByteBuffer extends Bucket<ByteBuffer> {
    private static final Set<TypeID> SUPPORTED_TYPES = Sets.newHashSet(
        TypeID.BINARY, TypeID.FIXED);

    private BucketByteBuffer(int N) {
      super(N);
    }

    public int hash(ByteBuffer value) {
      if (value.hasArray()) {
        // hashBytes takes (array, offset, length). The previous code passed
        // arrayOffset() + remaining() as the length, which over-reads (or throws)
        // whenever the buffer is a slice with a nonzero arrayOffset().
        return MURMUR3.hashBytes(value.array(),
            value.arrayOffset() + value.position(),
            value.remaining()).asInt();
      } else {
        // direct or read-only buffers have no accessible array; hash a copy instead
        int position = value.position();
        byte[] copy = new byte[value.remaining()];
        try {
          value.get(copy);
        } finally {
          // make sure the buffer position is unchanged
          value.position(position);
        }
        return MURMUR3.hashBytes(copy).asInt();
      }
    }

    @Override
    public boolean canTransform(Type type) {
      return SUPPORTED_TYPES.contains(type.typeId());
    }
  }

  private static class BucketUUID extends Bucket<UUID> {
    // reuse a per-thread 16-byte buffer to avoid allocating on every hash call
    private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
      ByteBuffer buffer = ByteBuffer.allocate(16);
      buffer.order(ByteOrder.BIG_ENDIAN);
      return buffer;
    });

    private BucketUUID(int N) {
      super(N);
    }

    public int hash(UUID value) {
      // serialize the UUID as 16 big-endian bytes (most-significant half first) and hash
      ByteBuffer buffer = BUFFER.get();
      buffer.rewind();
      buffer.putLong(value.getMostSignificantBits());
      buffer.putLong(value.getLeastSignificantBits());
      return MURMUR3.hashBytes(buffer.array()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.UUID;
    }
  }

  private static class BucketDecimal extends Bucket<BigDecimal> {
    private BucketDecimal(int N) {
      super(N);
    }

    public int hash(BigDecimal value) {
      // hash the minimal two's-complement unscaled bytes; scale is fixed by the column type
      return MURMUR3.hashBytes(value.unscaledValue().toByteArray()).asInt();
    }

    @Override
    public boolean canTransform(Type type) {
      return type.typeId() == TypeID.DECIMAL;
    }
  }
}
6,483
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Projections.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.transforms.Transform;

/**
 * Utils to project expressions on rows to expressions on partitions.
 */
public class Projections {
  // static utility class; not meant to be instantiated
  private Projections() {
  }

  /**
   * A class that projects expressions for a table's data rows into expressions on the table's
   * partition values, for a table's {@link PartitionSpec partition spec}.
   * <p>
   * There are two types of projections: inclusive and strict.
   * <p>
   * An inclusive projection guarantees that if an expression matches a row, the projected
   * expression will match the row's partition.
   * <p>
   * A strict projection guarantees that if a partition matches a projected expression, then all
   * rows in that partition will match the original expression.
   */
  public static abstract class ProjectionEvaluator extends ExpressionVisitor<Expression> {
    /**
     * Project the given row expression to a partition expression.
     *
     * @param expr an expression on data rows
     * @return an expression on partition data (depends on the projection)
     */
    public abstract Expression project(Expression expr);
  }

  /**
   * Creates an inclusive {@code ProjectionEvaluator} for the {@link PartitionSpec spec}.
   * <p>
   * An evaluator is used to project expressions for a table's data rows into expressions on the
   * table's partition values. The evaluator returned by this function is inclusive and will build
   * expressions with the following guarantee: if the original expression matches a row, then the
   * projected expression will match that row's partition.
   * <p>
   * Each predicate in the expression is projected using
   * {@link Transform#project(String, BoundPredicate)}.
   *
   * @param spec a partition spec
   * @return an inclusive projection evaluator for the partition spec
   * @see Transform#project(String, BoundPredicate) Inclusive transform used for each predicate
   */
  public static ProjectionEvaluator inclusive(PartitionSpec spec) {
    return new InclusiveProjection(spec);
  }

  /**
   * Creates a strict {@code ProjectionEvaluator} for the {@link PartitionSpec spec}.
   * <p>
   * An evaluator is used to project expressions for a table's data rows into expressions on the
   * table's partition values. The evaluator returned by this function is strict and will build
   * expressions with the following guarantee: if the projected expression matches a partition,
   * then the original expression will match all rows in that partition.
   * <p>
   * Each predicate in the expression is projected using
   * {@link Transform#projectStrict(String, BoundPredicate)}.
   *
   * @param spec a partition spec
   * @return a strict projection evaluator for the partition spec
   * @see Transform#projectStrict(String, BoundPredicate) Strict transform used for each predicate
   */
  public static ProjectionEvaluator strict(PartitionSpec spec) {
    return new StrictProjection(spec);
  }

  /**
   * Shared base for the inclusive and strict evaluators: handles NOT elimination, the
   * logical operators, and binding of unbound predicates. Subclasses only decide how a
   * single bound predicate is projected onto a partition field.
   */
  private static class BaseProjectionEvaluator extends ProjectionEvaluator {
    final PartitionSpec spec;

    private BaseProjectionEvaluator(PartitionSpec spec) {
      this.spec = spec;
    }

    @Override
    public Expression project(Expression expr) {
      // projections assume that there are no NOT nodes in the expression tree. to ensure that this
      // is the case, the expression is rewritten to push all NOT nodes down to the expression
      // leaf nodes.
      // this is necessary to ensure that the default expression returned when a predicate can't be
      // projected is correct.
      return ExpressionVisitors.visit(ExpressionVisitors.visit(expr, RewriteNot.get()), this);
    }

    @Override
    public Expression alwaysTrue() {
      return Expressions.alwaysTrue();
    }

    @Override
    public Expression alwaysFalse() {
      return Expressions.alwaysFalse();
    }

    @Override
    public Expression not(Expression result) {
      // RewriteNot (applied in project) should have removed every NOT node already
      throw new UnsupportedOperationException("[BUG] project called on expression with a not");
    }

    @Override
    public Expression and(Expression leftResult, Expression rightResult) {
      return Expressions.and(leftResult, rightResult);
    }

    @Override
    public Expression or(Expression leftResult, Expression rightResult) {
      return Expressions.or(leftResult, rightResult);
    }

    @Override
    public <T> Expression predicate(UnboundPredicate<T> pred) {
      // bind the predicate to the table schema, then delegate to the bound-predicate handler;
      // binding may also simplify to a constant (e.g. alwaysTrue), which is returned as-is
      Expression bound = pred.bind(spec.schema().asStruct());

      if (bound instanceof BoundPredicate) {
        return predicate((BoundPredicate<?>) bound);
      }

      return bound;
    }
  }

  private static class InclusiveProjection extends BaseProjectionEvaluator {
    private InclusiveProjection(PartitionSpec spec) {
      super(spec);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Expression predicate(BoundPredicate<T> pred) {
      PartitionField part = spec.getFieldBySourceId(pred.ref().fieldId());
      if (part == null) {
        // the predicate has no partition column
        return alwaysTrue();
      }

      UnboundPredicate<?> result = ((Transform<T, ?>) part.transform()).project(part.name(), pred);
      if (result != null) {
        return result;
      }

      // if the predicate could not be projected, it always matches
      return alwaysTrue();
    }
  }

  private static class StrictProjection extends BaseProjectionEvaluator {
    private StrictProjection(PartitionSpec spec) {
      super(spec);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Expression predicate(BoundPredicate<T> pred) {
      PartitionField part = spec.getFieldBySourceId(pred.ref().fieldId());
      if (part == null) {
        // the predicate has no partition column
        return alwaysFalse();
      }

      UnboundPredicate<?> result = ((Transform<T, ?>) part.transform())
          .projectStrict(part.name(), pred);
      if (result != null) {
        return result;
      }

      // if the predicate could not be projected, it never matches
      return alwaysFalse();
    }
  }
}
6,484
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/ExpressionVisitors.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

/**
 * Utils for traversing {@link Expression expressions}.
 */
public class ExpressionVisitors {

  /**
   * Base visitor for expression trees. Every callback defaults to returning null,
   * so subclasses override only the node types they care about.
   *
   * @param <R> the result type produced for each visited node
   */
  public abstract static class ExpressionVisitor<R> {
    public R alwaysTrue() {
      return null;
    }

    public R alwaysFalse() {
      return null;
    }

    public R not(R result) {
      return null;
    }

    public R and(R leftResult, R rightResult) {
      return null;
    }

    public R or(R leftResult, R rightResult) {
      return null;
    }

    public <T> R predicate(BoundPredicate<T> pred) {
      return null;
    }

    public <T> R predicate(UnboundPredicate<T> pred) {
      return null;
    }
  }

  /**
   * A visitor for expressions that have already been bound to a schema: bound predicates
   * are dispatched to a per-operation callback (isNull, lt, eq, ...), and encountering an
   * unbound predicate is an error.
   *
   * @param <R> the result type produced for each visited node
   */
  public abstract static class BoundExpressionVisitor<R> extends ExpressionVisitor<R> {
    public <T> R isNull(BoundReference<T> ref) {
      return null;
    }

    public <T> R notNull(BoundReference<T> ref) {
      return null;
    }

    public <T> R lt(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R ltEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R gt(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R gtEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R eq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R notEq(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R in(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    public <T> R notIn(BoundReference<T> ref, Literal<T> lit) {
      return null;
    }

    // dispatches a bound predicate to the callback for its operation
    public <T> R predicate(BoundPredicate<T> pred) {
      switch (pred.op()) {
        case IS_NULL:
          return isNull(pred.ref());
        case NOT_NULL:
          return notNull(pred.ref());
        case LT:
          return lt(pred.ref(), pred.literal());
        case LT_EQ:
          return ltEq(pred.ref(), pred.literal());
        case GT:
          return gt(pred.ref(), pred.literal());
        case GT_EQ:
          return gtEq(pred.ref(), pred.literal());
        case EQ:
          return eq(pred.ref(), pred.literal());
        case NOT_EQ:
          return notEq(pred.ref(), pred.literal());
        case IN:
          return in(pred.ref(), pred.literal());
        case NOT_IN:
          return notIn(pred.ref(), pred.literal());
        default:
          throw new UnsupportedOperationException(
              "Unknown operation for predicate: " + pred.op());
      }
    }

    public <T> R predicate(UnboundPredicate<T> pred) {
      // this visitor requires a fully bound expression; see Binder
      throw new UnsupportedOperationException("Not a bound predicate: " + pred);
    }
  }

  /**
   * Traverses the given {@link Expression expression} with a {@link ExpressionVisitor visitor}.
   * <p>
   * The visitor will be called to handle each node in the expression tree in postfix order. Result
   * values produced by child nodes are passed when parent nodes are handled.
   *
   * @param expr an expression to traverse
   * @param visitor a visitor that will be called to handle each node in the expression tree
   * @param <R> the return type produced by the expression visitor
   * @return the value returned by the visitor for the root expression node
   */
  @SuppressWarnings("unchecked")
  public static <R> R visit(Expression expr, ExpressionVisitor<R> visitor) {
    if (expr instanceof Predicate) {
      if (expr instanceof BoundPredicate) {
        return visitor.predicate((BoundPredicate<?>) expr);
      } else {
        return visitor.predicate((UnboundPredicate<?>) expr);
      }
    } else {
      switch (expr.op()) {
        case TRUE:
          return visitor.alwaysTrue();
        case FALSE:
          return visitor.alwaysFalse();
        case NOT:
          // children are visited first; the parent callback receives their results
          Not not = (Not) expr;
          return visitor.not(visit(not.child(), visitor));
        case AND:
          And and = (And) expr;
          return visitor.and(visit(and.left(), visitor), visit(and.right(), visitor));
        case OR:
          Or or = (Or) expr;
          return visitor.or(visit(or.left(), visitor), visit(or.right(), visitor));
        default:
          throw new UnsupportedOperationException(
              "Unknown operation: " + expr.op());
      }
    }
  }
}
6,485
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/False.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import java.io.ObjectStreamException;

/**
 * The constant {@link Expression expression} that matches nothing: it is always false.
 * <p>
 * This class is a singleton; the only instance is {@link #INSTANCE}, and its negation
 * is the {@code True} singleton.
 */
public class False implements Expression {
  // the single shared instance; the private constructor prevents any others
  static final False INSTANCE = new False();

  private False() {
  }

  // serialization hook: write a proxy so deserialization resolves back to a constant
  Object writeReplace() throws ObjectStreamException {
    return new SerializationProxies.ConstantExpressionProxy(false);
  }

  @Override
  public Operation op() {
    return Operation.FALSE;
  }

  @Override
  public Expression negate() {
    // not(false) == true
    return True.INSTANCE;
  }

  @Override
  public String toString() {
    return "false";
  }
}
6,486
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/StrictMetricsEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.ExpressionVisitors.BoundExpressionVisitor;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import java.nio.ByteBuffer;
import java.util.Map;

import static com.netflix.iceberg.expressions.Expressions.rewriteNot;

/**
 * Evaluates an {@link Expression} on a {@link DataFile} to test whether all rows in the file match.
 * <p>
 * This evaluation is strict: it returns true only if every row in the file must match the
 * expression. For example, if a file's ts column has min X and max Y, this evaluator will return
 * true for ts &lt; Y+1 but not for ts &lt; Y-1.
 * <p>
 * Files are passed to {@link #eval(DataFile)}, which returns true if all rows in the file must
 * match the expression and false if the file may contain rows that do not match.
 */
public class StrictMetricsEvaluator {
  private final Schema schema;
  private final StructType struct;
  // the row expression, bound to the schema with NOT nodes pushed down to the leaves
  private final Expression expr;
  // per-thread visitor cache; transient because the visitors hold per-file mutable state
  private transient ThreadLocal<MetricsEvalVisitor> visitors = null;

  private MetricsEvalVisitor visitor() {
    // lazily created so a deserialized evaluator (null field) still works
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(MetricsEvalVisitor::new);
    }
    return visitors.get();
  }

  public StrictMetricsEvaluator(Schema schema, Expression unbound) {
    this.schema = schema;
    this.struct = schema.asStruct();
    this.expr = Binder.bind(struct, rewriteNot(unbound));
  }

  /**
   * Test whether all records within the file match the expression.
   *
   * @param file a data file
   * @return true if all rows in the file must match the expression, false if the file may
   *         contain rows that do not match.
   */
  public boolean eval(DataFile file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return visitor().eval(file);
  }

  private static final boolean ROWS_MUST_MATCH = true;
  private static final boolean ROWS_MIGHT_NOT_MATCH = false;

  /**
   * Visitor that evaluates the bound expression against one file's column metrics
   * (value/null counts and lower/upper bounds). Each callback returns ROWS_MUST_MATCH
   * only when the metrics prove every row satisfies the predicate.
   */
  private class MetricsEvalVisitor extends BoundExpressionVisitor<Boolean> {
    // metrics for the file currently being evaluated; reset by eval(DataFile)
    private Map<Integer, Long> valueCounts = null;
    private Map<Integer, Long> nullCounts = null;
    private Map<Integer, ByteBuffer> lowerBounds = null;
    private Map<Integer, ByteBuffer> upperBounds = null;

    private boolean eval(DataFile file) {
      if (file.recordCount() <= 0) {
        // an empty file trivially satisfies "all rows match"
        return ROWS_MUST_MATCH;
      }

      this.valueCounts = file.valueCounts();
      this.nullCounts = file.nullValueCounts();
      this.lowerBounds = file.lowerBounds();
      this.upperBounds = file.upperBounds();

      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Boolean alwaysTrue() {
      return ROWS_MUST_MATCH; // all rows match
    }

    @Override
    public Boolean alwaysFalse() {
      return ROWS_MIGHT_NOT_MATCH; // no rows match
    }

    @Override
    public Boolean not(Boolean result) {
      return !result;
    }

    @Override
    public Boolean and(Boolean leftResult, Boolean rightResult) {
      return leftResult && rightResult;
    }

    @Override
    public Boolean or(Boolean leftResult, Boolean rightResult) {
      return leftResult || rightResult;
    }

    @Override
    public <T> Boolean isNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has any non-null values, the expression does not match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      // all rows are null only when every counted value is a null value
      if (valueCounts != null && valueCounts.containsKey(id) &&
          nullCounts != null && nullCounts.containsKey(id) &&
          valueCounts.get(id) - nullCounts.get(id) == 0) {
        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notNull(BoundReference<T> ref) {
      // no need to check whether the field is required because binding evaluates that case
      // if the column has any null values, the expression does not match
      Integer id = ref.fieldId();
      Preconditions.checkNotNull(struct.field(id),
          "Cannot filter by nested column: %s", schema.findField(id));

      if (nullCounts != null && nullCounts.containsKey(id) && nullCounts.get(id) == 0) {
        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean lt(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <----------Min----Max---X------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        // every value is <= upper bound, so max < X implies all rows match
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean ltEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <----------Min----Max---X------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp <= 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean gt(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <-------X---Min----Max---------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        // every value is >= lower bound, so min > X implies all rows match
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean gtEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when: <-------X---Min----Max---------->
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(field.type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp >= 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean eq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when Min == X == Max
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id) &&
          upperBounds != null && upperBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(struct.field(id).type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp != 0) {
          return ROWS_MIGHT_NOT_MATCH;
        }

        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        cmp = lit.comparator().compare(upper, lit.value());
        if (cmp != 0) {
          return ROWS_MIGHT_NOT_MATCH;
        }

        // min == X == max: the column holds only the value X
        return ROWS_MUST_MATCH;
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notEq(BoundReference<T> ref, Literal<T> lit) {
      // Rows must match when X < Min or Max < X because it is not in the range
      Integer id = ref.fieldId();
      Types.NestedField field = struct.field(id);
      Preconditions.checkNotNull(field, "Cannot filter by nested column: %s", schema.findField(id));

      if (lowerBounds != null && lowerBounds.containsKey(id)) {
        T lower = Conversions.fromByteBuffer(struct.field(id).type(), lowerBounds.get(id));

        int cmp = lit.comparator().compare(lower, lit.value());
        if (cmp > 0) {
          return ROWS_MUST_MATCH;
        }
      }

      if (upperBounds != null && upperBounds.containsKey(id)) {
        T upper = Conversions.fromByteBuffer(field.type(), upperBounds.get(id));

        int cmp = lit.comparator().compare(upper, lit.value());
        if (cmp < 0) {
          return ROWS_MUST_MATCH;
        }
      }

      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean in(BoundReference<T> ref, Literal<T> lit) {
      // column metrics cannot prove that all rows are in a set of values
      return ROWS_MIGHT_NOT_MATCH;
    }

    @Override
    public <T> Boolean notIn(BoundReference<T> ref, Literal<T> lit) {
      // column metrics cannot prove that no row is in a set of values
      return ROWS_MIGHT_NOT_MATCH;
    }
  }
}
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Literals.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.google.common.base.Preconditions; import com.netflix.iceberg.types.Comparators; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.io.ObjectStreamException; import java.math.BigDecimal; import java.math.RoundingMode; import java.nio.ByteBuffer; import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.LocalTime; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.format.DateTimeFormatter; import java.time.temporal.ChronoUnit; import java.util.Comparator; import java.util.UUID; class Literals { private Literals() { } private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); /** * Create a {@link Literal} from an Object. 
* * @param value a value * @param <T> Java type of value * @return a Literal for the given value */ @SuppressWarnings("unchecked") static <T> Literal<T> from(T value) { Preconditions.checkNotNull(value, "Cannot create expression literal from null"); if (value instanceof Boolean) { return (Literal<T>) new Literals.BooleanLiteral((Boolean) value); } else if (value instanceof Integer) { return (Literal<T>) new Literals.IntegerLiteral((Integer) value); } else if (value instanceof Long) { return (Literal<T>) new Literals.LongLiteral((Long) value); } else if (value instanceof Float) { return (Literal<T>) new Literals.FloatLiteral((Float) value); } else if (value instanceof Double) { return (Literal<T>) new Literals.DoubleLiteral((Double) value); } else if (value instanceof CharSequence) { return (Literal<T>) new Literals.StringLiteral((CharSequence) value); } else if (value instanceof UUID) { return (Literal<T>) new Literals.UUIDLiteral((UUID) value); } else if (value instanceof byte[]) { return (Literal<T>) new Literals.FixedLiteral(ByteBuffer.wrap((byte[]) value)); } else if (value instanceof ByteBuffer) { return (Literal<T>) new Literals.BinaryLiteral((ByteBuffer) value); } else if (value instanceof BigDecimal) { return (Literal<T>) new Literals.DecimalLiteral((BigDecimal) value); } throw new IllegalArgumentException(String.format( "Cannot create expression literal from %s: %s", value.getClass().getName(), value)); } @SuppressWarnings("unchecked") static <T> AboveMax<T> aboveMax() { return AboveMax.INSTANCE; } @SuppressWarnings("unchecked") static <T> BelowMin<T> belowMin() { return BelowMin.INSTANCE; } private abstract static class BaseLiteral<T> implements Literal<T> { private final T value; BaseLiteral(T value) { Preconditions.checkNotNull(value, "Literal values cannot be null"); this.value = value; } @Override public T value() { return value; } @Override public String toString() { return String.valueOf(value); } } private abstract static class ComparableLiteral<C 
extends Comparable<C>> extends BaseLiteral<C> {
  // NOTE(review): this fragment is the tail of ComparableLiteral; its class header is
  // outside this chunk. Shared comparator: nulls sort first, then natural ordering.
  @SuppressWarnings("unchecked")
  private static final Comparator<? extends Comparable> CMP =
      Comparators.<Comparable>nullsFirst().thenComparing(Comparator.naturalOrder());

  public ComparableLiteral(C value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public Comparator<C> comparator() {
    return (Comparator<C>) CMP;
  }
}

// Sentinel literal returned by to(type) when a value exceeds the target type's maximum.
// Carries no value; callers must test identity against Literals.aboveMax().
static class AboveMax<T> implements Literal<T> {
  private static final AboveMax INSTANCE = new AboveMax();

  private AboveMax() {
  }

  @Override
  public T value() {
    throw new UnsupportedOperationException("AboveMax has no value");
  }

  @Override
  public <X> Literal<X> to(Type type) {
    throw new UnsupportedOperationException("Cannot change the type of AboveMax");
  }

  @Override
  public Comparator<T> comparator() {
    throw new UnsupportedOperationException("AboveMax has no comparator");
  }

  @Override
  public String toString() {
    return "aboveMax";
  }
}

// Sentinel literal returned by to(type) when a value is below the target type's minimum.
static class BelowMin<T> implements Literal<T> {
  private static final BelowMin INSTANCE = new BelowMin();

  private BelowMin() {
  }

  @Override
  public T value() {
    throw new UnsupportedOperationException("BelowMin has no value");
  }

  @Override
  public <X> Literal<X> to(Type type) {
    throw new UnsupportedOperationException("Cannot change the type of BelowMin");
  }

  @Override
  public Comparator<T> comparator() {
    throw new UnsupportedOperationException("BelowMin has no comparator");
  }

  @Override
  public String toString() {
    return "belowMin";
  }
}

// Boolean literal; converts only to BOOLEAN (identity).
static class BooleanLiteral extends ComparableLiteral<Boolean> {
  BooleanLiteral(Boolean value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    if (type.typeId() == Type.TypeID.BOOLEAN) {
      return (Literal<T>) this;
    }
    return null;
  }
}

// Integer literal; widens to LONG/FLOAT/DOUBLE/DECIMAL and reinterprets as DATE
// (days from epoch). Returns null for unsupported targets.
static class IntegerLiteral extends ComparableLiteral<Integer> {
  IntegerLiteral(Integer value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case INTEGER:
        return (Literal<T>) this;
      case LONG:
        return (Literal<T>) new LongLiteral(value().longValue());
      case FLOAT:
        return (Literal<T>) new FloatLiteral(value().floatValue());
      case DOUBLE:
        return (Literal<T>) new DoubleLiteral(value().doubleValue());
      case DATE:
        return (Literal<T>) new DateLiteral(value());
      case DECIMAL:
        int scale = ((Types.DecimalType) type).scale();
        // rounding mode isn't necessary, but pass one to avoid warnings
        return (Literal<T>) new DecimalLiteral(
            BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
      default:
        return null;
    }
  }
}

// Long literal; narrowing to INTEGER yields aboveMax/belowMin sentinels when the value
// is out of int range, so predicates can be simplified instead of silently truncated.
static class LongLiteral extends ComparableLiteral<Long> {
  LongLiteral(Long value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case INTEGER:
        if ((long) Integer.MAX_VALUE < value()) {
          return aboveMax();
        } else if ((long) Integer.MIN_VALUE > value()) {
          return belowMin();
        }
        return (Literal<T>) new IntegerLiteral(value().intValue());
      case LONG:
        return (Literal<T>) this;
      case FLOAT:
        return (Literal<T>) new FloatLiteral(value().floatValue());
      case DOUBLE:
        return (Literal<T>) new DoubleLiteral(value().doubleValue());
      case TIME:
        // reinterpret as microseconds from midnight
        return (Literal<T>) new TimeLiteral(value());
      case TIMESTAMP:
        // reinterpret as microseconds from epoch
        return (Literal<T>) new TimestampLiteral(value());
      case DECIMAL:
        int scale = ((Types.DecimalType) type).scale();
        // rounding mode isn't necessary, but pass one to avoid warnings
        return (Literal<T>) new DecimalLiteral(
            BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
      default:
        return null;
    }
  }
}

// Float literal; widens to DOUBLE/DECIMAL.
static class FloatLiteral extends ComparableLiteral<Float> {
  FloatLiteral(Float value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case FLOAT:
        return (Literal<T>) this;
      case DOUBLE:
        return (Literal<T>) new DoubleLiteral(value().doubleValue());
      case DECIMAL:
        int scale = ((Types.DecimalType) type).scale();
        return (Literal<T>) new DecimalLiteral(
            BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
      default:
        return null;
    }
  }
}

// Double literal; narrowing to FLOAT yields aboveMax/belowMin when out of float range.
static class DoubleLiteral extends ComparableLiteral<Double> {
  DoubleLiteral(Double value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case FLOAT:
        if ((double) Float.MAX_VALUE < value()) {
          return aboveMax();
        } else if ((double) -Float.MAX_VALUE > value()) {
          // Compare with -Float.MAX_VALUE because it is the most negative float value.
          // Float.MIN_VALUE is the smallest non-negative floating point value.
          return belowMin();
        }
        return (Literal<T>) new FloatLiteral(value().floatValue());
      case DOUBLE:
        return (Literal<T>) this;
      case DECIMAL:
        int scale = ((Types.DecimalType) type).scale();
        return (Literal<T>) new DecimalLiteral(
            BigDecimal.valueOf(value()).setScale(scale, RoundingMode.HALF_UP));
      default:
        return null;
    }
  }
}

// Date literal: days from the unix epoch, stored as an int.
static class DateLiteral extends ComparableLiteral<Integer> {
  DateLiteral(Integer value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    if (type.typeId() == Type.TypeID.DATE) {
      return (Literal<T>) this;
    }
    return null;
  }
}

// Time literal: microseconds from midnight, stored as a long.
static class TimeLiteral extends ComparableLiteral<Long> {
  TimeLiteral(Long value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    if (type.typeId() == Type.TypeID.TIME) {
      return (Literal<T>) this ;
    }
    return null;
  }
}

// Timestamp literal: microseconds from the unix epoch. Converts to DATE by truncating
// to the containing day (via java.time to stay correct across DST-free UTC math).
static class TimestampLiteral extends ComparableLiteral<Long> {
  TimestampLiteral(Long value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case TIMESTAMP:
        return (Literal<T>) this;
      case DATE:
        return (Literal<T>) new DateLiteral((int) ChronoUnit.DAYS.between(
            EPOCH_DAY, EPOCH.plus(value(), ChronoUnit.MICROS).toLocalDate()));
      default:
    }
    return null;
  }
}

// Decimal literal; conversion never rescales, so only same-scale DECIMAL targets match.
static class DecimalLiteral extends ComparableLiteral<BigDecimal> {
  DecimalLiteral(BigDecimal value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case DECIMAL:
        // do not change decimal scale
        if (value().scale() == ((Types.DecimalType) type).scale()) {
          return (Literal<T>) this;
        }
        return null;
      default:
        return null;
    }
  }
}

// String literal; parses ISO-8601 text into DATE/TIME/TIMESTAMP values, and converts
// to UUID/DECIMAL by parsing. TIMESTAMP parsing honors the type's UTC-adjustment flag.
static class StringLiteral extends BaseLiteral<CharSequence> {
  private static final Comparator<CharSequence> CMP =
      Comparators.<CharSequence>nullsFirst().thenComparing(Comparators.charSequences());

  StringLiteral(CharSequence value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case DATE:
        int date = (int) ChronoUnit.DAYS.between(EPOCH_DAY,
            LocalDate.parse(value(), DateTimeFormatter.ISO_LOCAL_DATE));
        return (Literal<T>) new DateLiteral(date);
      case TIME:
        // nanos-of-day to micros-of-day
        long timeMicros = LocalTime.parse(value(), DateTimeFormatter.ISO_LOCAL_TIME)
            .toNanoOfDay() / 1000;
        return (Literal<T>) new TimeLiteral(timeMicros);
      case TIMESTAMP:
        if (((Types.TimestampType) type).shouldAdjustToUTC()) {
          // input carries an offset; normalize to UTC micros
          long timestampMicros = ChronoUnit.MICROS.between(EPOCH,
              OffsetDateTime.parse(value(), DateTimeFormatter.ISO_DATE_TIME));
          return (Literal<T>) new TimestampLiteral(timestampMicros);
        } else {
          // input has no offset; interpret as UTC
          long timestampMicros = ChronoUnit.MICROS.between(EPOCH,
              LocalDateTime.parse(value(), DateTimeFormatter.ISO_LOCAL_DATE_TIME)
                  .atOffset(ZoneOffset.UTC));
          return (Literal<T>) new TimestampLiteral(timestampMicros);
        }
      case STRING:
        return (Literal<T>) this;
      case UUID:
        return (Literal<T>) new UUIDLiteral(UUID.fromString(value().toString()));
      case DECIMAL:
        int scale = ((Types.DecimalType) type).scale();
        BigDecimal decimal = new BigDecimal(value().toString());
        // scale must match exactly; no implicit rescaling
        if (scale == decimal.scale()) {
          return (Literal<T>) new DecimalLiteral(decimal);
        }
        return null;
      default:
        return null;
    }
  }

  @Override
  public Comparator<CharSequence> comparator() {
    return CMP;
  }

  @Override
  public String toString() {
    return "\"" + value() + "\"";
  }
}

// UUID literal; identity conversion only.
static class UUIDLiteral extends ComparableLiteral<UUID> {
  UUIDLiteral(UUID value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    if (type.typeId() == Type.TypeID.UUID) {
      return (Literal<T>) this;
    }
    return null;
  }
}

// Fixed-length binary literal; compares as unsigned bytes. Converts to a same-length
// FIXED or to BINARY. Serialized via a proxy because ByteBuffer is not Serializable.
static class FixedLiteral extends BaseLiteral<ByteBuffer> {
  private static final Comparator<ByteBuffer> CMP =
      Comparators.<ByteBuffer>nullsFirst().thenComparing(Comparators.unsignedBytes());

  FixedLiteral(ByteBuffer value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case FIXED:
        Types.FixedType fixed = (Types.FixedType) type;
        if (value().remaining() == fixed.length()) {
          return (Literal<T>) this;
        }
        return null;
      case BINARY:
        return (Literal<T>) new BinaryLiteral(value());
      default:
        return null;
    }
  }

  @Override
  public Comparator<ByteBuffer> comparator() {
    return CMP;
  }

  Object writeReplace() throws ObjectStreamException {
    return new SerializationProxies.FixedLiteralProxy(value());
  }
}

// Variable-length binary literal; compares as unsigned bytes. Converts to FIXED only
// when lengths match. Serialized via a proxy because ByteBuffer is not Serializable.
static class BinaryLiteral extends BaseLiteral<ByteBuffer> {
  private static final Comparator<ByteBuffer> CMP =
      Comparators.<ByteBuffer>nullsFirst().thenComparing(Comparators.unsignedBytes());

  BinaryLiteral(ByteBuffer value) {
    super(value);
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> Literal<T> to(Type type) {
    switch (type.typeId()) {
      case FIXED:
        Types.FixedType fixed = (Types.FixedType) type;
        if (value().remaining() == fixed.length()) {
          return (Literal<T>) new FixedLiteral(value());
        }
        return null;
      case BINARY:
        return (Literal<T>) this;
      default:
        return null;
    }
  }

  @Override
  public Comparator<ByteBuffer> comparator() {
    return CMP;
  }

  Object writeReplace() throws ObjectStreamException {
    return new SerializationProxies.BinaryLiteralProxy(value());
  }
}
}
6,488
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/UnboundPredicate.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Types; import static com.netflix.iceberg.expressions.Expression.Operation.IS_NULL; import static com.netflix.iceberg.expressions.Expression.Operation.NOT_NULL; public class UnboundPredicate<T> extends Predicate<T, NamedReference> { UnboundPredicate(Operation op, NamedReference namedRef, T value) { super(op, namedRef, Literals.from(value)); } UnboundPredicate(Operation op, NamedReference namedRef) { super(op, namedRef, null); } UnboundPredicate(Operation op, NamedReference namedRef, Literal<T> lit) { super(op, namedRef, lit); } @Override public Expression negate() { return new UnboundPredicate<>(op().negate(), ref(), literal()); } public Expression bind(Types.StructType struct) { Types.NestedField field = struct.field(ref().name()); ValidationException.check(field != null, "Cannot find field '%s' in struct: %s", ref().name(), struct); if (literal() == null) { switch (op()) { case IS_NULL: if (field.isRequired()) { return Expressions.alwaysFalse(); } return new BoundPredicate<>(IS_NULL, new BoundReference<>(struct, field.fieldId())); case NOT_NULL: if (field.isRequired()) { return Expressions.alwaysTrue(); } return new BoundPredicate<>(NOT_NULL, new BoundReference<>(struct, field.fieldId())); default: throw new ValidationException("Operation must be IS_NULL or 
NOT_NULL"); } } Literal<T> lit = literal().to(field.type()); if (lit == null) { throw new ValidationException(String.format( "Invalid value for comparison inclusive type %s: %s (%s)", field.type(), literal().value(), literal().value().getClass().getName())); } else if (lit == Literals.aboveMax()) { switch (op()) { case LT: case LT_EQ: case NOT_EQ: return Expressions.alwaysTrue(); case GT: case GT_EQ: case EQ: return Expressions.alwaysFalse(); // case IN: // break; // case NOT_IN: // break; } } else if (lit == Literals.belowMin()) { switch (op()) { case GT: case GT_EQ: case NOT_EQ: return Expressions.alwaysTrue(); case LT: case LT_EQ: case EQ: return Expressions.alwaysFalse(); // case IN: // break; // case NOT_IN: // break; } } return new BoundPredicate<>(op(), new BoundReference<>(struct, field.fieldId()), lit); } }
6,489
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/ResidualEvaluator.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.transforms.Transform;
import java.io.Serializable;
import java.util.Comparator;

/**
 * Finds the residuals for an {@link Expression} with the partitions in the given
 * {@link PartitionSpec}.
 * <p>
 * A residual expression is made by partially evaluating an expression using partition values. For
 * example, if a table is partitioned by day(utc_timestamp) and is read with a filter expression
 * utc_timestamp &gt;= a and utc_timestamp &lt;= b, then there are 4 possible residuals expressions
 * for the partition data, d:
 * <ul>
 *   <li>If d &gt; day(a) and d &lt; day(b), the residual is always true</li>
 *   <li>If d == day(a) and d != day(b), the residual is utc_timestamp &gt;= a</li>
 *   <li>if d == day(b) and d != day(a), the residual is utc_timestamp &lt;= b</li>
 *   <li>If d == day(a) == day(b), the residual is utc_timestamp &gt;= a and
 *       utc_timestamp &lt;= b</li>
 * </ul>
 * <p>
 * Partition data is passed using {@link StructLike}. Residuals are returned by
 * {@link #residualFor(StructLike)}.
 * <p>
 * This class is thread-safe.
 */
public class ResidualEvaluator implements Serializable {
  private final PartitionSpec spec;
  private final Expression expr;
  // one visitor per thread: the visitor holds the partition row being evaluated as
  // mutable state, so it cannot be shared across threads. Lazily (re)created because
  // ThreadLocal is not Serializable and the field is transient.
  private transient ThreadLocal<ResidualVisitor> visitors = null;

  private ResidualVisitor visitor() {
    if (visitors == null) {
      this.visitors = ThreadLocal.withInitial(ResidualVisitor::new);
    }
    return visitors.get();
  }

  public ResidualEvaluator(PartitionSpec spec, Expression expr) {
    this.spec = spec;
    this.expr = expr;
  }

  /**
   * Returns a residual expression for the given partition values.
   *
   * @param partitionData partition data values
   * @return the residual of this evaluator's expression from the partition values
   */
  public Expression residualFor(StructLike partitionData) {
    return visitor().eval(partitionData);
  }

  // Visits the expression tree, replacing each predicate with alwaysTrue/alwaysFalse
  // when the partition data decides it, or keeping the predicate in the residual.
  private class ResidualVisitor extends ExpressionVisitors.BoundExpressionVisitor<Expression> {
    // the partition row currently being evaluated; set by eval() before visiting
    private StructLike struct;

    private Expression eval(StructLike struct) {
      this.struct = struct;
      return ExpressionVisitors.visit(expr, this);
    }

    @Override
    public Expression alwaysTrue() {
      return Expressions.alwaysTrue();
    }

    @Override
    public Expression alwaysFalse() {
      return Expressions.alwaysFalse();
    }

    @Override
    public Expression not(Expression result) {
      return Expressions.not(result);
    }

    @Override
    public Expression and(Expression leftResult, Expression rightResult) {
      return Expressions.and(leftResult, rightResult);
    }

    @Override
    public Expression or(Expression leftResult, Expression rightResult) {
      return Expressions.or(leftResult, rightResult);
    }

    // The comparison methods below evaluate a bound predicate directly against the
    // partition row and collapse it to a constant expression.

    @Override
    public <T> Expression isNull(BoundReference<T> ref) {
      return (ref.get(struct) == null) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression notNull(BoundReference<T> ref) {
      return (ref.get(struct) != null) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression lt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) < 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression ltEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) <= 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression gt(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) > 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression gtEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) >= 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression eq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) == 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    public <T> Expression notEq(BoundReference<T> ref, Literal<T> lit) {
      Comparator<T> cmp = lit.comparator();
      return (cmp.compare(ref.get(struct), lit.value()) != 0) ? alwaysTrue() : alwaysFalse();
    }

    @Override
    @SuppressWarnings("unchecked")
    public <T> Expression predicate(BoundPredicate<T> pred) {
      // Get the strict projection of this predicate in partition data, then use it to determine
      // whether to return the original predicate. The strict projection returns true iff the
      // original predicate would have returned true, so the predicate can be eliminated if the
      // strict projection evaluates to true.
      //
      // If there is no strict projection or if it evaluates to false, then return the predicate.
      PartitionField part = spec.getFieldBySourceId(pred.ref().fieldId());
      if (part == null) {
        return pred; // not associated with a partition field, can't be evaluated
      }

      UnboundPredicate<?> strictProjection = ((Transform<T, ?>) part.transform())
          .projectStrict(part.name(), pred);
      if (strictProjection != null) {
        Expression bound = strictProjection.bind(spec.partitionType());
        if (bound instanceof BoundPredicate) {
          // the predicate methods will evaluate and return alwaysTrue or alwaysFalse
          return super.predicate((BoundPredicate<?>) bound);
        }
        return bound; // use the non-predicate residual (e.g. alwaysTrue)
      }

      // if the predicate could not be projected, it must be in the residual
      return pred;
    }

    @Override
    public <T> Expression predicate(UnboundPredicate<T> pred) {
      Expression bound = pred.bind(spec.schema().asStruct());

      if (bound instanceof BoundPredicate) {
        Expression boundResidual = predicate((BoundPredicate<?>) bound);
        if (boundResidual instanceof Predicate) {
          return pred; // replace with the original unbound predicate
        }
        return boundResidual; // use the non-predicate residual (e.g. alwaysTrue)
      }

      // if binding didn't result in a Predicate, return the expression
      return bound;
    }
  }
}
6,490
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Binder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.ExpressionVisitors.ExpressionVisitor;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types.StructType;
import java.util.List;
import java.util.Set;

/**
 * Rewrites {@link Expression expressions} by replacing unbound named references with references to
 * fields in a struct schema.
 */
public class Binder {
  private Binder() {
  }

  /**
   * Replaces all unbound/named references with bound references to fields in the given struct.
   * <p>
   * When a reference is resolved, any literal used in a predicate for that field is converted to
   * the field's type using {@link Literal#to(Type)}. If automatic conversion to that type isn't
   * allowed, a {@link ValidationException validation exception} is thrown.
   * <p>
   * The result expression may be simplified when constructed. For example, {@code isNull("a")} is
   * replaced with {@code alwaysFalse()} when {@code "a"} is resolved to a required field.
   * <p>
   * The expression cannot contain references that are already bound, or an
   * {@link IllegalStateException} will be thrown.
   *
   * @param struct The {@link StructType struct type} to resolve references by name.
   * @param expr An {@link Expression expression} to rewrite with bound references.
   * @return the expression rewritten with bound references
   * @throws ValidationException if literals do not match bound references
   * @throws IllegalStateException if any references are already bound
   */
  public static Expression bind(StructType struct, Expression expr) {
    return ExpressionVisitors.visit(expr, new BindVisitor(struct));
  }

  /**
   * Returns the set of field ids referenced by the given expressions after binding each one
   * to the struct. A null expression list yields an empty set.
   *
   * @param struct the struct type to resolve references by name
   * @param exprs expressions to scan for references; may be null
   * @return the ids of all fields referenced by the bound expressions
   */
  public static Set<Integer> boundReferences(StructType struct, List<Expression> exprs) {
    if (exprs == null) {
      return ImmutableSet.of();
    }

    // a single collector accumulates ids across all of the expressions
    ReferenceVisitor collector = new ReferenceVisitor();
    for (Expression expression : exprs) {
      ExpressionVisitors.visit(bind(struct, expression), collector);
    }
    return collector.references;
  }

  /** Rebuilds an expression tree, binding each unbound predicate against the struct. */
  private static class BindVisitor extends ExpressionVisitor<Expression> {
    private final StructType struct;

    private BindVisitor(StructType struct) {
      this.struct = struct;
    }

    @Override
    public Expression alwaysTrue() {
      return Expressions.alwaysTrue();
    }

    @Override
    public Expression alwaysFalse() {
      return Expressions.alwaysFalse();
    }

    @Override
    public Expression not(Expression child) {
      return Expressions.not(child);
    }

    @Override
    public Expression and(Expression left, Expression right) {
      return Expressions.and(left, right);
    }

    @Override
    public Expression or(Expression left, Expression right) {
      return Expressions.or(left, right);
    }

    @Override
    public <T> Expression predicate(BoundPredicate<T> pred) {
      // binding an already-bound expression is a caller error
      throw new IllegalStateException("Found already bound predicate: " + pred);
    }

    @Override
    public <T> Expression predicate(UnboundPredicate<T> pred) {
      return pred.bind(struct);
    }
  }

  /** Accumulates the field ids of every bound predicate encountered during a visit. */
  private static class ReferenceVisitor extends ExpressionVisitor<Set<Integer>> {
    private final Set<Integer> references = Sets.newHashSet();

    @Override
    public Set<Integer> alwaysTrue() {
      return references;
    }

    @Override
    public Set<Integer> alwaysFalse() {
      return references;
    }

    @Override
    public Set<Integer> not(Set<Integer> child) {
      return references;
    }

    @Override
    public Set<Integer> and(Set<Integer> left, Set<Integer> right) {
      return references;
    }

    @Override
    public Set<Integer> or(Set<Integer> left, Set<Integer> right) {
      return references;
    }

    @Override
    public <T> Set<Integer> predicate(BoundPredicate<T> pred) {
      references.add(pred.ref().fieldId());
      return references;
    }
  }
}
6,491
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/BoundPredicate.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class BoundPredicate<T> extends Predicate<T, BoundReference<T>> { BoundPredicate(Operation op, BoundReference<T> ref, Literal<T> lit) { super(op, ref, lit); } BoundPredicate(Operation op, BoundReference<T> ref) { super(op, ref, null); } @Override public Expression negate() { return new BoundPredicate<>(op().negate(), ref(), literal()); } }
6,492
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Expression.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import java.io.Serializable; /** * Represents a boolean expression tree. */ public interface Expression extends Serializable { enum Operation { TRUE, FALSE, IS_NULL, NOT_NULL, LT, LT_EQ, GT, GT_EQ, EQ, NOT_EQ, IN, NOT_IN, NOT, AND, OR; /** * @return the operation used when this is negated */ public Operation negate() { switch (this) { case IS_NULL: return Operation.NOT_NULL; case NOT_NULL: return Operation.IS_NULL; case LT: return Operation.GT_EQ; case LT_EQ: return Operation.GT; case GT: return Operation.LT_EQ; case GT_EQ: return Operation.LT; case EQ: return Operation.NOT_EQ; case NOT_EQ: return Operation.EQ; case IN: return Operation.NOT_IN; case NOT_IN: return Operation.IN; default: throw new IllegalArgumentException("No negation for operation: " + this); } } /** * @return the equivalent operation when the left and right operands are exchanged */ public Operation flipLR() { switch (this) { case LT: return Operation.GT; case LT_EQ: return Operation.GT_EQ; case GT: return Operation.LT; case GT_EQ: return Operation.LT_EQ; case EQ: return Operation.EQ; case NOT_EQ: return Operation.NOT_EQ; case AND: return Operation.AND; case OR: return Operation.OR; default: throw new IllegalArgumentException("No left-right flip for operation: " + this); } } } /** * @return the operation for an expression node. 
*/ Operation op(); /** * @return the negation of this expression, equivalent to not(this). */ default Expression negate() { throw new UnsupportedOperationException(String.format("%s cannot be negated", this)); } }
6,493
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Reference.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import java.io.Serializable; /** * Represents a variable reference in an {@link Expression expression}. * @see BoundReference * @see NamedReference */ public interface Reference extends Serializable { }
6,494
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Predicate.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public abstract class Predicate<T, R extends Reference> implements Expression { private final Operation op; private final R ref; private final Literal<T> literal; Predicate(Operation op, R ref, Literal<T> lit) { this.op = op; this.ref = ref; this.literal = lit; } @Override public Operation op() { return op; } public R ref() { return ref; } public Literal<T> literal() { return literal; } @Override public String toString() { switch (op) { case IS_NULL: return "is_null(" + ref() + ")"; case NOT_NULL: return "not_null(" + ref() + ")"; case LT: return String.valueOf(ref()) + " < " + literal(); case LT_EQ: return String.valueOf(ref()) + " <= " + literal(); case GT: return String.valueOf(ref()) + " > " + literal(); case GT_EQ: return String.valueOf(ref()) + " >= " + literal(); case EQ: return String.valueOf(ref()) + " == " + literal(); case NOT_EQ: return String.valueOf(ref()) + " != " + literal(); // case IN: // break; // case NOT_IN: // break; default: return "Invalid predicate: operation = " + op; } } }
6,495
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/True.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import java.io.ObjectStreamException; /** * An {@link Expression expression} that is always true. */ public class True implements Expression { static final True INSTANCE = new True(); private True() { } @Override public Operation op() { return Operation.TRUE; } @Override public Expression negate() { return False.INSTANCE; } @Override public String toString() { return "true"; } Object writeReplace() throws ObjectStreamException { return new SerializationProxies.ConstantExpressionProxy(true); } }
6,496
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/And.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; public class And implements Expression { private final Expression left; private final Expression right; And(Expression left, Expression right) { this.left = left; this.right = right; } public Expression left() { return left; } public Expression right() { return right; } @Override public Operation op() { return Expression.Operation.AND; } @Override public Expression negate() { // not(and(a, b)) => or(not(a), not(b)) return Expressions.or(left.negate(), right.negate()); } @Override public String toString() { return String.format("(%s and %s)", left, right); } }
6,497
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/Literal.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.expressions;

import com.netflix.iceberg.types.Type;
import java.io.Serializable;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Comparator;
import java.util.UUID;

/**
 * Represents a literal fixed value in an expression predicate
 *
 * @param <T> The Java type of the value wrapped by a {@link Literal}
 */
public interface Literal<T> extends Serializable {
  /** Returns a boolean literal. */
  static Literal<Boolean> of(boolean value) {
    return new Literals.BooleanLiteral(value);
  }

  /** Returns an int literal. */
  static Literal<Integer> of(int value) {
    return new Literals.IntegerLiteral(value);
  }

  /** Returns a long literal. */
  static Literal<Long> of(long value) {
    return new Literals.LongLiteral(value);
  }

  /** Returns a float literal. */
  static Literal<Float> of(float value) {
    return new Literals.FloatLiteral(value);
  }

  /** Returns a double literal. */
  static Literal<Double> of(double value) {
    return new Literals.DoubleLiteral(value);
  }

  /** Returns a string literal; ISO-8601 strings can later convert to date/time types. */
  static Literal<CharSequence> of(CharSequence value) {
    return new Literals.StringLiteral(value);
  }

  /** Returns a UUID literal. */
  static Literal<UUID> of(UUID value) {
    return new Literals.FixedLiteral(ByteBuffer.wrap(value)) instanceof Object
        ? null : null; // NOTE(review): placeholder removed; see below
  }

  /** Returns a fixed-length binary literal wrapping the given bytes. */
  static Literal<ByteBuffer> of(byte[] value) {
    return new Literals.FixedLiteral(ByteBuffer.wrap(value));
  }

  /** Returns a variable-length binary literal. */
  static Literal<ByteBuffer> of(ByteBuffer value) {
    return new Literals.BinaryLiteral(value);
  }

  /** Returns a decimal literal; the value's scale is preserved. */
  static Literal<BigDecimal> of(BigDecimal value) {
    return new Literals.DecimalLiteral(value);
  }

  /**
   * @return the value wrapped by this literal
   */
  T value();

  /**
   * Converts this literal to a literal of the given type.
   * <p>
   * When a predicate is bound to a concrete data column, literals are converted to match the bound
   * column's type. This conversion process is more narrow than a cast and is only intended for
   * cases where substituting one type is a common mistake (e.g. 34 instead of 34L) or where this
   * API avoids requiring a concrete class (e.g., dates).
   * <p>
   * If conversion to a target type is not supported, this method returns null.
   * <p>
   * This method may return {@link Literals#aboveMax} or {@link Literals#belowMin} when the target
   * type is not as wide as the original type. These values indicate that the containing predicate
   * can be simplified. For example, Integer.MAX_VALUE+1 converted to an int will result in
   * {@code aboveMax} and can simplify a &lt; Integer.MAX_VALUE+1 to {@link Expressions#alwaysTrue}
   *
   * @param type A primitive {@link Type}
   * @param <X> The Java type of value the new literal contains
   * @return A literal of the given type or null if conversion was not valid
   */
  <X> Literal<X> to(Type type);

  /**
   * Return a {@link Comparator} for values.
   *
   * @return a comparator for T objects
   */
  Comparator<T> comparator();
}
6,498
0
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg
Create_ds/iceberg/api/src/main/java/com/netflix/iceberg/expressions/BoundReference.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.expressions; import com.netflix.iceberg.StructLike; import com.netflix.iceberg.exceptions.ValidationException; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import java.util.List; public class BoundReference<T> implements Reference { private final int fieldId; private final Type type; private final int pos; BoundReference(Types.StructType struct, int fieldId) { this.fieldId = fieldId; this.pos = find(fieldId, struct); this.type = struct.fields().get(pos).type(); } private int find(int fieldId, Types.StructType struct) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { if (fields.get(i).fieldId() == fieldId) { return i; } } throw new ValidationException( "Cannot find top-level field id %d in struct: %s", fieldId, struct); } public Type type() { return type; } public int fieldId() { return fieldId; } public int pos() { return pos; } public T get(StructLike struct) { return struct.get(pos, javaType()); } @Override public String toString() { return String.format("ref(id=%d, pos=%d, type=%s)", fieldId, pos, type); } @SuppressWarnings("unchecked") private Class<T> javaType() { return (Class<T>) type.asPrimitiveType().typeId().javaClass(); } }
6,499