index
int64
0
0
repo_id
stringlengths
9
205
file_path
stringlengths
31
246
content
stringlengths
1
12.2M
__index_level_0__
int64
0
10k
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/TableScanIterable.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.netflix.iceberg.CombinedScanTask;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.HasTableOperations;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.TableScan;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.data.avro.DataReader;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.TypeUtil;
import java.io.Closeable;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Set;

import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static com.netflix.iceberg.data.parquet.GenericParquetReaders.buildReader;
import static java.util.Collections.emptyIterator;

/**
 * A {@link CloseableIterable} of generic {@link Record}s read from the data files of a
 * {@link TableScan}.
 * <p>
 * Each call to {@link #iterator()} opens a new {@link ScanIterator} over the scan's planned
 * tasks; the iterator is registered with this {@link CloseableGroup} so that {@link #close()}
 * releases whatever data file the iterator currently has open.
 */
class TableScanIterable extends CloseableGroup implements CloseableIterable<Record> {
  private final TableOperations ops;
  private final Schema projection;
  private final boolean reuseContainers;
  private final CloseableIterable<CombinedScanTask> tasks;

  /**
   * @param scan a configured table scan; its table must implement {@link HasTableOperations}
   * @param reuseContainers whether readers may reuse record containers between rows
   */
  TableScanIterable(TableScan scan, boolean reuseContainers) {
    Preconditions.checkArgument(scan.table() instanceof HasTableOperations,
        "Cannot scan table that doesn't expose its TableOperations");
    this.ops = ((HasTableOperations) scan.table()).operations();
    this.projection = scan.schema();
    this.reuseContainers = reuseContainers;
    // start planning tasks in the background
    this.tasks = scan.planTasks();
  }

  @Override
  public Iterator<Record> iterator() {
    ScanIterator iter = new ScanIterator(tasks);
    // register with the CloseableGroup so close() also closes this iterator's open data file
    addCloseable(iter);
    return iter;
  }

  /**
   * Opens a single file scan task as an iterable of records, honoring the task's split
   * (start/length) and this scan's projection.
   *
   * @throws UnsupportedOperationException for file formats other than Avro and Parquet
   */
  private CloseableIterable<Record> open(FileScanTask task) {
    InputFile input = ops.io().newInputFile(task.file().path().toString());

    // TODO: join to partition data from the manifest file
    switch (task.file().format()) {
      case AVRO:
        Avro.ReadBuilder avro = Avro.read(input)
            .project(projection)
            .createReaderFunc(DataReader::create)
            .split(task.start(), task.length());

        if (reuseContainers) {
          avro.reuseContainers();
        }

        return avro.build();

      case PARQUET:
        Parquet.ReadBuilder parquet = Parquet.read(input)
            .project(projection)
            .createReaderFunc(fileSchema -> buildReader(projection, fileSchema))
            .split(task.start(), task.length());

        if (reuseContainers) {
          parquet.reuseContainers();
        }

        return parquet.build();

      default:
        throw new UnsupportedOperationException(String.format("Cannot read %s file: %s",
            task.file().format().name(), task.file().path()));
    }
  }

  @Override
  public void close() throws IOException {
    tasks.close(); // close manifests from scan planning
    super.close(); // close data files
  }

  /**
   * Iterates over every {@link FileScanTask} of every planned {@link CombinedScanTask},
   * opening one data file at a time and applying each task's residual filter, if any.
   * <p>
   * Only one file is open at a time: the previous file's reader is closed before the next
   * task is opened.
   */
  private class ScanIterator implements Iterator<Record>, Closeable {
    private final Iterator<FileScanTask> tasks;
    private Closeable currentCloseable = null;
    private Iterator<Record> currentIterator = emptyIterator();

    private ScanIterator(Iterable<CombinedScanTask> tasks) {
      // NOTE(review): this eagerly materializes all file tasks into a list, which consumes
      // the planning iterable up front rather than streaming it
      this.tasks = Lists.newArrayList(concat(transform(tasks, CombinedScanTask::files))).iterator();
    }

    @Override
    public boolean hasNext() {
      while (true) {
        if (currentIterator.hasNext()) {
          return true;

        } else if (tasks.hasNext()) {
          // close the previous task's reader before opening the next file
          if (currentCloseable != null) {
            try {
              currentCloseable.close();
            } catch (IOException e) {
              throw new RuntimeIOException(e, "Failed to close task");
            }
          }

          FileScanTask task = tasks.next();
          CloseableIterable<Record> reader = open(task);
          this.currentCloseable = reader;

          // reference comparison against alwaysTrue(): assumes it returns a singleton
          // instance — NOTE(review): confirm alwaysTrue() is a constant
          if (task.residual() != null && task.residual() != Expressions.alwaysTrue()) {
            // apply the residual row filter that file-level pruning could not eliminate
            Evaluator filter = new Evaluator(projection.asStruct(), task.residual());
            this.currentIterator = filter(reader, filter::eval).iterator();
          } else {
            this.currentIterator = reader.iterator();
          }

        } else {
          return false;
        }
      }
    }

    @Override
    public Record next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return currentIterator.next();
    }

    @Override
    public void close() throws IOException {
      // release the data file currently open, if any; earlier files were closed in hasNext()
      if (currentCloseable != null) {
        currentCloseable.close();
      }
    }
  }
}
2,200
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/Record.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.types.Types.StructType;
import java.util.Map;

/**
 * A generic data record for an Iceberg {@link StructType struct}, addressable both by field
 * position (via {@link StructLike}) and by field name.
 */
public interface Record extends StructLike {
  /** Returns the struct type that describes this record's fields. */
  StructType struct();

  /** Returns the value of the field with the given name. */
  Object getField(String name);

  /** Sets the value of the field with the given name. */
  void setField(String name, Object value);

  /** Returns the value at the given field position. */
  Object get(int pos);

  /** Returns a copy of this record. */
  Record copy();

  /**
   * Returns a copy of this record; presumably fields present in {@code overwriteValues}
   * replace the copied values — TODO confirm against implementations.
   */
  Record copy(Map<String, Object> overwriteValues);
}
2,201
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/IcebergGenerics.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data;

import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import java.util.List;

/**
 * Entry point for reading Iceberg tables as generic {@link Record}s.
 */
public class IcebergGenerics {
  private IcebergGenerics() {
  }

  /**
   * Returns a builder to configure a read of the given table that produces generic records.
   *
   * @param table an Iceberg table
   * @return a builder to configure the scan
   */
  public static ScanBuilder read(Table table) {
    return new ScanBuilder(table);
  }

  /**
   * Fluent builder for a generic-record scan: accumulates a row filter, a column
   * projection, and the container-reuse flag, then produces the scan iterable.
   */
  public static class ScanBuilder {
    private final Table table;
    private Expression rowFilter = Expressions.alwaysTrue();
    private List<String> selectedColumns = ImmutableList.of("*");
    private boolean reuse = false;

    public ScanBuilder(Table table) {
      this.table = table;
    }

    /** Allows readers to reuse record containers between rows. */
    public ScanBuilder reuseContainers() {
      this.reuse = true;
      return this;
    }

    /** ANDs the given filter into the accumulated row filter. */
    public ScanBuilder where(Expression rowFilter) {
      this.rowFilter = Expressions.and(this.rowFilter, rowFilter);
      return this;
    }

    /** Replaces the column projection with the given column names. */
    public ScanBuilder select(String... columns) {
      this.selectedColumns = ImmutableList.copyOf(columns);
      return this;
    }

    /** Builds the configured scan and wraps it as an iterable of generic records. */
    public Iterable<Record> build() {
      com.netflix.iceberg.TableScan scan = table.newScan()
          .filter(rowFilter)
          .select(selectedColumns);
      return new TableScanIterable(scan, reuse);
    }
  }
}
2,202
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/IcebergDecoder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.MapMaker;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.ProjectionDatumReader;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.Schema;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.message.BadHeaderException;
import org.apache.avro.message.MessageDecoder;
import org.apache.avro.message.MissingSchemaException;
import org.apache.avro.message.SchemaStore;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Map;

/**
 * A {@link MessageDecoder} that reads single-object-encoded Avro messages: a 2-byte version
 * header followed by an 8-byte little-endian schema fingerprint, then the Avro-encoded datum.
 * The fingerprint selects the write schema used to decode the payload.
 */
public class IcebergDecoder<D> extends MessageDecoder.BaseDecoder<D> {
  // per-thread 10-byte scratch buffer: 2 header bytes + 8 fingerprint bytes
  private static final ThreadLocal<byte[]> HEADER_BUFFER =
      ThreadLocal.withInitial(() -> new byte[10]);

  // per-thread view over HEADER_BUFFER used to read the fingerprint as a little-endian long
  private static final ThreadLocal<ByteBuffer> FP_BUFFER = ThreadLocal.withInitial(() -> {
    byte[] header = HEADER_BUFFER.get();
    return ByteBuffer.wrap(header).order(ByteOrder.LITTLE_ENDIAN);
  });

  private final com.netflix.iceberg.Schema readSchema;
  private final SchemaStore resolver;
  // fingerprint -> decoder cache; MapMaker produces a concurrent map
  private final Map<Long, RawDecoder<D>> decoders = new MapMaker().makeMap();

  /**
   * Creates a new decoder that constructs datum instances described by an
   * {@link com.netflix.iceberg.Schema Iceberg schema}.
   * <p>
   * The {@code readSchema} is used as the expected schema (read schema). Datum instances
   * created by this class are described by the expected schema.
   * <p>
   * The schema used to decode incoming buffers is determined by the schema fingerprint encoded
   * in the message header. This class can decode messages that were encoded using the
   * {@code readSchema} and other schemas that are added using
   * {@link #addSchema(com.netflix.iceberg.Schema)}.
   *
   * @param readSchema the schema used to construct datum instances
   */
  public IcebergDecoder(com.netflix.iceberg.Schema readSchema) {
    this(readSchema, null);
  }

  /**
   * Creates a new decoder that constructs datum instances described by an
   * {@link com.netflix.iceberg.Schema Iceberg schema}.
   * <p>
   * The {@code readSchema} is used as the expected schema (read schema). Datum instances
   * created by this class are described by the expected schema.
   * <p>
   * The schema used to decode incoming buffers is determined by the schema fingerprint encoded
   * in the message header. This class can decode messages that were encoded using the
   * {@code readSchema} and other schemas that are added using
   * {@link #addSchema(com.netflix.iceberg.Schema)}.
   * <p>
   * Schemas may also be returned from an Avro {@link SchemaStore}. Avro Schemas from the store
   * must be compatible with Iceberg and should contain id properties and use only Iceberg
   * types.
   *
   * @param readSchema the {@link Schema} used to construct datum instances
   * @param resolver a {@link SchemaStore} used to find schemas by fingerprint
   */
  public IcebergDecoder(com.netflix.iceberg.Schema readSchema, SchemaStore resolver) {
    this.readSchema = readSchema;
    this.resolver = resolver;
    // the read schema is always a known write schema
    addSchema(this.readSchema);
  }

  /**
   * Adds an {@link com.netflix.iceberg.Schema Iceberg schema} that can be used to decode
   * buffers.
   *
   * @param writeSchema a schema to use when decoding buffers
   */
  public void addSchema(com.netflix.iceberg.Schema writeSchema) {
    addSchema(AvroSchemaUtil.convert(writeSchema, "table"));
  }

  // registers an Avro write schema under its parsing fingerprint
  private void addSchema(org.apache.avro.Schema writeSchema) {
    long fp = SchemaNormalization.parsingFingerprint64(writeSchema);
    decoders.put(fp, new RawDecoder<>(readSchema, writeSchema));
  }

  /**
   * Returns the decoder cached for the given fingerprint, consulting the {@link SchemaStore}
   * on a cache miss.
   *
   * @throws MissingSchemaException if no schema matches the fingerprint
   */
  private RawDecoder<D> getDecoder(long fp) {
    RawDecoder<D> decoder = decoders.get(fp);
    if (decoder != null) {
      return decoder;
    }

    if (resolver != null) {
      Schema writeSchema = resolver.findByFingerprint(fp);
      if (writeSchema != null) {
        addSchema(writeSchema);
        return decoders.get(fp);
      }
    }

    throw new MissingSchemaException(
        "Cannot resolve schema for fingerprint: " + fp);
  }

  @Override
  public D decode(InputStream stream, D reuse) throws IOException {
    byte[] header = HEADER_BUFFER.get();
    try {
      if (!readFully(stream, header)) {
        throw new BadHeaderException("Not enough header bytes");
      }
    } catch (IOException e) {
      throw new IOException("Failed to read header and fingerprint bytes", e);
    }

    // validate the 2-byte version marker before trusting the fingerprint
    if (IcebergEncoder.V1_HEADER[0] != header[0] || IcebergEncoder.V1_HEADER[1] != header[1]) {
      throw new BadHeaderException(String.format(
          "Unrecognized header bytes: 0x%02X 0x%02X", header[0], header[1]));
    }

    // fingerprint is the little-endian long at offset 2 of the header buffer
    RawDecoder<D> decoder = getDecoder(FP_BUFFER.get().getLong(2));

    return decoder.decode(stream, reuse);
  }

  /**
   * Decodes a payload written with one known write schema, resolving it to the read schema
   * via a {@link ProjectionDatumReader}.
   */
  private static class RawDecoder<D> extends MessageDecoder.BaseDecoder<D> {
    private static final ThreadLocal<BinaryDecoder> DECODER = new ThreadLocal<>();

    private final DatumReader<D> reader;

    /**
     * Creates a new {@link MessageDecoder} that constructs datum instances described by the
     * {@link Schema readSchema}.
     * <p>
     * The {@code readSchema} is used for the expected schema and the {@code writeSchema} is
     * the schema used to decode buffers. The {@code writeSchema} must be the schema that was
     * used to encode all buffers decoded by this class.
     *
     * @param readSchema the schema used to construct datum instances
     * @param writeSchema the schema used to decode buffers
     */
    private RawDecoder(com.netflix.iceberg.Schema readSchema,
                       org.apache.avro.Schema writeSchema) {
      this.reader = new ProjectionDatumReader<>(DataReader::create, readSchema, ImmutableMap.of());
      this.reader.setSchema(writeSchema);
    }

    @Override
    public D decode(InputStream stream, D reuse) {
      // reuse the thread's BinaryDecoder; directBinaryDecoder reinitializes it for the stream
      BinaryDecoder decoder = DecoderFactory.get().directBinaryDecoder(stream, DECODER.get());
      DECODER.set(decoder);
      try {
        return reader.read(reuse, decoder);
      } catch (IOException e) {
        throw new AvroRuntimeException("Decoding datum failed", e);
      }
    }
  }

  /**
   * Reads a buffer from a stream, making multiple read calls if necessary.
   *
   * @param stream an InputStream to read from
   * @param bytes a buffer
   * @return true if the buffer is complete, false otherwise (stream ended)
   * @throws IOException if there is an error while reading
   */
  private boolean readFully(InputStream stream, byte[] bytes) throws IOException {
    int pos = 0;
    int bytesRead;
    while ((bytes.length - pos) > 0 &&
        (bytesRead = stream.read(bytes, pos, bytes.length - pos)) > 0) {
      pos += bytesRead;
    }
    return (pos == bytes.length);
  }
}
2,203
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/DataReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.collect.MapMaker;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.LogicalMap;
import com.netflix.iceberg.avro.ValueReader;
import com.netflix.iceberg.avro.ValueReaders;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * An Avro {@link DatumReader} that produces generic records for a read schema, resolving
 * each file's write schema against it with a cached {@link ResolvingDecoder}.
 */
public class DataReader<T> implements DatumReader<T> {
  // per-thread cache: read schema -> (file schema -> ResolvingDecoder); ResolvingDecoder is
  // stateful, so instances are never shared across threads. weakKeys lets entries be
  // collected when a read schema is no longer referenced.
  private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
      ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());

  /** Returns a new reader for the given read schema. */
  public static <D> DataReader<D> create(Schema readSchema) {
    return new DataReader<>(readSchema);
  }

  private final Schema readSchema;
  private final ValueReader<T> reader;
  private Schema fileSchema = null;

  @SuppressWarnings("unchecked")
  private DataReader(Schema readSchema) {
    this.readSchema = readSchema;
    // build the tree of value readers that materializes datums for the read schema
    this.reader = (ValueReader<T>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder());
  }

  @Override
  public void setSchema(Schema fileSchema) {
    // apply the read schema's aliases so resolution can match renamed fields
    this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
  }

  @Override
  public T read(T reuse, Decoder decoder) throws IOException {
    ResolvingDecoder resolver = resolve(decoder);
    T value = reader.read(resolver, reuse);
    // drain any trailing fields of the write schema that the read schema skipped
    resolver.drain();
    return value;
  }

  // returns this thread's ResolvingDecoder for (readSchema, fileSchema), creating and
  // caching one on first use, configured to read from the given decoder
  private ResolvingDecoder resolve(Decoder decoder) throws IOException {
    Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
    Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
        .computeIfAbsent(readSchema, k -> new HashMap<>());

    ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
    if (resolver == null) {
      resolver = newResolver();
      fileSchemaToResolver.put(fileSchema, resolver);
    }

    resolver.configure(decoder);

    return resolver;
  }

  private ResolvingDecoder newResolver() {
    try {
      return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Schema visitor that assembles a {@link ValueReader} for each node of the read schema,
   * mapping Avro logical types to the generic readers in {@link GenericReaders}.
   */
  private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
    private ReadBuilder() {
    }

    @Override
    public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
      return GenericReaders.struct(AvroSchemaUtil.convert(record).asStructType(), fields);
    }

    @Override
    public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
      return ValueReaders.union(options);
    }

    @Override
    public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
      // a LogicalMap is an Avro array of key/value records standing in for a map with
      // non-string keys; split the element reader into its key and value readers
      if (array.getLogicalType() instanceof LogicalMap) {
        ValueReaders.StructReader<?> keyValueReader = (ValueReaders.StructReader) elementReader;
        ValueReader<?> keyReader = keyValueReader.reader(0);
        ValueReader<?> valueReader = keyValueReader.reader(1);
        return ValueReaders.arrayMap(keyReader, valueReader);
      }

      return ValueReaders.array(elementReader);
    }

    @Override
    public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
      return ValueReaders.map(ValueReaders.strings(), valueReader);
    }

    @Override
    public ValueReader<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return GenericReaders.dates();

          case "time-micros":
            return GenericReaders.times();

          case "timestamp-micros":
            // distinguish zoned from local timestamps via the schema's adjust-to-UTC flag
            if (AvroSchemaUtil.isTimestamptz(primitive)) {
              return GenericReaders.timestamptz();
            }
            return GenericReaders.timestamps();

          case "decimal":
            ValueReader<byte[]> inner;
            switch (primitive.getType()) {
              case FIXED:
                inner = ValueReaders.fixed(primitive.getFixedSize());
                break;
              case BYTES:
                inner = ValueReaders.bytes();
                break;
              default:
                throw new IllegalArgumentException(
                    "Invalid primitive type for decimal: " + primitive.getType());
            }

            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueReaders.decimal(inner, decimal.getScale());

          case "uuid":
            return ValueReaders.uuids();

          default:
            throw new IllegalArgumentException("Unknown logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueReaders.nulls();
        case BOOLEAN:
          return ValueReaders.booleans();
        case INT:
          return ValueReaders.ints();
        case LONG:
          return ValueReaders.longs();
        case FLOAT:
          return ValueReaders.floats();
        case DOUBLE:
          return ValueReaders.doubles();
        case STRING:
          // might want to use a binary-backed container like Utf8
          return ValueReaders.strings();
        case FIXED:
          return ValueReaders.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueReaders.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
2,204
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/GenericWriters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.netflix.iceberg.avro.ValueWriter;
import com.netflix.iceberg.avro.ValueWriters;
import com.netflix.iceberg.data.Record;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.List;

/**
 * Value writers for java.time date/time values and generic {@link Record}s: dates are
 * written as days since the epoch, times as microseconds of day, and timestamps as
 * microseconds since the epoch.
 */
class GenericWriters {
  private GenericWriters() {
  }

  static ValueWriter<LocalDate> dates() {
    return DateWriter.INSTANCE;
  }

  static ValueWriter<LocalTime> times() {
    return TimeWriter.INSTANCE;
  }

  static ValueWriter<LocalDateTime> timestamps() {
    return TimestampWriter.INSTANCE;
  }

  static ValueWriter<OffsetDateTime> timestamptz() {
    return TimestamptzWriter.INSTANCE;
  }

  static ValueWriter<Record> struct(List<ValueWriter<?>> writers) {
    return new GenericRecordWriter(writers);
  }

  // 1970-01-01T00:00:00Z as an OffsetDateTime, the reference point for timestamps
  private static final OffsetDateTime EPOCH_UTC = Instant.EPOCH.atOffset(ZoneOffset.UTC);

  /** Writes a date as its epoch-day ordinal (days since 1970-01-01). */
  private static class DateWriter implements ValueWriter<LocalDate> {
    private static final DateWriter INSTANCE = new DateWriter();

    private DateWriter() {
    }

    @Override
    public void write(LocalDate date, Encoder encoder) throws IOException {
      encoder.writeInt((int) date.toEpochDay());
    }
  }

  /** Writes a time of day as microseconds since midnight. */
  private static class TimeWriter implements ValueWriter<LocalTime> {
    private static final TimeWriter INSTANCE = new TimeWriter();

    private TimeWriter() {
    }

    @Override
    public void write(LocalTime time, Encoder encoder) throws IOException {
      long microOfDay = time.toNanoOfDay() / 1000;
      encoder.writeLong(microOfDay);
    }
  }

  /** Writes a local timestamp as microseconds since the epoch, interpreted as UTC. */
  private static class TimestampWriter implements ValueWriter<LocalDateTime> {
    private static final TimestampWriter INSTANCE = new TimestampWriter();

    private TimestampWriter() {
    }

    @Override
    public void write(LocalDateTime timestamp, Encoder encoder) throws IOException {
      long micros = ChronoUnit.MICROS.between(EPOCH_UTC, timestamp.atOffset(ZoneOffset.UTC));
      encoder.writeLong(micros);
    }
  }

  /** Writes a zoned timestamp as microseconds since the epoch. */
  private static class TimestamptzWriter implements ValueWriter<OffsetDateTime> {
    private static final TimestamptzWriter INSTANCE = new TimestamptzWriter();

    private TimestamptzWriter() {
    }

    @Override
    public void write(OffsetDateTime timestamptz, Encoder encoder) throws IOException {
      long micros = ChronoUnit.MICROS.between(EPOCH_UTC, timestamptz);
      encoder.writeLong(micros);
    }
  }

  /** Writes a generic {@link Record} by delegating each position to its field writer. */
  private static class GenericRecordWriter extends ValueWriters.StructWriter<Record> {
    private GenericRecordWriter(List<ValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(Record struct, int pos) {
      return struct.get(pos);
    }
  }
}
2,205
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/DataWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.LogicalMap;
import com.netflix.iceberg.avro.ValueWriter;
import com.netflix.iceberg.avro.ValueWriters;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.avro.AvroSchemaVisitor.visit;

/**
 * An Avro {@link DatumWriter} for generic records: builds a tree of {@link ValueWriter}s
 * for the given schema and delegates {@link #write(Object, Encoder)} to it.
 */
public class DataWriter<T> implements DatumWriter<T> {
  private ValueWriter<T> writer = null;

  /** Returns a new writer for the given Avro schema. */
  public static <D> DataWriter<D> create(Schema schema) {
    return new DataWriter<>(schema);
  }

  private DataWriter(Schema schema) {
    setSchema(schema);
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema schema) {
    // rebuild the writer tree for the new schema
    this.writer = (ValueWriter<T>) visit(schema, new WriteBuilder());
  }

  @Override
  public void write(T datum, Encoder out) throws IOException {
    writer.write(datum, out);
  }

  /**
   * Schema visitor that assembles a {@link ValueWriter} for each schema node, mapping Avro
   * logical types to the generic writers in {@link GenericWriters}.
   */
  private static class WriteBuilder extends AvroSchemaVisitor<ValueWriter<?>> {
    private WriteBuilder() {
    }

    @Override
    public ValueWriter<?> record(Schema record, List<String> names, List<ValueWriter<?>> fields) {
      return GenericWriters.struct(fields);
    }

    @Override
    public ValueWriter<?> union(Schema union, List<ValueWriter<?>> options) {
      // only 2-branch option unions (null + one type) are supported; use the lazy "%s"
      // template overload so the message is formatted only on failure
      Preconditions.checkArgument(options.contains(ValueWriters.nulls()),
          "Cannot create writer for non-option union: %s", union);
      Preconditions.checkArgument(options.size() == 2,
          "Cannot create writer for non-option union: %s", union);
      if (union.getTypes().get(0).getType() == Schema.Type.NULL) {
        return ValueWriters.option(0, options.get(1));
      } else {
        return ValueWriters.option(1, options.get(0));
      }
    }

    @Override
    public ValueWriter<?> array(Schema array, ValueWriter<?> elementWriter) {
      // a LogicalMap is an Avro array of key/value records standing in for a map with
      // non-string keys; split the element writer into its key and value writers
      if (array.getLogicalType() instanceof LogicalMap) {
        ValueWriters.StructWriter<?> keyValueWriter = (ValueWriters.StructWriter<?>) elementWriter;
        return ValueWriters.arrayMap(keyValueWriter.writer(0), keyValueWriter.writer(1));
      }

      return ValueWriters.array(elementWriter);
    }

    @Override
    public ValueWriter<?> map(Schema map, ValueWriter<?> valueWriter) {
      return ValueWriters.map(ValueWriters.strings(), valueWriter);
    }

    @Override
    public ValueWriter<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return GenericWriters.dates();

          case "time-micros":
            return GenericWriters.times();

          case "timestamp-micros":
            // distinguish zoned from local timestamps via the schema's adjust-to-UTC flag
            if (AvroSchemaUtil.isTimestamptz(primitive)) {
              return GenericWriters.timestamptz();
            }
            return GenericWriters.timestamps();

          case "decimal":
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueWriters.decimal(decimal.getPrecision(), decimal.getScale());

          case "uuid":
            return ValueWriters.uuids();

          default:
            throw new IllegalArgumentException("Unsupported logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueWriters.nulls();
        case BOOLEAN:
          return ValueWriters.booleans();
        case INT:
          return ValueWriters.ints();
        case LONG:
          return ValueWriters.longs();
        case FLOAT:
          return ValueWriters.floats();
        case DOUBLE:
          return ValueWriters.doubles();
        case STRING:
          return ValueWriters.strings();
        case FIXED:
          return ValueWriters.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueWriters.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
2,206
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/GenericReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.netflix.iceberg.avro.ValueReader;
import com.netflix.iceberg.avro.ValueReaders;
import com.netflix.iceberg.data.GenericRecord;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.avro.io.Decoder;
import java.io.IOException;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.List;

/**
 * Value readers that materialize java.time date/time values and generic {@link Record}s:
 * dates are read as days since the epoch, times as microseconds of day, and timestamps as
 * microseconds since the epoch — mirroring the encodings written by GenericWriters.
 */
class GenericReaders {
  private GenericReaders() {
  }

  static ValueReader<LocalDate> dates() {
    return DateReader.INSTANCE;
  }

  static ValueReader<LocalTime> times() {
    return TimeReader.INSTANCE;
  }

  static ValueReader<LocalDateTime> timestamps() {
    return TimestampReader.INSTANCE;
  }

  static ValueReader<OffsetDateTime> timestamptz() {
    return TimestamptzReader.INSTANCE;
  }

  static ValueReader<Record> struct(StructType struct, List<ValueReader<?>> readers) {
    return new GenericRecordReader(readers, struct);
  }

  // 1970-01-01T00:00:00Z, the reference point for all date/time decodings below
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /** Reads an int of days since the epoch as a {@link LocalDate}. */
  private static class DateReader implements ValueReader<LocalDate> {
    private static final DateReader INSTANCE = new DateReader();

    private DateReader() {
    }

    @Override
    public LocalDate read(Decoder decoder, Object reuse) throws IOException {
      return EPOCH_DAY.plusDays(decoder.readInt());
    }
  }

  /** Reads a long of microseconds since midnight as a {@link LocalTime}. */
  private static class TimeReader implements ValueReader<LocalTime> {
    private static final TimeReader INSTANCE = new TimeReader();

    private TimeReader() {
    }

    @Override
    public LocalTime read(Decoder decoder, Object reuse) throws IOException {
      // stored value is micros; LocalTime wants nanos
      return LocalTime.ofNanoOfDay(decoder.readLong() * 1000);
    }
  }

  /** Reads a long of microseconds since the epoch as a {@link LocalDateTime} (UTC-based). */
  private static class TimestampReader implements ValueReader<LocalDateTime> {
    private static final TimestampReader INSTANCE = new TimestampReader();

    private TimestampReader() {
    }

    @Override
    public LocalDateTime read(Decoder decoder, Object reuse) throws IOException {
      return EPOCH.plus(decoder.readLong(), ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  /** Reads a long of microseconds since the epoch as a UTC {@link OffsetDateTime}. */
  private static class TimestamptzReader implements ValueReader<OffsetDateTime> {
    private static final TimestamptzReader INSTANCE = new TimestamptzReader();

    private TimestamptzReader() {
    }

    @Override
    public OffsetDateTime read(Decoder decoder, Object reuse) throws IOException {
      return EPOCH.plus(decoder.readLong(), ChronoUnit.MICROS);
    }
  }

  /**
   * Reads a struct into a generic {@link Record}, reusing a provided {@link Record}
   * container when possible.
   */
  private static class GenericRecordReader extends ValueReaders.StructReader<Record> {
    private final StructType struct;

    private GenericRecordReader(List<ValueReader<?>> readers, StructType struct) {
      super(readers);
      this.struct = struct;
    }

    @Override
    protected Record reuseOrCreate(Object reuse) {
      // only reuse containers that are already Records; otherwise allocate a fresh one
      if (reuse instanceof Record) {
        return (Record) reuse;
      } else {
        return GenericRecord.create(struct);
      }
    }

    @Override
    protected Object get(Record struct, int pos) {
      return struct.get(pos);
    }

    @Override
    protected void set(Record struct, int pos, Object value) {
      struct.set(pos, value);
    }
  }
}
2,207
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/avro/IcebergEncoder.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.avro;

import com.google.common.primitives.Bytes;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import org.apache.avro.AvroRuntimeException;
import org.apache.avro.SchemaNormalization;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
import org.apache.avro.message.MessageEncoder;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.security.NoSuchAlgorithmException;

/**
 * A {@link MessageEncoder} that serializes datum instances described by an Iceberg
 * {@link Schema} using Avro single-object encoding: a 2-byte version marker, the
 * CRC-64-AVRO fingerprint of the write schema, then the Avro-encoded body.
 * <p>
 * Output buffers and binary encoders are cached per thread, so one instance can be
 * shared across threads.
 *
 * @param <D> the type of datum accepted by this encoder
 */
public class IcebergEncoder<D> implements MessageEncoder<D> {

  // single-object encoding v1 marker bytes
  static final byte[] V1_HEADER = new byte[] {(byte) 0xC3, (byte) 0x01};

  // per-thread reusable output buffer and Avro binary encoder
  private static final ThreadLocal<BufferOutputStream> TEMP =
      ThreadLocal.withInitial(BufferOutputStream::new);
  private static final ThreadLocal<BinaryEncoder> ENCODER = new ThreadLocal<>();

  private final byte[] headerBytes;
  private final boolean copyOutputBytes;
  private final DatumWriter<D> writer;

  /**
   * Creates a new {@link MessageEncoder} that will deconstruct datum instances described by the
   * {@link Schema schema}.
   * <p>
   * Buffers returned by {@code encode} are copied and will not be modified by future calls to
   * {@code encode}.
   *
   * @param schema the {@link Schema} for datum instances
   */
  public IcebergEncoder(Schema schema) {
    this(schema, true);
  }

  /**
   * Creates a new {@link MessageEncoder} that will deconstruct datum instances described by the
   * {@link Schema schema}.
   * <p>
   * If {@code shouldCopy} is true, buffers returned by {@code encode} are copied and will not be
   * modified by future calls to {@code encode}. If it is false, returned buffers wrap a
   * thread-local buffer that may be reused by this thread's next call to {@code encode}; callers
   * must copy the buffer before that call.
   *
   * @param schema the {@link Schema} for datum instances
   * @param shouldCopy whether to copy buffers before returning encoded results
   */
  public IcebergEncoder(Schema schema, boolean shouldCopy) {
    this.copyOutputBytes = shouldCopy;
    org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(schema, "table");
    this.writer = DataWriter.create(avroSchema);
    this.headerBytes = getWriteHeader(avroSchema);
  }

  @Override
  public ByteBuffer encode(D datum) throws IOException {
    BufferOutputStream buffer = TEMP.get();
    buffer.reset();
    buffer.write(headerBytes);
    encode(datum, buffer);
    // either snapshot the bytes or expose the thread-local buffer directly
    return copyOutputBytes ? buffer.toBufferWithCopy() : buffer.toBufferWithoutCopy();
  }

  @Override
  public void encode(D datum, OutputStream stream) throws IOException {
    // reuse this thread's encoder when one exists, and remember it for the next call
    BinaryEncoder reused = EncoderFactory.get().directBinaryEncoder(stream, ENCODER.get());
    ENCODER.set(reused);
    writer.write(datum, reused);
    reused.flush();
  }

  /** A {@link ByteArrayOutputStream} that can expose its internal buffer without copying. */
  private static class BufferOutputStream extends ByteArrayOutputStream {
    BufferOutputStream() {
    }

    ByteBuffer toBufferWithoutCopy() {
      // wraps the internal array; contents are only valid until the next encode call
      return ByteBuffer.wrap(buf, 0, count);
    }

    ByteBuffer toBufferWithCopy() {
      return ByteBuffer.wrap(toByteArray());
    }
  }

  /** Builds the per-schema header: the V1 marker followed by the schema fingerprint. */
  private static byte[] getWriteHeader(org.apache.avro.Schema schema) {
    try {
      byte[] fingerprint = SchemaNormalization.parsingFingerprint("CRC-64-AVRO", schema);
      return Bytes.concat(V1_HEADER, fingerprint);
    } catch (NoSuchAlgorithmException e) {
      // CRC-64-AVRO is built into Avro, so this should be unreachable
      throw new AvroRuntimeException(e);
    }
  }
}
2,208
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/parquet/GenericParquetWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.Lists;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.parquet.ParquetTypeVisitor;
import com.netflix.iceberg.parquet.ParquetValueWriter;
import com.netflix.iceberg.parquet.ParquetValueWriters.PrimitiveWriter;
import com.netflix.iceberg.parquet.ParquetValueWriters.StructWriter;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.io.api.Binary;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List;

import static com.netflix.iceberg.parquet.ParquetValueWriters.byteBuffers;
import static com.netflix.iceberg.parquet.ParquetValueWriters.collections;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsFixed;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsInteger;
import static com.netflix.iceberg.parquet.ParquetValueWriters.decimalAsLong;
import static com.netflix.iceberg.parquet.ParquetValueWriters.maps;
import static com.netflix.iceberg.parquet.ParquetValueWriters.option;
import static com.netflix.iceberg.parquet.ParquetValueWriters.strings;
import static com.netflix.iceberg.parquet.ParquetValueWriters.unboxed;

/**
 * Builds {@link ParquetValueWriter} trees that write generic Iceberg data ({@link Record}
 * structs, java.time values, byte arrays) to a Parquet file schema.
 */
public class GenericParquetWriter {
  private GenericParquetWriter() {
  }

  /**
   * Builds a writer for the given Parquet file schema.
   *
   * @param type the Parquet file schema to write
   * @param <T> the root value type produced by the writer (cast from the visitor result)
   * @return a writer for the schema's root struct
   */
  @SuppressWarnings("unchecked")
  public static <T> ParquetValueWriter<T> buildWriter(MessageType type) {
    return (ParquetValueWriter<T>) ParquetTypeVisitor.visit(type, new WriteBuilder(type));
  }

  /**
   * Schema visitor that assembles a writer per Parquet type. Definition and repetition
   * levels are looked up from the full file schema by column path.
   */
  private static class WriteBuilder extends ParquetTypeVisitor<ParquetValueWriter<?>> {
    // the complete file schema, used for definition/repetition level lookups
    private final MessageType type;

    WriteBuilder(MessageType type) {
      this.type = type;
    }

    @Override
    public ParquetValueWriter<?> message(MessageType message, List<ParquetValueWriter<?>> fieldWriters) {
      // the root message is written the same way as any nested struct
      return struct(message.asGroupType(), fieldWriters);
    }

    @Override
    public ParquetValueWriter<?> struct(GroupType struct, List<ParquetValueWriter<?>> fieldWriters) {
      List<Type> fields = struct.getFields();
      List<ParquetValueWriter<?>> writers = Lists.newArrayListWithExpectedSize(fieldWriters.size());
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = struct.getType(i);
        // wrap each field writer with optional/required handling at the field's definition level
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()));
        writers.add(option(fieldType, fieldD, fieldWriters.get(i)));
      }
      return new RecordWriter(writers);
    }

    @Override
    public ParquetValueWriter<?> list(GroupType array, ParquetValueWriter<?> elementWriter) {
      // 3-level list representation: array -> repeated group -> element
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
      org.apache.parquet.schema.Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName()));
      return collections(repeatedD, repeatedR, option(elementType, elementD, elementWriter));
    }

    @Override
    public ParquetValueWriter<?> map(GroupType map, ParquetValueWriter<?> keyWriter, ParquetValueWriter<?> valueWriter) {
      // 3-level map representation: map -> repeated key_value group -> (key, value)
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath);
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath);
      org.apache.parquet.schema.Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName()));
      org.apache.parquet.schema.Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName()));
      return maps(repeatedD, repeatedR,
          option(keyType, keyD, keyWriter),
          option(valueType, valueD, valueWriter));
    }

    @Override
    public ParquetValueWriter<?> primitive(PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());

      // logical (original) type annotations take precedence over the physical type
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            return strings(desc);
          case INT_8:
          case INT_16:
          case INT_32:
          case INT_64:
            return unboxed(desc);
          case DATE:
            return new DateWriter(desc);
          case TIME_MICROS:
            return new TimeWriter(desc);
          case TIMESTAMP_MICROS:
            // NOTE(review): always uses TimestamptzWriter (expects OffsetDateTime); TimestampWriter
            // below is never selected, so LocalDateTime values would presumably fail here — confirm
            return new TimestamptzWriter(desc);
          case DECIMAL:
            // physical representation depends on precision: int, long, or fixed bytes
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case INT32:
                return decimalAsInteger(desc, decimal.getPrecision(), decimal.getScale());
              case INT64:
                return decimalAsLong(desc, decimal.getPrecision(), decimal.getScale());
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return decimalAsFixed(desc, decimal.getPrecision(), decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return byteBuffers(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      // no logical type: dispatch on the physical type
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          return new FixedWriter(desc);
        case BINARY:
          return byteBuffers(desc);
        case BOOLEAN:
        case INT32:
        case INT64:
        case FLOAT:
        case DOUBLE:
          return unboxed(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    // builds the column path for the current position from the visitor's field-name stack
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        // fieldNames is a stack; descendingIterator yields names from root to leaf
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }

    // builds the column path for a child field: the current path plus the given name
    private String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
  }

  // epoch anchors used to convert java.time values into epoch-based integer encodings
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /** Writes a {@link LocalDate} as an int of days from the epoch day. */
  private static class DateWriter extends PrimitiveWriter<LocalDate> {
    private DateWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalDate value) {
      column.writeInteger(repetitionLevel, (int) ChronoUnit.DAYS.between(EPOCH_DAY, value));
    }
  }

  /** Writes a {@link LocalTime} as a long of microseconds from midnight. */
  private static class TimeWriter extends PrimitiveWriter<LocalTime> {
    private TimeWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalTime value) {
      // nanos-of-day to micros-of-day
      column.writeLong(repetitionLevel, value.toNanoOfDay() / 1000);
    }
  }

  /**
   * Writes a {@link LocalDateTime} (interpreted as UTC) as microseconds from the epoch.
   * NOTE(review): not referenced by WriteBuilder above — confirm whether TIMESTAMP_MICROS
   * without UTC adjustment should map to this writer.
   */
  private static class TimestampWriter extends PrimitiveWriter<LocalDateTime> {
    private TimestampWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, LocalDateTime value) {
      column.writeLong(repetitionLevel, ChronoUnit.MICROS.between(EPOCH, value.atOffset(ZoneOffset.UTC)));
    }
  }

  /** Writes an {@link OffsetDateTime} as microseconds from the UTC epoch. */
  private static class TimestamptzWriter extends PrimitiveWriter<OffsetDateTime> {
    private TimestamptzWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, OffsetDateTime value) {
      column.writeLong(repetitionLevel, ChronoUnit.MICROS.between(EPOCH, value));
    }
  }

  /** Writes a byte[] as a fixed-length binary value; the array is not copied. */
  private static class FixedWriter extends PrimitiveWriter<byte[]> {
    private FixedWriter(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public void write(int repetitionLevel, byte[] value) {
      column.writeBinary(repetitionLevel, Binary.fromReusedByteArray(value));
    }
  }

  /** Struct writer that reads field values positionally from a generic {@link Record}. */
  private static class RecordWriter extends StructWriter<Record> {
    private RecordWriter(List<ParquetValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(Record struct, int index) {
      return struct.get(index);
    }
  }
}
2,209
0
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data
Create_ds/iceberg/data/src/main/java/com/netflix/iceberg/data/parquet/GenericParquetReaders.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.data.parquet;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.data.GenericRecord;
import com.netflix.iceberg.data.Record;
import com.netflix.iceberg.parquet.ParquetValueReader;
import com.netflix.iceberg.parquet.ParquetValueReaders;
import com.netflix.iceberg.parquet.ParquetValueReaders.BinaryAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.BytesReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.ListReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.MapReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.PrimitiveReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StringReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import com.netflix.iceberg.parquet.TypeWithSchemaVisitor;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.StructType;
import com.netflix.iceberg.types.Types.TimestampType;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import java.time.Instant;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetValueReaders.option;

/**
 * Builds {@link ParquetValueReader} trees that read Parquet files into generic Iceberg data
 * ({@link GenericRecord} structs, java.time values, byte arrays), projecting the file schema
 * onto an expected Iceberg schema by field id.
 */
public class GenericParquetReaders {
  private GenericParquetReaders() {
  }

  /**
   * Builds a reader that projects the file schema onto the expected schema.
   *
   * @param expectedSchema the Iceberg schema to produce
   * @param fileSchema the Parquet schema of the file being read
   * @return a reader producing {@link GenericRecord} rows
   */
  @SuppressWarnings("unchecked")
  public static ParquetValueReader<GenericRecord> buildReader(Schema expectedSchema,
                                                              MessageType fileSchema) {
    if (hasIds(fileSchema)) {
      // the file carries Iceberg field ids: match file columns to expected fields by id
      return (ParquetValueReader<GenericRecord>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new ReadBuilder(fileSchema));
    } else {
      // no ids in the file: fall back to positional matching
      return (ParquetValueReader<GenericRecord>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new FallbackReadBuilder(fileSchema));
    }
  }

  /**
   * Read builder used when the file schema has no field ids; structs are read positionally
   * instead of by id.
   */
  private static class FallbackReadBuilder extends ReadBuilder {
    FallbackReadBuilder(MessageType type) {
      super(type);
    }

    @Override
    public ParquetValueReader<?> message(StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      // the top level matches by ID, but the remaining IDs are missing
      return super.struct(expected, message, fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // the expected struct is ignored because nested fields are never found when the
      // file schema has no field ids; readers are kept in the file's field order
      List<ParquetValueReader<?>> newFields = Lists.newArrayListWithExpectedSize(
          fieldReaders.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size());
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        // field definition level, relative to this struct (hence the -1)
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        newFields.add(option(fieldType, fieldD, fieldReaders.get(i)));
        types.add(fieldType);
      }
      return new RecordReader(types, newFields, expected);
    }
  }

  /**
   * Schema-with-ids visitor that assembles a reader per Parquet type, reordering struct
   * fields to the expected schema's order and filling missing fields with null readers.
   */
  private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
    // the complete file schema, used for definition/repetition level lookups
    final MessageType type;

    ReadBuilder(MessageType type) {
      this.type = type;
    }

    @Override
    public ParquetValueReader<?> message(StructType expected, MessageType message,
                                         List<ParquetValueReader<?>> fieldReaders) {
      return struct(expected, message.asGroupType(), fieldReaders);
    }

    @Override
    public ParquetValueReader<?> struct(StructType expected, GroupType struct,
                                        List<ParquetValueReader<?>> fieldReaders) {
      // match the expected struct's order
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        // field definition level, relative to this struct (hence the -1)
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        int id = fieldType.getId().intValue();
        readersById.put(id, option(fieldType, fieldD, fieldReaders.get(i)));
        typesById.put(id, fieldType);
      }

      // reorder readers to the expected field order; expected fields missing from the
      // file are read as nulls
      List<Types.NestedField> expectedFields = expected != null ?
          expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
          expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
      for (Types.NestedField field : expectedFields) {
        int id = field.fieldId();
        ParquetValueReader<?> reader = readersById.get(id);
        if (reader != null) {
          reorderedFields.add(reader);
          types.add(typesById.get(id));
        } else {
          reorderedFields.add(ParquetValueReaders.nulls());
          types.add(null);
        }
      }

      return new RecordReader(types, reorderedFields, expected);
    }

    @Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array,
                                      ParquetValueReader<?> elementReader) {
      // 3-level list representation: array -> repeated group -> element
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
      Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;
      return new ListReader<>(repeatedD, repeatedR, option(elementType, elementD, elementReader));
    }

    @Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map,
                                     ParquetValueReader<?> keyReader,
                                     ParquetValueReader<?> valueReader) {
      // 3-level map representation: map -> repeated key_value group -> (key, value)
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
      Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
      Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;
      return new MapReader<>(repeatedD, repeatedR,
          option(keyType, keyD, keyReader),
          option(valueType, valueD, valueReader));
    }

    @Override
    public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected,
                                           PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());

      // logical (original) type annotations take precedence over the physical type
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8:
            return new StringReader(desc);
          case INT_8:
          case INT_16:
          case INT_32:
            // promote small ints when the expected Iceberg type is long
            // NOTE(review): expected is dereferenced without a null check here, unlike the
            // physical-type cases below — confirm expected is always non-null on this path
            if (expected.typeId() == TypeID.LONG) {
              return new IntAsLongReader(desc);
            } else {
              return new UnboxedReader<>(desc);
            }
          case INT_64:
            return new UnboxedReader<>(desc);
          case DATE:
            return new DateReader(desc);
          case TIMESTAMP_MICROS:
            // with/without UTC adjustment decides OffsetDateTime vs LocalDateTime
            TimestampType tsMicrosType = (TimestampType) expected;
            if (tsMicrosType.shouldAdjustToUTC()) {
              return new TimestamptzReader(desc);
            } else {
              return new TimestampReader(desc);
            }
          case TIMESTAMP_MILLIS:
            TimestampType tsMillisType = (TimestampType) expected;
            if (tsMillisType.shouldAdjustToUTC()) {
              return new TimestamptzMillisReader(desc);
            } else {
              return new TimestampMillisReader(desc);
            }
          case DECIMAL:
            // physical representation depends on precision: bytes, long, or int
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY:
                return new BinaryAsDecimalReader(desc, decimal.getScale());
              case INT64:
                return new LongAsDecimalReader(desc, decimal.getScale());
              case INT32:
                return new IntegerAsDecimalReader(desc, decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          case BSON:
            return new BytesReader(desc);
          default:
            throw new UnsupportedOperationException(
                "Unsupported logical type: " + primitive.getOriginalType());
        }
      }

      // no logical type: dispatch on the physical type
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
          return new FixedReader(desc);
        case BINARY:
          return new BytesReader(desc);
        case INT32:
          // promote to long when the expected Iceberg type is long
          if (expected != null && expected.typeId() == TypeID.LONG) {
            return new IntAsLongReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case FLOAT:
          // promote to double when the expected Iceberg type is double
          if (expected != null && expected.typeId() == TypeID.DOUBLE) {
            return new ParquetValueReaders.FloatAsDoubleReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case BOOLEAN:
        case INT64:
        case DOUBLE:
          return new UnboxedReader<>(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }

    // builds the column path for the current position from the visitor's field-name stack
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        // fieldNames is a stack; descendingIterator yields names from root to leaf
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }

    // builds the column path for a child field: the current path plus the given name
    protected String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
  }

  // epoch anchors used to convert epoch-based integer encodings into java.time values
  private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
  private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate();

  /** Reads an int of days from epoch as a {@link LocalDate}. */
  private static class DateReader extends PrimitiveReader<LocalDate> {
    private DateReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDate read(LocalDate reuse) {
      return EPOCH_DAY.plusDays(column.nextInteger());
    }
  }

  /** Reads a long of microseconds from epoch as a {@link LocalDateTime}. */
  private static class TimestampReader extends PrimitiveReader<LocalDateTime> {
    private TimestampReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDateTime read(LocalDateTime reuse) {
      return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  /** Reads a long of milliseconds from epoch as a {@link LocalDateTime}. */
  private static class TimestampMillisReader extends PrimitiveReader<LocalDateTime> {
    private TimestampMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public LocalDateTime read(LocalDateTime reuse) {
      // millis to micros before adding to the epoch
      return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS).toLocalDateTime();
    }
  }

  /** Reads a long of microseconds from epoch as a UTC {@link OffsetDateTime}. */
  private static class TimestamptzReader extends PrimitiveReader<OffsetDateTime> {
    private TimestamptzReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public OffsetDateTime read(OffsetDateTime reuse) {
      return EPOCH.plus(column.nextLong(), ChronoUnit.MICROS);
    }
  }

  /** Reads a long of milliseconds from epoch as a UTC {@link OffsetDateTime}. */
  private static class TimestamptzMillisReader extends PrimitiveReader<OffsetDateTime> {
    private TimestamptzMillisReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public OffsetDateTime read(OffsetDateTime reuse) {
      // millis to micros before adding to the epoch
      return EPOCH.plus(column.nextLong() * 1000, ChronoUnit.MICROS);
    }
  }

  /** Reads a fixed-length binary value into a byte[], reusing the caller's array if provided. */
  private static class FixedReader extends PrimitiveReader<byte[]> {
    private FixedReader(ColumnDescriptor desc) {
      super(desc);
    }

    @Override
    public byte[] read(byte[] reuse) {
      if (reuse != null) {
        // copy into the reused array; duplicate() avoids disturbing the source buffer position
        column.nextBinary().toByteBuffer().duplicate().get(reuse);
        return reuse;
      } else {
        return column.nextBinary().getBytes();
      }
    }
  }

  /** Struct reader that materializes rows as {@link GenericRecord} instances. */
  static class RecordReader extends StructReader<Record, Record> {
    private final StructType struct;

    RecordReader(List<Type> types, List<ParquetValueReader<?>> readers, StructType struct) {
      super(types, readers);
      this.struct = struct;
    }

    @Override
    protected Record newStructData(Record reuse) {
      // reuse the caller's record if one was passed; otherwise allocate a new one
      if (reuse != null) {
        return reuse;
      } else {
        return GenericRecord.create(struct);
      }
    }

    @Override
    @SuppressWarnings("unchecked")
    protected Object getField(Record intermediate, int pos) {
      return intermediate.get(pos);
    }

    @Override
    protected Record buildStruct(Record struct) {
      // the intermediate record is the final result; no conversion needed
      return struct;
    }

    @Override
    protected void set(Record struct, int pos, Object value) {
      struct.set(pos, value);
    }
  }
}
2,210
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestIcebergSource.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.netflix.iceberg.Table; import org.apache.spark.sql.sources.v2.DataSourceOptions; public class TestIcebergSource extends IcebergSource { @Override public String shortName() { return "iceberg-test"; } @Override protected Table findTable(DataSourceOptions options) { return TestTables.load(options.get("iceberg.table.name").get()); } }
2,211
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestSparkReadProjection.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.TypeUtil; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Fixed; import org.apache.avro.generic.GenericData.Record; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.UUID; import static com.netflix.iceberg.Files.localOutput; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; import static org.apache.avro.Schema.Type.NULL; 
import static org.apache.avro.Schema.Type.UNION; @RunWith(Parameterized.class) public class TestSparkReadProjection extends TestReadProjection { private static SparkSession spark = null; @Parameterized.Parameters public static Object[][] parameters() { return new Object[][] { new Object[] { "parquet" }, new Object[] { "avro" } }; } public TestSparkReadProjection(String format) { super(format); } @BeforeClass public static void startSpark() { TestSparkReadProjection.spark = SparkSession.builder().master("local[2]").getOrCreate(); } @AfterClass public static void stopSpark() { SparkSession spark = TestSparkReadProjection.spark; TestSparkReadProjection.spark = null; spark.stop(); } @Override protected Record writeAndRead(String desc, Schema writeSchema, Schema readSchema, Record record) throws IOException { File parent = temp.newFolder(desc); File location = new File(parent, "test"); File dataFolder = new File(location, "data"); Assert.assertTrue("mkdirs should succeed", dataFolder.mkdirs()); FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH)); File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString())); Table table = TestTables.create(location, desc, writeSchema, PartitionSpec.unpartitioned()); try { // Important: use the table's schema for the rest of the test // When tables are created, the column ids are reassigned. 
Schema tableSchema = table.schema(); switch (fileFormat) { case AVRO: try (FileAppender<Record> writer = Avro.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.add(record); } break; case PARQUET: try (FileAppender<Record> writer = Parquet.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.add(record); } break; } DataFile file = DataFiles.builder(PartitionSpec.unpartitioned()) .withRecordCount(100) .withFileSizeInBytes(testFile.length()) .withPath(testFile.toString()) .build(); table.newAppend().appendFile(file).commit(); // rewrite the read schema for the table's reassigned ids Map<Integer, Integer> idMapping = Maps.newHashMap(); for (int id : allIds(writeSchema)) { // translate each id to the original schema's column name, then to the new schema's id String originalName = writeSchema.findColumnName(id); idMapping.put(id, tableSchema.findField(originalName).fieldId()); } Schema expectedSchema = reassignIds(readSchema, idMapping); // Set the schema to the expected schema directly to simulate the table schema evolving TestTables.replaceMetadata(desc, TestTables.readMetadata(desc).updateSchema(expectedSchema, 100)); Dataset<Row> df = spark.read() .format("com.netflix.iceberg.spark.source.TestIcebergSource") .option("iceberg.table.name", desc) .load(); // convert to Avro using the read schema so that the record schemas match return convert(AvroSchemaUtil.convert(readSchema, "table"), df.collectAsList().get(0)); } finally { TestTables.clearTables(); } } @SuppressWarnings("unchecked") private Object convert(org.apache.avro.Schema schema, Object object) { switch (schema.getType()) { case RECORD: return convert(schema, (Row) object); case ARRAY: List<Object> convertedList = Lists.newArrayList(); List<?> list = (List<?>) object; for (Object element : list) { convertedList.add(convert(schema.getElementType(), element)); } return convertedList; case MAP: Map<String, Object> convertedMap = Maps.newLinkedHashMap(); Map<String, ?> map = 
(Map<String, ?>) object; for (Map.Entry<String, ?> entry : map.entrySet()) { convertedMap.put(entry.getKey(), convert(schema.getValueType(), entry.getValue())); } return convertedMap; case UNION: if (object == null) { return null; } List<org.apache.avro.Schema> types = schema.getTypes(); if (types.get(0).getType() != NULL) { return convert(types.get(0), object); } else { return convert(types.get(1), object); } case FIXED: Fixed convertedFixed = new Fixed(schema); convertedFixed.bytes((byte[]) object); return convertedFixed; case BYTES: return ByteBuffer.wrap((byte[]) object); case BOOLEAN: case INT: case LONG: case FLOAT: case DOUBLE: case STRING: return object; case NULL: return null; default: throw new UnsupportedOperationException("Not a supported type: " + schema); } } private Record convert(org.apache.avro.Schema schema, Row row) { if (schema.getType() == UNION) { if (schema.getTypes().get(0).getType() != NULL) { schema = schema.getTypes().get(0); } else { schema = schema.getTypes().get(1); } } Record record = new Record(schema); List<org.apache.avro.Schema.Field> fields = schema.getFields(); for (int i = 0; i < fields.size(); i += 1) { org.apache.avro.Schema.Field field = fields.get(i); org.apache.avro.Schema fieldSchema = field.schema(); if (fieldSchema.getType() == UNION) { if (fieldSchema.getTypes().get(0).getType() != NULL) { fieldSchema = fieldSchema.getTypes().get(0); } else { fieldSchema = fieldSchema.getTypes().get(1); } } switch (fieldSchema.getType()) { case RECORD: record.put(i, convert(field.schema(), row.getStruct(i))); break; case ARRAY: record.put(i, convert(field.schema(), row.getList(i))); break; case MAP: record.put(i, convert(field.schema(), row.getJavaMap(i))); break; default: record.put(i, convert(field.schema(), row.get(i))); } } return record; } private List<Integer> allIds(Schema schema) { List<Integer> ids = Lists.newArrayList(); TypeUtil.visit(schema, new TypeUtil.SchemaVisitor<Void>() { @Override public Void field(Types.NestedField 
field, Void fieldResult) { ids.add(field.fieldId()); return null; } @Override public Void list(Types.ListType list, Void elementResult) { ids.add(list.elementId()); return null; } @Override public Void map(Types.MapType map, Void keyResult, Void valueResult) { ids.add(map.keyId()); ids.add(map.valueId()); return null; } }); return ids; } private Schema reassignIds(Schema schema, Map<Integer, Integer> idMapping) { return new Schema(TypeUtil.visit(schema, new TypeUtil.SchemaVisitor<Type>() { private int map(int id) { if (idMapping.containsKey(id)) { return idMapping.get(id); } return 1000 + id; // make sure the new IDs don't conflict with reassignment } @Override public Type schema(Schema schema, Type structResult) { return structResult; } @Override public Type struct(Types.StructType struct, List<Type> fieldResults) { List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fieldResults.size()); List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Types.NestedField field = fields.get(i); if (field.isOptional()) { newFields.add(optional(map(field.fieldId()), field.name(), fieldResults.get(i))); } else { newFields.add(required(map(field.fieldId()), field.name(), fieldResults.get(i))); } } return Types.StructType.of(newFields); } @Override public Type field(Types.NestedField field, Type fieldResult) { return fieldResult; } @Override public Type list(Types.ListType list, Type elementResult) { if (list.isElementOptional()) { return Types.ListType.ofOptional(map(list.elementId()), elementResult); } else { return Types.ListType.ofRequired(map(list.elementId()), elementResult); } } @Override public Type map(Types.MapType map, Type keyResult, Type valueResult) { if (map.isValueOptional()) { return Types.MapType.ofOptional( map(map.keyId()), map(map.valueId()), keyResult, valueResult); } else { return Types.MapType.ofRequired( map(map.keyId()), map(map.valueId()), keyResult, valueResult); } } @Override public Type 
primitive(Type.PrimitiveType primitive) { return primitive; } }).asNestedType().asStructType().fields()); } }
2,212
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestOrcScan.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.hadoop.HadoopTables; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.orc.ORC; import com.netflix.iceberg.orc.OrcFileAppender; import com.netflix.iceberg.spark.data.AvroDataTest; import com.netflix.iceberg.spark.data.RandomData; import com.netflix.iceberg.spark.data.SparkOrcWriter; import com.netflix.iceberg.spark.data.TestHelpers; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.serde2.io.TimestampWritable; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import org.apache.orc.storage.serde2.io.DateWritable; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.DateTimeUtils; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import 
org.junit.Rule; import org.junit.rules.TemporaryFolder; import java.io.File; import java.io.IOException; import java.sql.Date; import java.sql.Timestamp; import java.util.Iterator; import java.util.List; import java.util.UUID; import static com.netflix.iceberg.Files.localOutput; public class TestOrcScan extends AvroDataTest { private static final Configuration CONF = new Configuration(); @Rule public TemporaryFolder temp = new TemporaryFolder(); private static SparkSession spark = null; @BeforeClass public static void startSpark() { TestOrcScan.spark = SparkSession.builder().master("local[2]").getOrCreate(); } @AfterClass public static void stopSpark() { SparkSession spark = TestOrcScan.spark; TestOrcScan.spark = null; spark.stop(); } @Override protected void writeAndValidate(Schema schema) throws IOException { System.out.println("Starting ORC test with " + schema); final int ROW_COUNT = 100; final long SEED = 1; File parent = temp.newFolder("orc"); File location = new File(parent, "test"); File dataFolder = new File(location, "data"); dataFolder.mkdirs(); File orcFile = new File(dataFolder, FileFormat.ORC.addExtension(UUID.randomUUID().toString())); HadoopTables tables = new HadoopTables(CONF); Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString()); // Important: use the table's schema for the rest of the test // When tables are created, the column ids are reassigned. 
Schema tableSchema = table.schema(); Metrics metrics; SparkOrcWriter writer = new SparkOrcWriter(ORC.write(localOutput(orcFile)) .schema(tableSchema) .build()); try { writer.addAll(RandomData.generateSpark(tableSchema, ROW_COUNT, SEED)); } finally { writer.close(); // close writes the last batch, so metrics are not correct until after close is called metrics = writer.metrics(); } DataFile file = DataFiles.builder(PartitionSpec.unpartitioned()) .withFileSizeInBytes(orcFile.length()) .withPath(orcFile.toString()) .withMetrics(metrics) .build(); table.newAppend().appendFile(file).commit(); Dataset<Row> df = spark.read() .format("iceberg") .load(location.toString()); List<Row> rows = df.collectAsList(); Assert.assertEquals("Wrong number of rows", ROW_COUNT, rows.size()); Iterator<InternalRow> expected = RandomData.generateSpark(tableSchema, ROW_COUNT, SEED); for(int i=0; i < ROW_COUNT; ++i) { TestHelpers.assertEquals("row " + i, schema.asStruct(), expected.next(), rows.get(i)); } } }
2,213
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/SimpleRecord.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.base.Objects; public class SimpleRecord { private Integer id; private String data; public SimpleRecord() { } SimpleRecord(Integer id, String data) { this.id = id; this.data = data; } public Integer getId() { return id; } public void setId(Integer id) { this.id = id; } public String getData() { return data; } public void setData(String data) { this.data = data; } @Override public boolean equals(Object o) { if (this == o){ return true; } if (o == null || getClass() != o.getClass()){ return false; } SimpleRecord record = (SimpleRecord) o; return Objects.equal(id, record.id) && Objects.equal(data, record.data); } @Override public int hashCode() { return Objects.hashCode(id, data); } @Override public String toString() { StringBuilder buffer = new StringBuilder(); buffer.append("{\"id\"="); buffer.append(id); buffer.append(",\"data\"=\""); buffer.append(data); buffer.append("\"}"); return buffer.toString(); } }
2,214
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestDataFrameWrites.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.Lists; import com.netflix.iceberg.Files; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.TableProperties; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroIterable; import com.netflix.iceberg.hadoop.HadoopTables; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.spark.data.AvroDataTest; import com.netflix.iceberg.spark.data.RandomData; import com.netflix.iceberg.spark.data.SparkAvroReader; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.hadoop.conf.Configuration; import org.apache.spark.api.java.JavaRDD; import org.apache.spark.api.java.JavaSparkContext; import org.apache.spark.sql.DataFrameWriter; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalyst.InternalRow; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.net.URI; import java.util.List; import static com.netflix.iceberg.spark.SparkSchemaUtil.convert; import static 
com.netflix.iceberg.spark.data.TestHelpers.assertEqualsSafe; import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe; @RunWith(Parameterized.class) public class TestDataFrameWrites extends AvroDataTest { private static final Configuration CONF = new Configuration(); private final String format; @Parameterized.Parameters public static Object[][] parameters() { return new Object[][] { new Object[] { "parquet" }, new Object[] { "orc" }, new Object[] { "avro" } }; } public TestDataFrameWrites(String format) { this.format = format; } private static SparkSession spark = null; private static JavaSparkContext sc = null; @BeforeClass public static void startSpark() { TestDataFrameWrites.spark = SparkSession.builder().master("local[2]").getOrCreate(); TestDataFrameWrites.sc = new JavaSparkContext(spark.sparkContext()); } @AfterClass public static void stopSpark() { SparkSession spark = TestDataFrameWrites.spark; TestDataFrameWrites.spark = null; TestDataFrameWrites.sc = null; spark.stop(); } @Override protected void writeAndValidate(Schema schema) throws IOException { File location = createTableFolder(); Table table = createTable(schema, location); writeAndValidateWithLocations(table, location, new File(location, "data")); } @Test public void testWriteWithCustomDataLocation() throws IOException { File location = createTableFolder(); File tablePropertyDataLocation = temp.newFolder("test-table-property-data-dir"); Table table = createTable(new Schema(SUPPORTED_PRIMITIVES.fields()), location); table.updateProperties().set( TableProperties.WRITE_NEW_DATA_LOCATION, tablePropertyDataLocation.getAbsolutePath()).commit(); writeAndValidateWithLocations(table, location, tablePropertyDataLocation); } private File createTableFolder() throws IOException { File parent = temp.newFolder("parquet"); File location = new File(parent, "test"); Assert.assertTrue("Mkdir should succeed", location.mkdirs()); return location; } private Table createTable(Schema schema, File 
location) { HadoopTables tables = new HadoopTables(CONF); return tables.create(schema, PartitionSpec.unpartitioned(), location.toString()); } private void writeAndValidateWithLocations(Table table, File location, File expectedDataDir) throws IOException { Schema tableSchema = table.schema(); // use the table schema because ids are reassigned table.updateProperties().set(TableProperties.DEFAULT_FILE_FORMAT, format).commit(); List<Record> expected = RandomData.generateList(tableSchema, 100, 0L); Dataset<Row> df = createDataset(expected, tableSchema); DataFrameWriter<?> writer = df.write().format("iceberg").mode("append"); writer.save(location.toString()); table.refresh(); Dataset<Row> result = spark.read() .format("iceberg") .load(location.toString()); List<Row> actual = result.collectAsList(); Assert.assertEquals("Result size should match expected", expected.size(), actual.size()); for (int i = 0; i < expected.size(); i += 1) { assertEqualsSafe(tableSchema.asStruct(), expected.get(i), actual.get(i)); } table.currentSnapshot().addedFiles().forEach(dataFile -> Assert.assertTrue( String.format( "File should have the parent directory %s, but has: %s.", expectedDataDir.getAbsolutePath(), dataFile.path()), URI.create(dataFile.path().toString()).getPath().startsWith(expectedDataDir.getAbsolutePath()))); } private Dataset<Row> createDataset(List<Record> records, Schema schema) throws IOException { // this uses the SparkAvroReader to create a DataFrame from the list of records // it assumes that SparkAvroReader is correct File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile)) .schema(schema) .named("test") .build()) { for (Record rec : records) { writer.add(rec); } } List<InternalRow> rows; try (AvroIterable<InternalRow> reader = Avro.read(Files.localInput(testFile)) .createReaderFunc(SparkAvroReader::new) .project(schema) .build()) { rows = 
Lists.newArrayList(reader); } // make sure the dataframe matches the records before moving on for (int i = 0; i < records.size(); i += 1) { assertEqualsUnsafe(schema.asStruct(), records.get(i), rows.get(i)); } JavaRDD<InternalRow> rdd = sc.parallelize(rows); return spark.internalCreateDataFrame(JavaRDD.toRDD(rdd), convert(schema), false); } }
2,215
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestFilteredScan.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.hadoop.HadoopTables; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.spark.SparkExpressions; import com.netflix.iceberg.spark.data.TestHelpers; import com.netflix.iceberg.transforms.Transform; import com.netflix.iceberg.transforms.Transforms; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.hadoop.conf.Configuration; import org.apache.spark.sql.Dataset; import org.apache.spark.sql.Row; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.api.java.UDF1; import org.apache.spark.sql.catalyst.expressions.Expression; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import org.apache.spark.sql.sources.v2.DataSourceOptions; import org.apache.spark.sql.sources.v2.reader.DataSourceReader; import 
org.apache.spark.sql.sources.v2.reader.DataReaderFactory; import org.apache.spark.sql.sources.v2.reader.SupportsPushDownCatalystFilters; import org.apache.spark.sql.sources.v2.reader.SupportsScanUnsafeRow; import org.apache.spark.sql.types.DateType$; import org.apache.spark.sql.types.IntegerType$; import org.apache.spark.sql.types.StringType$; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import java.io.File; import java.io.IOException; import java.sql.Timestamp; import java.util.List; import java.util.Locale; import java.util.UUID; import static com.netflix.iceberg.Files.localOutput; import static org.apache.spark.sql.catalyst.util.DateTimeUtils.fromJavaTimestamp; import static org.apache.spark.sql.functions.callUDF; import static org.apache.spark.sql.functions.col; import static org.apache.spark.sql.functions.column; import static org.apache.spark.sql.functions.lit; import static org.apache.spark.sql.functions.to_date; @RunWith(Parameterized.class) public class TestFilteredScan { private static final Configuration CONF = new Configuration(); private static final HadoopTables TABLES = new HadoopTables(CONF); private static final Schema SCHEMA = new Schema( Types.NestedField.required(1, "id", Types.LongType.get()), Types.NestedField.optional(2, "ts", Types.TimestampType.withZone()), Types.NestedField.optional(3, "data", Types.StringType.get()) ); private static final PartitionSpec BUCKET_BY_ID = PartitionSpec.builderFor(SCHEMA) .bucket("id", 4) .build(); private static final PartitionSpec PARTITION_BY_DAY = PartitionSpec.builderFor(SCHEMA) .day("ts") .build(); private static final PartitionSpec PARTITION_BY_HOUR = PartitionSpec.builderFor(SCHEMA) .hour("ts") .build(); private static final PartitionSpec PARTITION_BY_FIRST_LETTER = 
PartitionSpec.builderFor(SCHEMA) .truncate("data", 1) .build(); private static SparkSession spark = null; @BeforeClass public static void startSpark() { TestFilteredScan.spark = SparkSession.builder().master("local[2]").getOrCreate(); // define UDFs used by partition tests Transform<Long, Integer> bucket4 = Transforms.bucket(Types.LongType.get(), 4); spark.udf().register("bucket4", (UDF1<Long, Integer>) bucket4::apply, IntegerType$.MODULE$); Transform<Long, Integer> day = Transforms.day(Types.TimestampType.withZone()); spark.udf().register("ts_day", (UDF1<Timestamp, Integer>) timestamp -> day.apply(fromJavaTimestamp(timestamp)), IntegerType$.MODULE$); Transform<Long, Integer> hour = Transforms.hour(Types.TimestampType.withZone()); spark.udf().register("ts_hour", (UDF1<Timestamp, Integer>) timestamp -> hour.apply(fromJavaTimestamp(timestamp)), IntegerType$.MODULE$); Transform<CharSequence, CharSequence> trunc1 = Transforms.truncate(Types.StringType.get(), 1); spark.udf().register("trunc1", (UDF1<CharSequence, CharSequence>) str -> trunc1.apply(str.toString()), StringType$.MODULE$); } @AfterClass public static void stopSpark() { SparkSession spark = TestFilteredScan.spark; TestFilteredScan.spark = null; spark.stop(); } @Rule public TemporaryFolder temp = new TemporaryFolder(); private final String format; @Parameterized.Parameters public static Object[][] parameters() { return new Object[][] { new Object[] { "parquet" }, new Object[] { "avro" } }; } public TestFilteredScan(String format) { this.format = format; } private File parent = null; private File unpartitioned = null; private List<Record> records = null; @Before public void writeUnpartitionedTable() throws IOException { this.parent = temp.newFolder("TestFilteredScan"); this.unpartitioned = new File(parent, "unpartitioned"); File dataFolder = new File(unpartitioned, "data"); Assert.assertTrue("Mkdir should succeed", dataFolder.mkdirs()); Table table = TABLES.create(SCHEMA, PartitionSpec.unpartitioned(), 
unpartitioned.toString()); Schema tableSchema = table.schema(); // use the table schema because ids are reassigned FileFormat fileFormat = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH)); File testFile = new File(dataFolder, fileFormat.addExtension(UUID.randomUUID().toString())); // create records using the table's schema org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(tableSchema, "test"); this.records = testRecords(avroSchema); switch (fileFormat) { case AVRO: try (FileAppender<Record> writer = Avro.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.addAll(records); } break; case PARQUET: try (FileAppender<Record> writer = Parquet.write(localOutput(testFile)) .schema(tableSchema) .build()) { writer.addAll(records); } break; } DataFile file = DataFiles.builder(PartitionSpec.unpartitioned()) .withRecordCount(records.size()) .withFileSizeInBytes(testFile.length()) .withPath(testFile.toString()) .build(); table.newAppend().appendFile(file).commit(); } @Test public void testUnpartitionedIDFilters() { DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", unpartitioned.toString()) ); IcebergSource source = new IcebergSource(); for (int i = 0; i < 10; i += 1) { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("id", i)); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should only create one task for a small file", 1, tasks.size()); // validate row filtering assertEqualsSafe(SCHEMA.asStruct(), expected(i), read(unpartitioned.toString(), "id = " + i)); } } @Test public void testUnpartitionedTimestampFilter() { DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", unpartitioned.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = 
planTasks(reader); Assert.assertEquals("Should only create one task for a small file", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5,6,7,8,9), read(unpartitioned.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } @Test public void testBucketPartitionedIDFilters() { File location = buildPartitionedTable("bucketed_by_id", BUCKET_BY_ID, "bucket4", "id"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 4 read tasks", 4, planTasks(unfiltered).size()); for (int i = 0; i < 10; i += 1) { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("id", i)); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); // validate predicate push-down Assert.assertEquals("Should create one task for a single bucket", 1, tasks.size()); // validate row filtering assertEqualsSafe(SCHEMA.asStruct(), expected(i), read(location.toString(), "id = " + i)); } } @Test public void testDayPartitionedTimestampFilters() { File location = buildPartitionedTable("partitioned_by_day", PARTITION_BY_DAY, "ts_day", "ts"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); int day = Literal.of("2017-12-21").<Integer>to(Types.DateType.get()).value(); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 2 read tasks", 2, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 
7, 8, 9), read(location.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, col("ts").cast(DateType$.MODULE$).$eq$eq$eq(lit(day)).expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 7, 8, 9), read(location.toString(), "cast(ts as date) = date '2017-12-21'")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, to_date(col("ts")).$eq$eq$eq(lit(day)).expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-21", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(5, 6, 7, 8, 9), read(location.toString(), "to_date(ts) = date '2017-12-21'")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.and( Expressions.greaterThan("ts", "2017-12-22T06:00:00+00:00"), Expressions.lessThan("ts", "2017-12-22T08:00:00+00:00"))); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create one task for 2017-12-22", 1, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(1, 2), read(location.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "ts < cast('2017-12-22 08:00:00+00:00' as timestamp)")); } } @Test public void testHourPartitionedTimestampFilters() { File location = buildPartitionedTable("partitioned_by_hour", PARTITION_BY_HOUR, "ts_hour", "ts"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should created 9 read tasks", 9, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, 
Expressions.lessThan("ts", "2017-12-22T00:00:00+00:00")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 4 tasks for 2017-12-21: 15, 17, 21, 22", 4, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(8, 9, 7, 6, 5), read(location.toString(), "ts < cast('2017-12-22 00:00:00+00:00' as timestamp)")); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.and( Expressions.greaterThan("ts", "2017-12-22T06:00:00+00:00"), Expressions.lessThan("ts", "2017-12-22T08:00:00+00:00"))); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 2 tasks for 2017-12-22: 6, 7", 2, tasks.size()); assertEqualsSafe(SCHEMA.asStruct(), expected(2, 1), read(location.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "ts < cast('2017-12-22 08:00:00+00:00' as timestamp)")); } } @Test public void testTrunctateDataPartitionedFilters() { File location = buildPartitionedTable("trunc", PARTITION_BY_FIRST_LETTER, "trunc1", "data"); DataSourceOptions options = new DataSourceOptions(ImmutableMap.of( "path", location.toString()) ); IcebergSource source = new IcebergSource(); DataSourceReader unfiltered = source.createReader(options); Assert.assertEquals("Unfiltered table should have created 9 read tasks", 9, planTasks(unfiltered).size()); { DataSourceReader reader = source.createReader(options); pushFilters(reader, Expressions.equal("data", "goldfish")); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 1 task for 'goldfish' (g)", 1, tasks.size()); } { DataSourceReader reader = source.createReader(options); pushFilters(reader, col("data").$eq$eq$eq("goldfish").expr()); List<DataReaderFactory<UnsafeRow>> tasks = planTasks(reader); Assert.assertEquals("Should create 1 task for 'goldfish' (g)", 1, tasks.size()); } assertEqualsSafe(SCHEMA.asStruct(), expected(9), read(location.toString(), "data = 
'goldfish'")); } @Test public void testFilterByNonProjectedColumn() { { Schema actualProjection = SCHEMA.select("id", "data"); List<Record> expected = Lists.newArrayList(); for (Record rec : expected(5, 6 ,7, 8, 9)) { expected.add(projectFlat(actualProjection, rec)); } assertEqualsSafe(actualProjection.asStruct(), expected, read( unpartitioned.toString(), "cast('2017-12-22 00:00:00+00:00' as timestamp) > ts", "id", "data")); } { // only project id: ts will be projected because of the filter, but data will not be included Schema actualProjection = SCHEMA.select("id"); List<Record> expected = Lists.newArrayList(); for (Record rec : expected(1, 2)) { expected.add(projectFlat(actualProjection, rec)); } assertEqualsSafe(actualProjection.asStruct(), expected, read( unpartitioned.toString(), "ts > cast('2017-12-22 06:00:00+00:00' as timestamp) and " + "cast('2017-12-22 08:00:00+00:00' as timestamp) > ts", "id")); } } private static Record projectFlat(Schema projection, Record record) { org.apache.avro.Schema avroSchema = AvroSchemaUtil.convert(projection, "test"); Record result = new Record(avroSchema); List<Types.NestedField> fields = projection.asStruct().fields(); for (int i = 0; i < fields.size(); i += 1) { Types.NestedField field = fields.get(i); result.put(i, record.get(field.name())); } return result; } public static void assertEqualsSafe(Types.StructType struct, List<Record> expected, List<Row> actual) { // TODO: match records by ID int numRecords = Math.min(expected.size(), actual.size()); for (int i = 0; i < numRecords; i += 1) { TestHelpers.assertEqualsSafe(struct, expected.get(i), actual.get(i)); } Assert.assertEquals("Number of results should match expected", expected.size(), actual.size()); } private List<Record> expected(int... 
ordinals) { List<Record> expected = Lists.newArrayListWithExpectedSize(ordinals.length); for (int ord : ordinals) { expected.add(records.get(ord)); } return expected; } private void pushFilters(DataSourceReader reader, com.netflix.iceberg.expressions.Expression... filters) { Expression[] expressions = new Expression[filters.length]; for (int i = 0; i < filters.length; i += 1) { expressions[i] = SparkExpressions.convert(filters[i], SCHEMA); } pushFilters(reader, expressions); } private void pushFilters(DataSourceReader reader, Expression... expressions) { Assert.assertTrue(reader instanceof SupportsPushDownCatalystFilters); SupportsPushDownCatalystFilters filterable = (SupportsPushDownCatalystFilters) reader; filterable.pushCatalystFilters(expressions); } private List<DataReaderFactory<UnsafeRow>> planTasks(DataSourceReader reader) { Assert.assertTrue(reader instanceof SupportsScanUnsafeRow); SupportsScanUnsafeRow unsafeReader = (SupportsScanUnsafeRow) reader; return unsafeReader.createUnsafeRowReaderFactories(); } private File buildPartitionedTable(String desc, PartitionSpec spec, String udf, String partitionColumn) { File location = new File(parent, desc); Table byId = TABLES.create(SCHEMA, spec, location.toString()); // do not combine splits because the tests expect a split per partition byId.updateProperties().set("read.split.target-size", "1").commit(); // copy the unpartitioned table into the partitioned table to produce the partitioned data Dataset<Row> allRows = spark.read() .format("iceberg") .load(unpartitioned.toString()); allRows .coalesce(1) // ensure only 1 file per partition is written .withColumn("part", callUDF(udf, column(partitionColumn))) .sortWithinPartitions("part") .drop("part") .write() .format("iceberg") .mode("append") .save(byId.location()); return location; } private List<Record> testRecords(org.apache.avro.Schema avroSchema) { return Lists.newArrayList( record(avroSchema, 0L, timestamp("2017-12-22T09:20:44.294658+00:00"), "junction"), 
record(avroSchema, 1L, timestamp("2017-12-22T07:15:34.582910+00:00"), "alligator"), record(avroSchema, 2L, timestamp("2017-12-22T06:02:09.243857+00:00"), "forrest"), record(avroSchema, 3L, timestamp("2017-12-22T03:10:11.134509+00:00"), "clapping"), record(avroSchema, 4L, timestamp("2017-12-22T00:34:00.184671+00:00"), "brush"), record(avroSchema, 5L, timestamp("2017-12-21T22:20:08.935889+00:00"), "trap"), record(avroSchema, 6L, timestamp("2017-12-21T21:55:30.589712+00:00"), "element"), record(avroSchema, 7L, timestamp("2017-12-21T17:31:14.532797+00:00"), "limited"), record(avroSchema, 8L, timestamp("2017-12-21T15:21:51.237521+00:00"), "global"), record(avroSchema, 9L, timestamp("2017-12-21T15:02:15.230570+00:00"), "goldfish") ); } private static List<Row> read(String table, String expr) { return read(table, expr, "*"); } private static List<Row> read(String table, String expr, String select0, String... selectN) { Dataset<Row> dataset = spark.read().format("iceberg").load(table).filter(expr) .select(select0, selectN); return dataset.collectAsList(); } private static long timestamp(String timestamp) { return Literal.of(timestamp).<Long>to(Types.TimestampType.withZone()).value(); } private static Record record(org.apache.avro.Schema schema, Object... values) { Record rec = new Record(schema); for (int i = 0; i < values.length; i += 1) { rec.put(i, values[i]); } return rec; } }
2,216
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestParquetWrite.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Round-trip test: appends records to a Parquet-backed Iceberg table through the
 * Spark data source and reads them back.
 */
public class TestParquetWrite {
  private static final Configuration CONF = new Configuration();
  private static final Schema SCHEMA = new Schema(
      optional(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get()));

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    // local 2-thread session shared by all tests in this class
    TestParquetWrite.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // clear the static reference before stopping so no test can reuse a dead session
    SparkSession sessionToStop = TestParquetWrite.spark;
    TestParquetWrite.spark = null;
    sessionToStop.stop();
  }

  @Test
  public void testBasicWrite() throws IOException {
    File tableDir = new File(temp.newFolder("parquet"), "test");

    // input fixture: three simple rows, one per partition value
    List<SimpleRecord> expected = Lists.newArrayList(
        new SimpleRecord(1, "a"),
        new SimpleRecord(2, "b"),
        new SimpleRecord(3, "c"));

    // create a table partitioned by identity(data)
    HadoopTables tables = new HadoopTables(CONF);
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("data").build();
    Table table = tables.create(SCHEMA, spec, tableDir.toString());

    Dataset<Row> df = spark.createDataFrame(expected, SimpleRecord.class);

    // TODO: incoming columns must be ordered according to the table's schema
    df.select("id", "data")
        .write()
        .format("iceberg")
        .mode("append")
        .save(tableDir.toString());

    // pick up the snapshot committed by the write above
    table.refresh();

    List<SimpleRecord> actual = spark.read()
        .format("iceberg")
        .load(tableDir.toString())
        .orderBy("id")
        .as(Encoders.bean(SimpleRecord.class))
        .collectAsList();

    Assert.assertEquals("Number of rows should match", expected.size(), actual.size());
    Assert.assertEquals("Result rows should match", expected, actual);
  }
}
2,217
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestOrcWrite.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Lists;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.orc.CompressionKind;
import org.apache.orc.OrcConf;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.types.Types.NestedField.optional;

/**
 * Round-trip test: appends records to an ORC-backed Iceberg table through the
 * Spark data source and reads them back.
 */
public class TestOrcWrite {
  private static final Configuration CONF = new Configuration();
  private static final Schema SCHEMA = new Schema(
      optional(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get()));

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    // local 2-thread session shared by all tests in this class
    TestOrcWrite.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // clear the static reference before stopping so no test can reuse a dead session
    SparkSession sessionToStop = TestOrcWrite.spark;
    TestOrcWrite.spark = null;
    sessionToStop.stop();
  }

  @Test
  public void testBasicWrite() throws IOException {
    File tableDir = new File(temp.newFolder("orc"), "test");
    tableDir.mkdirs();

    // create a table partitioned by identity(data) that defaults to uncompressed ORC
    HadoopTables tables = new HadoopTables(CONF);
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA).identity("data").build();
    Table table = tables.create(SCHEMA, spec, tableDir.toString());
    table.updateProperties()
        .defaultFormat(FileFormat.ORC)
        .set(OrcConf.COMPRESS.getAttribute(), CompressionKind.NONE.name())
        .commit();

    // input fixture: three simple rows, one per partition value
    List<SimpleRecord> expected = Lists.newArrayList(
        new SimpleRecord(1, "a"),
        new SimpleRecord(2, "b"),
        new SimpleRecord(3, "c"));

    Dataset<Row> df = spark.createDataFrame(expected, SimpleRecord.class);

    // TODO: incoming columns must be ordered according to the table's schema
    df.select("id", "data")
        .write()
        .format("iceberg")
        .mode("append")
        .save(tableDir.toString());

    // pick up the snapshot committed by the write above
    table.refresh();

    List<SimpleRecord> actual = spark.read()
        .format("iceberg")
        .load(tableDir.toString())
        .orderBy("id")
        .as(Encoders.bean(SimpleRecord.class))
        .collectAsList();

    Assert.assertEquals("Number of rows should match", expected.size(), actual.size());
    Assert.assertEquals("Result rows should match", expected, actual);
  }
}
2,218
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestParquetScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.TestHelpers;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.UUID;

import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.Files.localOutput;
import static com.netflix.iceberg.parquet.ParquetMetrics.fromInputFile;

/**
 * Runs the AvroDataTest schema permutations against the Spark reader: for each schema,
 * writes random records to a Parquet data file, registers it with an Iceberg table, and
 * checks that Spark reads back the same values.
 */
public class TestParquetScan extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    // local 2-thread session shared by all tests in this class
    TestParquetScan.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // clear the static reference before stopping the session
    SparkSession spark = TestParquetScan.spark;
    TestParquetScan.spark = null;
    spark.stop();
  }

  /**
   * Writes 100 random records for the given schema to a Parquet file, appends it to a new
   * unpartitioned table, and validates a full-table Spark scan row by row.
   */
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    // parquet-avro cannot write maps with non-string keys; skip those schema permutations
    Assume.assumeTrue("Cannot handle non-string map keys in parquet-avro", null == TypeUtil.find(
        schema,
        type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));
    File parent = temp.newFolder("parquet");
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    dataFolder.mkdirs();
    File parquetFile = new File(dataFolder,
        FileFormat.PARQUET.addExtension(UUID.randomUUID().toString()));
    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString());
    // Important: use the table's schema for the rest of the test
    // When tables are created, the column ids are reassigned.
    Schema tableSchema = table.schema();
    // fixed seed (1L) keeps the generated data deterministic across runs
    List<GenericData.Record> expected = RandomData.generateList(tableSchema, 100, 1L);
    try (FileAppender<GenericData.Record> writer = Parquet.write(localOutput(parquetFile))
        .schema(tableSchema)
        .build()) {
      writer.addAll(expected);
    }
    // register the data file, including metrics collected from the written footer
    DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
        .withFileSizeInBytes(parquetFile.length())
        .withPath(parquetFile.toString())
        .withMetrics(fromInputFile(localInput(parquetFile)))
        .build();
    table.newAppend().appendFile(file).commit();
    Dataset<Row> df = spark.read()
        .format("iceberg")
        .load(location.toString());
    List<Row> rows = df.collectAsList();
    Assert.assertEquals("Should contain 100 rows", 100, rows.size());
    // NOTE(review): assumes Spark preserves the file's row order for this single-file scan
    for (int i = 0; i < expected.size(); i += 1) {
      TestHelpers.assertEqualsSafe(tableSchema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
2,219
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestReadProjection.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import java.io.IOException;
import java.util.List;
import java.util.Map;

import static org.apache.avro.Schema.Type.UNION;

/**
 * Format-agnostic projection tests: subclasses implement {@link #writeAndRead} for a
 * specific file format, and each test writes a record with one schema then reads it back
 * with a (possibly reordered, renamed, or reduced) projection schema.
 */
public abstract class TestReadProjection {
  // file format name supplied by the concrete subclass (e.g. "parquet", "avro")
  final String format;

  TestReadProjection(String format) {
    this.format = format;
  }

  /**
   * Writes {@code record} with {@code writeSchema} and reads it back projected with
   * {@code readSchema}; implemented per file format by subclasses.
   */
  protected abstract Record writeAndRead(String desc,
                                         Schema writeSchema,
                                         Schema readSchema,
                                         Record record) throws IOException;

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  /** Reading with the identical schema returns all values unchanged. */
  @Test
  public void testFullProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Record projected = writeAndRead("full_projection", schema, schema, record);

    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

    // compare as CharSequence: Avro may return Utf8 rather than String
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("data"));
    Assert.assertEquals("Should contain the correct data value", 0, cmp);
  }

  /** Reading with the same fields in a different order reorders the returned values. */
  @Test
  public void testReorderedFullProjection() throws Exception {
//     Assume.assumeTrue(
//         "Spark's Parquet read support does not support reordered columns",
//         !format.equalsIgnoreCase("parquet"));

    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("reordered_full_projection", schema, reordered, record);

    Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
    Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
  }

  /** Projection fields missing from the write schema read back as null, in read order. */
  @Test
  public void testReorderedProjection() throws Exception {
//     Assume.assumeTrue(
//         "Spark's Parquet read support does not support reordered columns",
//         !format.equalsIgnoreCase("parquet"));

    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema reordered = new Schema(
        Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get()),
        Types.NestedField.optional(3, "missing_2", Types.LongType.get())
    );

    Record projected = writeAndRead("reordered_projection", schema, reordered, record);

    Assert.assertNull("Should contain the correct 0 value", projected.get(0));
    Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
    Assert.assertNull("Should contain the correct 2 value", projected.get(2));
  }

  /** An empty projection yields a non-null record with no accessible fields. */
  @Test
  public void testEmptyProjection() throws Exception {
    Schema schema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Record projected = writeAndRead("empty_projection", schema, schema.select(), record);

    Assert.assertNotNull("Should read a non-null record", projected);
    try {
      projected.get(0);
      Assert.fail("Should not retrieve value with ordinal 0");
    } catch (ArrayIndexOutOfBoundsException e) {
      // this is expected because there are no values
    }
  }

  /** Projecting a single column returns that column's value and null for the rest. */
  @Test
  public void testBasicProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("basic_projection_id", writeSchema, idOnly, record);
    Assert.assertNull("Should not project data", projected.get("data"));
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));

    Schema dataOnly = new Schema(
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    projected = writeAndRead("basic_projection_data", writeSchema, dataOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("data"));
    Assert.assertEquals("Should contain the correct data value", 0, cmp);
  }

  /** Columns are matched by field id, so a renamed read field still resolves the value. */
  @Test
  public void testRename() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema readSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(1, "renamed", Types.StringType.get())
    );

    Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);

    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    int cmp = Comparators.charSequences()
        .compare("test", (CharSequence) projected.get("renamed"));
    Assert.assertEquals("Should contain the correct data/renamed value", 0, cmp);
  }

  /** Struct fields can be projected individually; unselected struct fields read as null. */
  @Test
  public void testNestedStructProjection() throws Exception {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get()),
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    // fromOption unwraps the Avro union created for the optional struct field
    Record location = new Record(fromOption(record.getSchema().getField("location").schema()));
    location.put("lat", 52.995143f);
    location.put("long", -1.539054f);
    record.put("location", location);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Record projectedLocation = (Record) projected.get("location");
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project location", projectedLocation);

    Schema latOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(1, "lat", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertNull("Should not project longitude", projectedLocation.get("long"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.get("lat"), 0.000001f);

    Schema longOnly = new Schema(
        Types.NestedField.optional(3, "location", Types.StructType.of(
            Types.NestedField.required(2, "long", Types.FloatType.get())
        ))
    );

    projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertNull("Should not project latitutde", projectedLocation.get("lat"));
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.get("long"), 0.000001f);

    Schema locationOnly = writeSchema.select("location");
    projected = writeAndRead("location_only", writeSchema, locationOnly, record);
    projectedLocation = (Record) projected.get("location");
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertNotNull("Should project location", projected.get("location"));
    Assert.assertEquals("Should project latitude",
        52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
    Assert.assertEquals("Should project longitude",
        -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
  }

  /** Selecting a map's key or value still projects the entire map. */
  @Test
  public void testMapProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "properties",
            Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get()))
    );

    Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B");

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("properties", properties);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project properties map", projected.get("properties"));

    Schema keyOnly = writeSchema.select("properties.key");
    projected = writeAndRead("key_only", writeSchema, keyOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    Schema valueOnly = writeSchema.select("properties.value");
    projected = writeAndRead("value_only", writeSchema, valueOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));

    Schema mapOnly = writeSchema.select("properties");
    projected = writeAndRead("map_only", writeSchema, mapOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire map",
        properties, toStringMap((Map) projected.get("properties")));
  }

  // normalizes map keys (and CharSequence values) to String so maps read back with Avro
  // Utf8 keys compare equal to the expected String-keyed maps
  private Map<String, ?> toStringMap(Map<?, ?> map) {
    Map<String, Object> stringMap = Maps.newHashMap();
    for (Map.Entry<?, ?> entry : map.entrySet()) {
      if (entry.getValue() instanceof CharSequence) {
        stringMap.put(entry.getKey().toString(), entry.getValue().toString());
      } else {
        stringMap.put(entry.getKey().toString(), entry.getValue());
      }
    }
    return stringMap;
  }

  /** Projects individual fields of struct values inside a map, including renames. */
  @Test
  public void testMapOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
            Types.StringType.get(),
            Types.StructType.of(
                Types.NestedField.required(1, "lat", Types.FloatType.get()),
                Types.NestedField.required(2, "long", Types.FloatType.get())
            )
        ))
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    // unwrap the optional-map union, then the optional-value union, to get the struct schema
    Record l1 = new Record(fromOption(
        fromOption(record.getSchema().getField("locations").schema()).getValueType()));
    l1.put("lat", 53.992811f);
    l1.put("long", -1.542616f);
    Record l2 = new Record(l1.getSchema());
    l2.put("lat", 52.995143f);
    l2.put("long", -1.539054f);
    record.put("locations", ImmutableMap.of("L1", l1, "L2", l2));

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project locations map", projected.get("locations"));

    projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project locations map",
        record.get("locations"), toStringMap((Map) projected.get("locations")));

    projected = writeAndRead("lat_only", writeSchema, writeSchema.select("locations.lat"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Map<String, ?> locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    Record projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain lat",
        53.992811f, (float) projectedL1.get("lat"), 0.000001);
    Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
    Record projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain lat",
        52.995143f, (float) projectedL2.get("lat"), 0.000001);
    Assert.assertNull("L2 should not contain long", projectedL2.get("long"));

    projected = writeAndRead("long_only", writeSchema, writeSchema.select("locations.long"), record);
    Assert.assertNull("Should not project id", projected.get("id"));
    locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
    Assert.assertEquals("L1 should contain long",
        -1.542616f, (float) projectedL1.get("long"), 0.000001);
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
    Assert.assertEquals("L2 should contain long",
        -1.539054f, (float) projectedL2.get("long"), 0.000001);

    // NOTE(review): variable name typo "latitiudeRenamed" (should be "latitudeRenamed")
    Schema latitiudeRenamed = new Schema(
        Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
            Types.StringType.get(),
            Types.StructType.of(
                Types.NestedField.required(1, "latitude", Types.FloatType.get())
            )
        ))
    );

    projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    locations = toStringMap((Map) projected.get("locations"));
    Assert.assertNotNull("Should project locations map", locations);
    Assert.assertEquals("Should contain L1 and L2",
        Sets.newHashSet("L1", "L2"), locations.keySet());
    projectedL1 = (Record) locations.get("L1");
    Assert.assertNotNull("L1 should not be null", projectedL1);
    Assert.assertEquals("L1 should contain latitude",
        53.992811f, (float) projectedL1.get("latitude"), 0.000001);
    Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
    Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
    projectedL2 = (Record) locations.get("L2");
    Assert.assertNotNull("L2 should not be null", projectedL2);
    Assert.assertEquals("L2 should contain latitude",
        52.995143f, (float) projectedL2.get("latitude"), 0.000001);
    Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
    Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
  }

  /** Selecting a list's element still projects the entire list. */
  @Test
  public void testListProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(10, "values",
            Types.ListType.ofOptional(11, Types.LongType.get()))
    );

    List<Long> values = ImmutableList.of(56L, 57L, 58L);

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("values", values);

    Schema idOnly = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get())
    );

    Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    Assert.assertNull("Should not project values list", projected.get("values"));

    Schema elementOnly = writeSchema.select("values.element");
    projected = writeAndRead("element_only", writeSchema, elementOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire list", values, projected.get("values"));

    Schema listOnly = writeSchema.select("values");
    projected = writeAndRead("list_only", writeSchema, listOnly, record);
    Assert.assertNull("Should not project id", projected.get("id"));
    Assert.assertEquals("Should project entire list", values, projected.get("values"));
  }

  /** Projects individual fields of struct elements inside a list (continues past this chunk). */
  @Test
  @SuppressWarnings("unchecked")
  public void testListOfStructsProjection() throws IOException {
    Schema writeSchema = new Schema(
        Types.NestedField.required(0, "id", Types.LongType.get()),
        Types.NestedField.optional(22, "points",
Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()) )) ) ); Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table")); record.put("id", 34L); Record p1 = new Record(fromOption( fromOption(record.getSchema().getField("points").schema()).getElementType())); p1.put("x", 1); p1.put("y", 2); Record p2 = new Record(p1.getSchema()); p2.put("x", 3); p2.put("y", null); record.put("points", ImmutableList.of(p1, p2)); Schema idOnly = new Schema( Types.NestedField.required(0, "id", Types.LongType.get()) ); Record projected = writeAndRead("id_only", writeSchema, idOnly, record); Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id")); Assert.assertNull("Should not project points list", projected.get("points")); projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertEquals("Should project points list", record.get("points"), projected.get("points")); projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); List<Record> points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); Record projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Record projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record); Assert.assertNull("Should not project id", projected.get("id")); 
Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.get("x")); Assert.assertNull("Should project null y", projectedP2.get("y")); Schema yRenamed = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.optional(18, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("y_renamed", writeSchema, yRenamed, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertNull("Should not project x", projectedP1.get("x")); Assert.assertNull("Should not project y", projectedP1.get("y")); Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z")); projectedP2 = points.get(1); Assert.assertNull("Should not project x", projectedP2.get("x")); Assert.assertNull("Should not project y", projectedP2.get("y")); Assert.assertNull("Should project null z", projectedP2.get("z")); Schema zAdded = new Schema( Types.NestedField.optional(22, "points", Types.ListType.ofOptional(21, Types.StructType.of( Types.NestedField.required(19, "x", Types.IntegerType.get()), Types.NestedField.optional(18, "y", Types.IntegerType.get()), Types.NestedField.optional(20, "z", Types.IntegerType.get()) )) ) ); projected = writeAndRead("z_added", writeSchema, zAdded, record); Assert.assertNull("Should not project id", projected.get("id")); Assert.assertNotNull("Should project points list", 
projected.get("points")); points = (List<Record>) projected.get("points"); Assert.assertEquals("Should read 2 points", 2, points.size()); projectedP1 = points.get(0); Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x")); Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y")); Assert.assertNull("Should contain null z", projectedP1.get("z")); projectedP2 = points.get(1); Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x")); Assert.assertNull("Should project null y", projectedP2.get("y")); Assert.assertNull("Should contain null z", projectedP2.get("z")); } private static org.apache.avro.Schema fromOption(org.apache.avro.Schema schema) { Preconditions.checkArgument(schema.getType() == UNION, "Expected union schema but was passed: {}", schema); Preconditions.checkArgument(schema.getTypes().size() == 2, "Expected optional schema, but was passed: {}", schema); if (schema.getTypes().get(0).getType() == org.apache.avro.Schema.Type.NULL) { return schema.getTypes().get(1); } else { return schema.getTypes().get(0); } } }
2,220
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestTables.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.Maps; import com.netflix.iceberg.BaseTable; import com.netflix.iceberg.FileIO; import com.netflix.iceberg.Files; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Snapshot; import com.netflix.iceberg.TableMetadata; import com.netflix.iceberg.TableOperations; import com.netflix.iceberg.exceptions.AlreadyExistsException; import com.netflix.iceberg.exceptions.CommitFailedException; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.io.OutputFile; import java.io.File; import java.io.IOException; import java.util.Map; // TODO: Use the copy of this from core. 
// In-memory table implementation backed by a static metadata map. Lets Spark
// source tests create, load, and commit to tables without a real catalog or
// filesystem-based metastore. (See the TODO in the file header: this is a copy
// of the core test utility.)
class TestTables {
  // Static utility holder: no instances.
  private TestTables() {
  }

  /**
   * Creates a new test table rooted at the given temp directory.
   *
   * @throws AlreadyExistsException if metadata for {@code name} is already registered
   */
  static TestTable create(File temp, String name, Schema schema, PartitionSpec spec) {
    TestTableOperations ops = new TestTableOperations(name);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
    }
    // base == null signals a create; this publishes the first metadata version.
    ops.commit(null, TableMetadata.newTableMetadata(ops, schema, spec, temp.toString()));
    return new TestTable(ops, name);
  }

  /** Loads a table by name; current() is null if the table was never created. */
  static TestTable load(String name) {
    TestTableOperations ops = new TestTableOperations(name);
    return new TestTable(ops, name);
  }

  /** Table wrapper that exposes its operations so tests can inject commit failures. */
  static class TestTable extends BaseTable {
    private final TestTableOperations ops;

    private TestTable(TestTableOperations ops, String name) {
      super(ops, name);
      this.ops = ops;
    }

    TestTableOperations ops() {
      return ops;
    }
  }

  // Shared "catalog": table name -> current metadata. All access is guarded by
  // synchronized (METADATA).
  private static final Map<String, TableMetadata> METADATA = Maps.newHashMap();

  /** Drops all table state; call between tests to keep them isolated. */
  static void clearTables() {
    synchronized (METADATA) {
      METADATA.clear();
    }
  }

  /** Returns the current committed metadata for a table, or null if none. */
  static TableMetadata readMetadata(String tableName) {
    synchronized (METADATA) {
      return METADATA.get(tableName);
    }
  }

  /** Overwrites a table's metadata directly, bypassing the commit protocol. */
  static void replaceMetadata(String tableName, TableMetadata metadata) {
    synchronized (METADATA) {
      METADATA.put(tableName, metadata);
    }
  }

  /**
   * TableOperations over the static METADATA map. Supports injecting a fixed
   * number of commit failures to exercise retry logic.
   */
  static class TestTableOperations implements TableOperations {

    private final String tableName;
    private TableMetadata current = null;
    private long lastSnapshotId = 0;
    private int failCommits = 0;

    TestTableOperations(String tableName) {
      this.tableName = tableName;
      refresh();
      // Resume snapshot id assignment after the highest existing snapshot so
      // ids stay unique across reloads of the same table.
      if (current != null) {
        for (Snapshot snap : current.snapshots()) {
          this.lastSnapshotId = Math.max(lastSnapshotId, snap.snapshotId());
        }
      } else {
        this.lastSnapshotId = 0;
      }
    }

    /** Makes the next {@code numFailures} commits throw CommitFailedException. */
    void failCommits(int numFailures) {
      this.failCommits = numFailures;
    }

    @Override
    public TableMetadata current() {
      return current;
    }

    @Override
    public TableMetadata refresh() {
      synchronized (METADATA) {
        this.current = METADATA.get(tableName);
      }
      return current;
    }

    @Override
    public void commit(TableMetadata base, TableMetadata metadata) {
      // Fast pre-check outside the lock. The identity compare is intentional:
      // a commit must be based on the exact metadata instance last refreshed.
      if (base != current) {
        throw new CommitFailedException("Cannot commit changes based on stale metadata");
      }
      synchronized (METADATA) {
        refresh();
        // Re-check under the lock in case a concurrent committer won the race.
        if (base == current) {
          if (failCommits > 0) {
            // Injected failure path for retry tests.
            this.failCommits -= 1;
            throw new CommitFailedException("Injected failure");
          }
          METADATA.put(tableName, metadata);
          this.current = metadata;
        } else {
          throw new CommitFailedException(
              "Commit failed: table was updated at %d", base.lastUpdatedMillis());
        }
      }
    }

    @Override
    public FileIO io() {
      return new LocalFileIO();
    }

    @Override
    public String metadataFileLocation(String fileName) {
      // Metadata files live under <table location>/metadata/.
      return new File(new File(current.location(), "metadata"), fileName).getAbsolutePath();
    }

    @Override
    public long newSnapshotId() {
      // Simple increment; not thread-safe, which is fine for these tests.
      long nextSnapshotId = lastSnapshotId + 1;
      this.lastSnapshotId = nextSnapshotId;
      return nextSnapshotId;
    }
  }

  /** FileIO over the local filesystem. */
  static class LocalFileIO implements FileIO {

    @Override
    public InputFile newInputFile(String path) {
      return Files.localInput(path);
    }

    @Override
    public OutputFile newOutputFile(String path) {
      return Files.localOutput(new File(path));
    }

    @Override
    public void deleteFile(String path) {
      if (!new File(path).delete()) {
        throw new RuntimeIOException("Failed to delete file: " + path);
      }
    }
  }
}
2,221
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/source/TestAvroScan.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.spark.data.AvroDataTest;
import com.netflix.iceberg.spark.data.RandomData;
import com.netflix.iceberg.spark.data.TestHelpers;
import org.apache.avro.generic.GenericData.Record;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.UUID;

import static com.netflix.iceberg.Files.localOutput;

/**
 * End-to-end scan test: writes random Avro data into an Iceberg table and
 * verifies that Spark reads the same rows back through the "iceberg" source.
 */
public class TestAvroScan extends AvroDataTest {
  private static final Configuration CONF = new Configuration();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  // Shared session for all tests in this class; created/destroyed by the
  // @BeforeClass/@AfterClass hooks below.
  private static SparkSession spark = null;

  @BeforeClass
  public static void startSpark() {
    TestAvroScan.spark = SparkSession.builder().master("local[2]").getOrCreate();
  }

  @AfterClass
  public static void stopSpark() {
    // Use a distinct local name (the original shadowed the static field) and
    // null the field first so a failed stop cannot leave a stale reference.
    SparkSession currentSpark = TestAvroScan.spark;
    TestAvroScan.spark = null;
    if (currentSpark != null) {
      currentSpark.stop();
    }
  }

  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    File parent = temp.newFolder("avro");
    File location = new File(parent, "test");
    File dataFolder = new File(location, "data");
    dataFolder.mkdirs();

    File avroFile = new File(dataFolder,
        FileFormat.AVRO.addExtension(UUID.randomUUID().toString()));

    HadoopTables tables = new HadoopTables(CONF);
    Table table = tables.create(schema, PartitionSpec.unpartitioned(), location.toString());

    // Important: use the table's schema for the rest of the test.
    // When tables are created, the column ids are reassigned.
    Schema tableSchema = table.schema();

    List<Record> expected = RandomData.generateList(tableSchema, 100, 1L);

    // Write the expected records to a standalone Avro data file.
    try (FileAppender<Record> writer = Avro.write(localOutput(avroFile))
        .schema(tableSchema)
        .build()) {
      writer.addAll(expected);
    }

    // Register the data file with the table so the scan can find it.
    DataFile file = DataFiles.builder(PartitionSpec.unpartitioned())
        .withRecordCount(100)
        .withFileSizeInBytes(avroFile.length())
        .withPath(avroFile.toString())
        .build();

    table.newAppend().appendFile(file).commit();

    Dataset<Row> df = spark.read()
        .format("iceberg")
        .load(location.toString());

    List<Row> rows = df.collectAsList();
    Assert.assertEquals("Should contain 100 rows", 100, rows.size());

    // Row order is expected to match write order for an unpartitioned scan.
    for (int i = 0; i < expected.size(); i += 1) {
      TestHelpers.assertEqualsSafe(tableSchema.asStruct(), expected.get(i), rows.get(i));
    }
  }
}
2,222
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkParquetReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.spark.sql.catalyst.InternalRow;
import org.junit.Assert;
import org.junit.Assume;
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe;

/**
 * Round-trip test: writes random records to Parquet and verifies that the
 * Spark Parquet reader produces matching {@link InternalRow}s.
 */
public class TestSparkParquetReader extends AvroDataTest {
  @Override
  protected void writeAndValidate(Schema schema) throws IOException {
    // Skip schemas with non-string map keys: the Parquet Avro writer used
    // below cannot handle them. The identity compare against the
    // Types.StringType.get() singleton is intentional.
    Assume.assumeTrue("Parquet Avro cannot write non-string map keys", null == TypeUtil.find(schema,
        type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));

    List<GenericData.Record> expected = RandomData.generateList(schema, 100, 0L);

    // The writer requires a non-existent target, so delete the empty temp file.
    File testFile = temp.newFile();
    Assert.assertTrue("Delete should succeed", testFile.delete());

    try (FileAppender<GenericData.Record> writer = Parquet.write(Files.localOutput(testFile))
        .schema(schema)
        .named("test")
        .build()) {
      writer.addAll(expected);
    }

    // Read the file back and compare row-by-row against the written records.
    try (CloseableIterable<InternalRow> reader = Parquet.read(Files.localInput(testFile))
        .project(schema)
        .createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
        .build()) {
      Iterator<InternalRow> rows = reader.iterator();
      for (int i = 0; i < expected.size(); i += 1) {
        Assert.assertTrue("Should have expected number of rows", rows.hasNext());
        assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
      }
      Assert.assertFalse("Should not have extra rows", rows.hasNext());
    }
  }
}
2,223
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/CodegenExamples.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.UnsafeArrayData; import org.apache.spark.sql.catalyst.expressions.UnsafeMapData; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.unsafe.Platform; import org.apache.spark.unsafe.types.UTF8String; public class CodegenExamples { class Example1 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; public Example1(Object[] references) { this.references = references; result = new UnsafeRow(2); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); long value = isNull ? 
-1L : (i.getLong(0)); if (isNull) { rowWriter.setNullAt(0); } else { rowWriter.write(0, value); } boolean isNull1 = i.isNullAt(1); UTF8String value1 = isNull1 ? null : (i.getUTF8String(1)); if (isNull1) { rowWriter.setNullAt(1); } else { rowWriter.write(1, value1); } result.setTotalSize(holder.totalSize()); return result; } } class Example2 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example2(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); InternalRow value = isNull ? null : (i.getStruct(0, 1)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = value.isNullAt(0); float value1 = isNull1 ? 
-1.0f : value.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example3 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example3(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); InternalRow value = isNull ? null : (i.getStruct(0, 2)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = value.isNullAt(0); float value1 = isNull1 ? -1.0f : value.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = value.isNullAt(1); float value2 = isNull2 ? 
-1.0f : value.getFloat(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example4 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; public Example4(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes2 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes2); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final UTF8String element1 = values.getUTF8String(index1); arrayWriter1.write(index1, element1); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example5 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example5(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); ArrayData value = isNull ? null : (i.getArray(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. 
final int tmpCursor = holder.cursor; if (value instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = value.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (value.isNullAt(index)) { arrayWriter.setNull(index); } else { final InternalRow element = value.getStruct(index, 2); final int tmpCursor1 = holder.cursor; if (element instanceof UnsafeRow) { final int sizeInBytes = ((UnsafeRow) element).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeRow) element).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { rowWriter1.reset(); boolean isNull1 = element.isNullAt(0); int value1 = isNull1 ? -1 : element.getInt(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = element.isNullAt(1); int value2 = isNull2 ? 
-1 : element.getInt(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } arrayWriter.setOffsetAndSize(index, tmpCursor1, holder.cursor - tmpCursor1); } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example6 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter1; public Example6(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.rowWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 2); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. 
final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes3 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes3); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes3; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final InternalRow element1 = values.getStruct(index1, 2); final int tmpCursor3 = holder.cursor; if (element1 instanceof UnsafeRow) { final int sizeInBytes2 = ((UnsafeRow) element1).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes2); ((UnsafeRow) element1).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { rowWriter1.reset(); boolean isNull1 = element1.isNullAt(0); float value1 = isNull1 ? -1.0f : element1.getFloat(0); if (isNull1) { rowWriter1.setNullAt(0); } else { rowWriter1.write(0, value1); } boolean isNull2 = element1.isNullAt(1); float value2 = isNull2 ? 
-1.0f : element1.getFloat(1); if (isNull2) { rowWriter1.setNullAt(1); } else { rowWriter1.write(1, value2); } } arrayWriter1.setOffsetAndSize(index1, tmpCursor3, holder.cursor - tmpCursor3); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } class Example7 extends org.apache.spark.sql.catalyst.expressions.UnsafeProjection { private Object[] references; private UnsafeRow result; private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder holder; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter rowWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter; private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter arrayWriter1; public Example7(Object[] references) { this.references = references; result = new UnsafeRow(1); this.holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(result, 32); this.rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(holder, 1); this.arrayWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); this.arrayWriter1 = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter(); } public void initialize(int partitionIndex) { } public UnsafeRow apply(InternalRow i) { holder.reset(); rowWriter.zeroOutNullBytes(); boolean isNull = i.isNullAt(0); MapData value = isNull ? null : (i.getMap(0)); if (isNull) { rowWriter.setNullAt(0); } else { // Remember the current cursor so that we can calculate how many bytes are // written later. final int tmpCursor = holder.cursor; if (value instanceof UnsafeMapData) { final int sizeInBytes = ((UnsafeMapData) value).getSizeInBytes(); // grow the global buffer before writing data. 
holder.grow(sizeInBytes); ((UnsafeMapData) value).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes; } else { final ArrayData keys = value.keyArray(); final ArrayData values = value.valueArray(); // preserve 8 bytes to write the key array numBytes later. holder.grow(8); holder.cursor += 8; // Remember the current cursor so that we can write numBytes of key array later. final int tmpCursor1 = holder.cursor; if (keys instanceof UnsafeArrayData) { final int sizeInBytes1 = ((UnsafeArrayData) keys).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes1); ((UnsafeArrayData) keys).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes1; } else { final int numElements = keys.numElements(); arrayWriter.initialize(holder, numElements, 8); for (int index = 0; index < numElements; index++) { if (keys.isNullAt(index)) { arrayWriter.setNull(index); } else { final UTF8String element = keys.getUTF8String(index); arrayWriter.write(index, element); } } } // Write the numBytes of key array into the first 8 bytes. Platform.putLong(holder.buffer, tmpCursor1 - 8, holder.cursor - tmpCursor1); if (values instanceof UnsafeArrayData) { final int sizeInBytes2 = ((UnsafeArrayData) values).getSizeInBytes(); // grow the global buffer before writing data. holder.grow(sizeInBytes2); ((UnsafeArrayData) values).writeToMemory(holder.buffer, holder.cursor); holder.cursor += sizeInBytes2; } else { final int numElements1 = values.numElements(); arrayWriter1.initialize(holder, numElements1, 8); for (int index1 = 0; index1 < numElements1; index1++) { if (values.isNullAt(index1)) { arrayWriter1.setNull(index1); } else { final UTF8String element1 = values.getUTF8String(index1); arrayWriter1.write(index1, element1); } } } } rowWriter.setOffsetAndSize(0, tmpCursor, holder.cursor - tmpCursor); } result.setTotalSize(holder.totalSize()); return result; } } }
2,224
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestParquetAvroReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.io.CloseableIterable; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.parquet.ParquetAvroValueReaders; import com.netflix.iceberg.parquet.ParquetReader; import com.netflix.iceberg.parquet.ParquetSchemaUtil; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.parquet.ParquetReadOptions; import org.apache.parquet.schema.MessageType; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.File; import java.io.IOException; import java.util.Iterator; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestParquetAvroReader { @Rule public TemporaryFolder temp = new TemporaryFolder(); private static final Schema COMPLEX_SCHEMA = new Schema( required(1, "roots", Types.LongType.get()), optional(3, "lime", Types.ListType.ofRequired(4, Types.DoubleType.get())), required(5, "strict", Types.StructType.of( required(9, "tangerine", Types.StringType.get()), optional(6, "hopeful", Types.StructType.of( required(7, 
"steel", Types.FloatType.get()), required(8, "lantern", Types.DateType.get()) )), optional(10, "vehement", Types.LongType.get()) )), optional(11, "metamorphosis", Types.MapType.ofRequired(12, 13, Types.StringType.get(), Types.TimestampType.withoutZone())), required(14, "winter", Types.ListType.ofOptional(15, Types.StructType.of( optional(16, "beet", Types.DoubleType.get()), required(17, "stamp", Types.TimeType.get()), optional(18, "wheeze", Types.StringType.get()) ))), optional(19, "renovate", Types.MapType.ofRequired(20, 21, Types.StringType.get(), Types.StructType.of( optional(22, "jumpy", Types.DoubleType.get()), required(23, "koala", Types.TimeType.get()) ))), optional(2, "slide", Types.StringType.get()) ); @Ignore public void testStructSchema() throws IOException { Schema structSchema = new Schema( required(1, "circumvent", Types.LongType.get()), optional(2, "antarctica", Types.StringType.get()), optional(3, "fluent", Types.DoubleType.get()), required(4, "quell", Types.StructType.of( required(5, "operator", Types.BooleanType.get()), optional(6, "fanta", Types.IntegerType.get()), optional(7, "cable", Types.FloatType.get()) )), required(8, "chimney", Types.TimestampType.withZone()), required(9, "wool", Types.DateType.get()) ); File testFile = writeTestData(structSchema, 5_000_000, 1059); MessageType readSchema = ParquetSchemaUtil.convert(structSchema, "test"); long sum = 0; long sumSq = 0; int warmups = 2; int n = 10; for (int i = 0; i < warmups + n; i += 1) { // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), structSchema, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(structSchema, readSchema), Expressions.alwaysTrue(), true)) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this 
away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); long duration = end - start; System.err.println("XOR val: " + val); System.err.println(String.format("Reassembled %d records in %d ms", count, duration)); if (i >= warmups) { sum += duration; sumSq += (duration * duration); } } } double mean = ((double) sum) / n; double stddev = Math.sqrt((((double) sumSq) / n) - (mean * mean)); System.err.println(String.format( "Ran %d trials: mean time: %.3f ms, stddev: %.3f ms", n, mean, stddev)); } @Ignore public void testWithOldReadPath() throws IOException { File testFile = writeTestData(COMPLEX_SCHEMA, 500_000, 1985); MessageType readSchema = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test"); for (int i = 0; i < 5; i += 1) { // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (CloseableIterable<Record> reader = Parquet.read(Files.localInput(testFile)) .project(COMPLEX_SCHEMA) .build()) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); System.err.println("XOR val: " + val); System.err.println("Old read path: read " + count + " records in " + (end - start) + " ms"); } // clean up as much memory as possible to avoid a large GC during the timed run System.gc(); try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, readSchema), Expressions.alwaysTrue(), true)) { long start = System.currentTimeMillis(); long val = 0; long count = 0; for (Record record : reader) { // access something to ensure the compiler doesn't optimize this away val ^= (Long) record.get(0); count += 1; } long end = System.currentTimeMillis(); System.err.println("XOR val: " + val); 
System.err.println("New read path: read " + count + " records in " + (end - start) + " ms"); } } } @Test public void testCorrectness() throws IOException { Iterable<Record> records = RandomData.generate(COMPLEX_SCHEMA, 250_000, 34139); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Parquet.write(Files.localOutput(testFile)) .schema(COMPLEX_SCHEMA) .build()) { writer.addAll(records); } MessageType readSchema = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test"); // verify that the new read path is correct try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, readSchema), Expressions.alwaysTrue(), true)) { int i = 0; Iterator<Record> iter = records.iterator(); for (Record actual : reader) { Record expected = iter.next(); Assert.assertEquals("Record " + i + " should match expected", expected, actual); i += 1; } } } private File writeTestData(Schema schema, int n, int seed) throws IOException { File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Parquet.write(Files.localOutput(testFile)) .schema(schema) .build()) { writer.addAll(RandomData.generate(schema, n, seed)); } return testFile; } }
2,225
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestHelpers.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.Lists; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericData.Record; import org.apache.orc.storage.serde2.io.DateWritable; import org.apache.spark.sql.Row; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.GenericRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.DateTimeUtils; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import org.junit.Assert; import scala.collection.Seq; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.sql.Timestamp; import java.time.Instant; import java.time.LocalDate; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.Collection; import java.util.Date; import java.util.List; import java.util.Map; import java.util.UUID; import static com.netflix.iceberg.spark.SparkSchemaUtil.convert; import static scala.collection.JavaConverters.mapAsJavaMapConverter; import static scala.collection.JavaConverters.seqAsJavaListConverter; public class TestHelpers { 
public static void assertEqualsSafe(Types.StructType struct, Record rec, Row row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.get(i); assertEqualsSafe(fieldType, expectedValue, actualValue); } } private static void assertEqualsSafe(Types.ListType list, Collection<?> expected, List actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i); assertEqualsSafe(elementType, expectedValue, actualValue); } } private static void assertEqualsSafe(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); for (Object expectedKey : expected.keySet()) { Object matchingKey = null; for (Object actualKey : actual.keySet()) { try { assertEqualsSafe(keyType, expectedKey, actualKey); matchingKey = actualKey; } catch (AssertionError e) { // failed } } Assert.assertNotNull("Should have a matching key", matchingKey); assertEqualsSafe(valueType, expected.get(expectedKey), actual.get(matchingKey)); } } private static final OffsetDateTime EPOCH = Instant.ofEpochMilli(0L).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); @SuppressWarnings("unchecked") private static void assertEqualsSafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case DATE: Assert.assertTrue("Should be an int", expected instanceof Integer); Assert.assertTrue("Should be a Date", actual instanceof Date); int daysFrom1970_01_01 = (Integer) expected; LocalDate 
date = ChronoUnit.DAYS.addTo(EPOCH_DAY, daysFrom1970_01_01); Assert.assertEquals("ISO-8601 date should be equal", date.toString(), actual.toString()); break; case TIMESTAMP: Assert.assertTrue("Should be a long", expected instanceof Long); Assert.assertTrue("Should be a Timestamp", actual instanceof Timestamp); Timestamp ts = (Timestamp) actual; // milliseconds from nanos has already been added by getTime long tsMicros = (ts.getTime() * 1000) + ((ts.getNanos() / 1000) % 1000); Assert.assertEquals("Timestamp micros should be equal", expected, tsMicros); break; case STRING: Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("Strings should be equal", expected, actual); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a String", actual instanceof String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a BigDecimal", actual instanceof BigDecimal); Assert.assertEquals("BigDecimals should be equal", expected, actual); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be a Row", actual instanceof Row); assertEqualsSafe(type.asNestedType().asStructType(), (Record) expected, (Row) actual); break; case 
LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be a Seq", actual instanceof Seq); List<?> asList = seqAsJavaListConverter((Seq<?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asListType(), (Collection) expected, asList); break; case MAP: Assert.assertTrue("Should expect a Collection", expected instanceof Map); Assert.assertTrue("Should be a Map", actual instanceof scala.collection.Map); Map<String, ?> asMap = mapAsJavaMapConverter( (scala.collection.Map<String, ?>) actual).asJava(); assertEqualsSafe(type.asNestedType().asMapType(), (Map<String, ?>) expected, asMap); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } public static void assertEqualsUnsafe(Types.StructType struct, Record rec, InternalRow row) { List<Types.NestedField> fields = struct.fields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i).type(); Object expectedValue = rec.get(i); Object actualValue = row.get(i, convert(fieldType)); assertEqualsUnsafe(fieldType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.ListType list, Collection<?> expected, ArrayData actual) { Type elementType = list.elementType(); List<?> expectedElements = Lists.newArrayList(expected); for (int i = 0; i < expectedElements.size(); i += 1) { Object expectedValue = expectedElements.get(i); Object actualValue = actual.get(i, convert(elementType)); assertEqualsUnsafe(elementType, expectedValue, actualValue); } } private static void assertEqualsUnsafe(Types.MapType map, Map<?, ?> expected, MapData actual) { Type keyType = map.keyType(); Type valueType = map.valueType(); List<Map.Entry<?, ?>> expectedElements = Lists.newArrayList(expected.entrySet()); ArrayData actualKeys = actual.keyArray(); ArrayData actualValues = actual.valueArray(); for (int i = 0; i < expectedElements.size(); i += 1) { Map.Entry<?, ?> expectedPair = expectedElements.get(i); 
Object actualKey = actualKeys.get(i, convert(keyType)); Object actualValue = actualValues.get(i, convert(keyType)); assertEqualsUnsafe(keyType, expectedPair.getKey(), actualKey); assertEqualsUnsafe(valueType, expectedPair.getValue(), actualValue); } } private static void assertEqualsUnsafe(Type type, Object expected, Object actual) { if (expected == null && actual == null) { return; } switch (type.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case DATE: case TIMESTAMP: Assert.assertEquals("Primitive value should be equal to expected", expected, actual); break; case STRING: Assert.assertTrue("Should be a UTF8String", actual instanceof UTF8String); Assert.assertEquals("Strings should be equal", expected, actual.toString()); break; case UUID: Assert.assertTrue("Should expect a UUID", expected instanceof UUID); Assert.assertTrue("Should be a UTF8String", actual instanceof UTF8String); Assert.assertEquals("UUID string representation should match", expected.toString(), actual.toString()); break; case FIXED: Assert.assertTrue("Should expect a Fixed", expected instanceof GenericData.Fixed); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((GenericData.Fixed) expected).bytes(), (byte[]) actual); break; case BINARY: Assert.assertTrue("Should expect a ByteBuffer", expected instanceof ByteBuffer); Assert.assertTrue("Should be a byte[]", actual instanceof byte[]); Assert.assertArrayEquals("Bytes should match", ((ByteBuffer) expected).array(), (byte[]) actual); break; case DECIMAL: Assert.assertTrue("Should expect a BigDecimal", expected instanceof BigDecimal); Assert.assertTrue("Should be a Decimal", actual instanceof Decimal); Assert.assertEquals("BigDecimals should be equal", expected, ((Decimal) actual).toJavaBigDecimal()); break; case STRUCT: Assert.assertTrue("Should expect a Record", expected instanceof Record); Assert.assertTrue("Should be an InternalRow", actual instanceof 
InternalRow); assertEqualsUnsafe(type.asNestedType().asStructType(), (Record) expected, (InternalRow) actual); break; case LIST: Assert.assertTrue("Should expect a Collection", expected instanceof Collection); Assert.assertTrue("Should be an ArrayData", actual instanceof ArrayData); assertEqualsUnsafe(type.asNestedType().asListType(), (Collection) expected, (ArrayData) actual); break; case MAP: Assert.assertTrue("Should expect a Map", expected instanceof Map); Assert.assertTrue("Should be an ArrayBasedMapData", actual instanceof MapData); assertEqualsUnsafe(type.asNestedType().asMapType(), (Map) expected, (MapData) actual); break; case TIME: default: throw new IllegalArgumentException("Not a supported type: " + type); } } /** * Check that the given InternalRow is equivalent to the Row. * @param prefix context for error messages * @param type the type of the row * @param expected the expected value of the row * @param actual the actual value of the row */ public static void assertEquals(String prefix, Types.StructType type, InternalRow expected, Row actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { List<Types.NestedField> fields = type.fields(); for (int c = 0; c < fields.size(); ++c) { String fieldName = fields.get(c).name(); Type childType = fields.get(c).type(); switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + "." + fieldName + " - " + childType, getValue(expected, c, childType), getPrimitiveValue(actual, c, childType)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + "." + fieldName, (byte[]) getValue(expected, c, childType), (byte[]) actual.get(c)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + "." 
+ fieldName, st, expected.getStruct(c, st.fields().size()), actual.getStruct(c)); break; } case LIST: assertEqualsLists(prefix + "." + fieldName, childType.asListType(), expected.getArray(c), toList((Seq<?>) actual.get(c))); break; case MAP: assertEqualsMaps(prefix + "." + fieldName, childType.asMapType(), expected.getMap(c), toJavaMap((scala.collection.Map<?, ?>) actual.getMap(c))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsLists(String prefix, Types.ListType type, ArrayData expected, List actual) { if (expected == null || actual == null) { Assert.assertEquals(prefix, expected, actual); } else { Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); Type childType = type.elementType(); for (int e = 0; e < expected.numElements(); ++e) { switch (childType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".elem " + e + " - " + childType, getValue(expected, e, childType), actual.get(e)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".elem " + e, (byte[]) getValue(expected, e, childType), (byte[]) actual.get(e)); break; case STRUCT: { Types.StructType st = (Types.StructType) childType; assertEquals(prefix + ".elem " + e, st, expected.getStruct(e, st.fields().size()), (Row) actual.get(e)); break; } case LIST: assertEqualsLists(prefix + ".elem " + e, childType.asListType(), expected.getArray(e), toList((Seq<?>) actual.get(e))); break; case MAP: assertEqualsMaps(prefix + ".elem " + e, childType.asMapType(), expected.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(e))); break; default: throw new IllegalArgumentException("Unhandled type " + childType); } } } } private static void assertEqualsMaps(String prefix, Types.MapType type, MapData expected, Map<?, ?> actual) { if (expected == null || actual == null) { 
Assert.assertEquals(prefix, expected, actual); } else { Type keyType = type.keyType(); Type valueType = type.valueType(); ArrayData expectedKeyArray = expected.keyArray(); ArrayData expectedValueArray = expected.valueArray(); Assert.assertEquals(prefix + " length", expected.numElements(), actual.size()); for (int e = 0; e < expected.numElements(); ++e) { Object expectedKey = getValue(expectedKeyArray, e, keyType); Object actualValue = actual.get(expectedKey); if (actualValue == null) { Assert.assertEquals(prefix + ".key=" + expectedKey + " has null", true, expected.valueArray().isNullAt(e)); } else { switch (valueType.typeId()) { case BOOLEAN: case INTEGER: case LONG: case FLOAT: case DOUBLE: case STRING: case DECIMAL: case DATE: case TIMESTAMP: Assert.assertEquals(prefix + ".key=" + expectedKey + " - " + valueType, getValue(expectedValueArray, e, valueType), actual.get(expectedKey)); break; case UUID: case FIXED: case BINARY: assertEqualBytes(prefix + ".key=" + expectedKey, (byte[]) getValue(expectedValueArray, e, valueType), (byte[]) actual.get(expectedKey)); break; case STRUCT: { Types.StructType st = (Types.StructType) valueType; assertEquals(prefix + ".key=" + expectedKey, st, expectedValueArray.getStruct(e, st.fields().size()), (Row) actual.get(expectedKey)); break; } case LIST: assertEqualsLists(prefix + ".key=" + expectedKey, valueType.asListType(), expectedValueArray.getArray(e), toList((Seq<?>) actual.get(expectedKey))); break; case MAP: assertEqualsMaps(prefix + ".key=" + expectedKey, valueType.asMapType(), expectedValueArray.getMap(e), toJavaMap((scala.collection.Map<?, ?>) actual.get(expectedKey))); break; default: throw new IllegalArgumentException("Unhandled type " + valueType); } } } } } private static Object getValue(SpecializedGetters container, int ord, Type type) { if (container.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return container.getBoolean(ord); case INTEGER: return container.getInt(ord); case LONG: return 
container.getLong(ord); case FLOAT: return container.getFloat(ord); case DOUBLE: return container.getDouble(ord); case STRING: return container.getUTF8String(ord).toString(); case BINARY: case FIXED: case UUID: return container.getBinary(ord); case DATE: return new DateWritable(container.getInt(ord)).get(); case TIMESTAMP: return DateTimeUtils.toJavaTimestamp(container.getLong(ord)); case DECIMAL: { Types.DecimalType dt = (Types.DecimalType) type; return container.getDecimal(ord, dt.precision(), dt.scale()).toJavaBigDecimal(); } case STRUCT: Types.StructType struct = type.asStructType(); InternalRow internalRow = container.getStruct(ord, struct.fields().size()); Object[] data = new Object[struct.fields().size()]; for (int i = 0; i < data.length; i += 1) { if (internalRow.isNullAt(i)) { data[i] = null; } else { data[i] = getValue(internalRow, i, struct.fields().get(i).type()); } } return new GenericRow(data); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static Object getPrimitiveValue(Row row, int ord, Type type) { if (row.isNullAt(ord)) { return null; } switch (type.typeId()) { case BOOLEAN: return row.getBoolean(ord); case INTEGER: return row.getInt(ord); case LONG: return row.getLong(ord); case FLOAT: return row.getFloat(ord); case DOUBLE: return row.getDouble(ord); case STRING: return row.getString(ord); case BINARY: case FIXED: case UUID: return row.get(ord); case DATE: return row.getDate(ord); case TIMESTAMP: return row.getTimestamp(ord); case DECIMAL: return row.getDecimal(ord); default: throw new IllegalArgumentException("Unhandled type " + type); } } private static <K, V> Map<K, V> toJavaMap(scala.collection.Map<K, V> map) { return map == null ? null : mapAsJavaMapConverter(map).asJava(); } private static List toList(Seq<?> val) { return val == null ? 
null : seqAsJavaListConverter(val).asJava(); } private static void assertEqualBytes(String context, byte[] expected, byte[] actual) { if (expected == null || actual == null) { Assert.assertEquals(context, expected, actual); } else { Assert.assertArrayEquals(context, expected, actual); } } }
2,226
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestParquetAvroWriter.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Expressions; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.parquet.ParquetAvroValueReaders; import com.netflix.iceberg.parquet.ParquetAvroWriter; import com.netflix.iceberg.parquet.ParquetReader; import com.netflix.iceberg.parquet.ParquetSchemaUtil; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData.Record; import org.apache.parquet.ParquetReadOptions; import org.apache.parquet.schema.MessageType; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; import java.io.File; import java.io.IOException; import java.util.Iterator; import static com.netflix.iceberg.types.Types.NestedField.optional; import static com.netflix.iceberg.types.Types.NestedField.required; public class TestParquetAvroWriter { @Rule public TemporaryFolder temp = new TemporaryFolder(); private static final Schema COMPLEX_SCHEMA = new Schema( required(1, "roots", Types.LongType.get()), optional(3, "lime", Types.ListType.ofRequired(4, Types.DoubleType.get())), required(5, "strict", Types.StructType.of( required(9, "tangerine", Types.StringType.get()), optional(6, "hopeful", Types.StructType.of( required(7, "steel", 
Types.FloatType.get()), required(8, "lantern", Types.DateType.get()) )), optional(10, "vehement", Types.LongType.get()) )), optional(11, "metamorphosis", Types.MapType.ofRequired(12, 13, Types.StringType.get(), Types.TimestampType.withoutZone())), required(14, "winter", Types.ListType.ofOptional(15, Types.StructType.of( optional(16, "beet", Types.DoubleType.get()), required(17, "stamp", Types.TimeType.get()), optional(18, "wheeze", Types.StringType.get()) ))), optional(19, "renovate", Types.MapType.ofRequired(20, 21, Types.StringType.get(), Types.StructType.of( optional(22, "jumpy", Types.DoubleType.get()), required(23, "koala", Types.TimeType.get()) ))), optional(2, "slide", Types.StringType.get()) ); @Test public void testCorrectness() throws IOException { Iterable<Record> records = RandomData.generate(COMPLEX_SCHEMA, 250_000, 34139); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Parquet.write(Files.localOutput(testFile)) .schema(COMPLEX_SCHEMA) .createWriterFunc(ParquetAvroWriter::buildWriter) .build()) { writer.addAll(records); } MessageType readSchema = ParquetSchemaUtil.convert(COMPLEX_SCHEMA, "test"); // verify that the new read path is correct try (ParquetReader<Record> reader = new ParquetReader<>( Files.localInput(testFile), COMPLEX_SCHEMA, ParquetReadOptions.builder().build(), fileSchema -> ParquetAvroValueReaders.buildReader(COMPLEX_SCHEMA, readSchema), Expressions.alwaysTrue(), false)) { int i = 0; Iterator<Record> iter = records.iterator(); for (Record actual : reader) { Record expected = iter.next(); Assert.assertEquals("Record " + i + " should match expected", expected, actual); i += 1; } } } }
2,227
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/RandomData.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.AvroSchemaUtil; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.TypeUtil; import com.netflix.iceberg.types.Types; import org.apache.avro.generic.GenericData; import org.apache.avro.generic.GenericData.Record; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; import org.apache.spark.sql.catalyst.util.ArrayBasedMapData; import org.apache.spark.sql.catalyst.util.GenericArrayData; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; import java.util.Random; import java.util.Set; import java.util.UUID; import java.util.function.Supplier; public class RandomData { public static List<Record> generateList(Schema schema, int numRecords, long seed) { RandomDataGenerator generator = new RandomDataGenerator(schema, seed); List<Record> records = Lists.newArrayListWithExpectedSize(numRecords); for (int i = 0; i < numRecords; i += 1) { records.add((Record) 
TypeUtil.visit(schema, generator)); } return records; } public static Iterator<InternalRow> generateSpark(Schema schema, int rows, long seed) { return new Iterator<InternalRow>() { private int rowsLeft = rows; private final SparkRandomDataGenerator generator = new SparkRandomDataGenerator(seed); @Override public boolean hasNext() { return rowsLeft > 0; } @Override public InternalRow next() { rowsLeft -= 1; return (InternalRow) TypeUtil.visit(schema, generator); } }; } public static Iterable<Record> generate(Schema schema, int numRecords, long seed) { return () -> new Iterator<Record>() { private RandomDataGenerator generator = new RandomDataGenerator(schema, seed); private int count = 0; @Override public boolean hasNext() { return count < numRecords; } @Override public Record next() { if (count >= numRecords) { throw new NoSuchElementException(); } count += 1; return (Record) TypeUtil.visit(schema, generator); } }; } private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> { private final Map<Type, org.apache.avro.Schema> typeToSchema; private final Random random; private RandomDataGenerator(Schema schema, long seed) { this.typeToSchema = AvroSchemaUtil.convertTypes(schema.asStruct(), "test"); this.random = new Random(seed); } @Override public Record schema(Schema schema, Supplier<Object> structResult) { return (Record) structResult.get(); } @Override public Record struct(Types.StructType struct, Iterable<Object> fieldResults) { Record rec = new Record(typeToSchema.get(struct)); List<Object> values = Lists.newArrayList(fieldResults); for (int i = 0; i < values.size(); i += 1) { rec.put(i, values.get(i)); } return rec; } @Override public Object field(Types.NestedField field, Supplier<Object> fieldResult) { // return null 5% of the time when the value is optional if (field.isOptional() && random.nextInt(20) == 1) { return null; } return fieldResult.get(); } @Override public Object list(Types.ListType list, Supplier<Object> 
elementResult) { int numElements = random.nextInt(20); List<Object> result = Lists.newArrayListWithExpectedSize(numElements); for (int i = 0; i < numElements; i += 1) { // return null 5% of the time when the value is optional if (list.isElementOptional() && random.nextInt(20) == 1) { result.add(null); } else { result.add(elementResult.get()); } } return result; } @Override public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) { int numEntries = random.nextInt(20); Map<Object, Object> result = Maps.newLinkedHashMap(); Set<Object> keySet = Sets.newHashSet(); for (int i = 0; i < numEntries; i += 1) { Object key = keyResult.get(); // ensure no collisions while (keySet.contains(key)) { key = keyResult.get(); } keySet.add(key); // return null 5% of the time when the value is optional if (map.isValueOptional() && random.nextInt(20) == 1) { result.put(key, null); } else { result.put(key, valueResult.get()); } } return result; } @Override public Object primitive(Type.PrimitiveType primitive) { Object result = generatePrimitive(primitive, random); // For the primitives that Avro needs a different type than Spark, fix // them here. 
switch (primitive.typeId()) { case STRING: return ((UTF8String) result).toString(); case FIXED: return new GenericData.Fixed(typeToSchema.get(primitive), (byte[]) result); case BINARY: return ByteBuffer.wrap((byte[]) result); case UUID: return UUID.nameUUIDFromBytes((byte[]) result); case DECIMAL: return ((Decimal) result).toJavaBigDecimal(); default: return result; } } } private static class SparkRandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> { private final Random random; private SparkRandomDataGenerator(long seed) { this.random = new Random(seed); } @Override public InternalRow schema(Schema schema, Supplier<Object> structResult) { return (InternalRow) structResult.get(); } @Override public InternalRow struct(Types.StructType struct, Iterable<Object> fieldResults) { List<Object> values = Lists.newArrayList(fieldResults); GenericInternalRow row = new GenericInternalRow(values.size()); for (int i = 0; i < values.size(); i += 1) { row.update(i, values.get(i)); } return row; } @Override public Object field(Types.NestedField field, Supplier<Object> fieldResult) { // return null 5% of the time when the value is optional if (field.isOptional() && random.nextInt(20) == 1) { return null; } return fieldResult.get(); } @Override public GenericArrayData list(Types.ListType list, Supplier<Object> elementResult) { int numElements = random.nextInt(20); Object[] arr = new Object[numElements]; GenericArrayData result = new GenericArrayData(arr); for (int i = 0; i < numElements; i += 1) { // return null 5% of the time when the value is optional if (list.isElementOptional() && random.nextInt(20) == 1) { arr[i] = null; } else { arr[i] = elementResult.get(); } } return result; } @Override public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) { int numEntries = random.nextInt(20); Object[] keysArr = new Object[numEntries]; Object[] valuesArr = new Object[numEntries]; GenericArrayData keys = new GenericArrayData(keysArr); 
GenericArrayData values = new GenericArrayData(valuesArr); ArrayBasedMapData result = new ArrayBasedMapData(keys, values); Set<Object> keySet = Sets.newHashSet(); for (int i = 0; i < numEntries; i += 1) { Object key = keyResult.get(); // ensure no collisions while (keySet.contains(key)) { key = keyResult.get(); } keySet.add(key); keysArr[i] = key; // return null 5% of the time when the value is optional if (map.isValueOptional() && random.nextInt(20) == 1) { valuesArr[i] = null; } else { valuesArr[i] = valueResult.get(); } } return result; } @Override public Object primitive(Type.PrimitiveType primitive) { return generatePrimitive(primitive, random); } } private static Object generatePrimitive(Type.PrimitiveType primitive, Random random) { int choice = random.nextInt(20); switch (primitive.typeId()) { case BOOLEAN: return choice < 10; case INTEGER: switch (choice) { case 1: return Integer.MIN_VALUE; case 2: return Integer.MAX_VALUE; case 3: return 0; default: return random.nextInt(); } case LONG: switch (choice) { case 1: return Long.MIN_VALUE; case 2: return Long.MAX_VALUE; case 3: return 0L; default: return random.nextLong(); } case FLOAT: switch (choice) { case 1: return Float.MIN_VALUE; case 2: return -Float.MIN_VALUE; case 3: return Float.MAX_VALUE; case 4: return -Float.MAX_VALUE; case 5: return Float.NEGATIVE_INFINITY; case 6: return Float.POSITIVE_INFINITY; case 7: return 0.0F; case 8: return Float.NaN; default: return random.nextFloat(); } case DOUBLE: switch (choice) { case 1: return Double.MIN_VALUE; case 2: return -Double.MIN_VALUE; case 3: return Double.MAX_VALUE; case 4: return -Double.MAX_VALUE; case 5: return Double.NEGATIVE_INFINITY; case 6: return Double.POSITIVE_INFINITY; case 7: return 0.0D; case 8: return Double.NaN; default: return random.nextDouble(); } case DATE: // this will include negative values (dates before 1970-01-01) return random.nextInt() % ABOUT_380_YEARS_IN_DAYS; case TIME: return (random.nextLong() & Integer.MAX_VALUE) % 
ONE_DAY_IN_MICROS; case TIMESTAMP: return random.nextLong() % FIFTY_YEARS_IN_MICROS; case STRING: return randomString(random); case UUID: byte[] uuidBytes = new byte[16]; random.nextBytes(uuidBytes); // this will hash the uuidBytes return uuidBytes; case FIXED: byte[] fixed = new byte[((Types.FixedType) primitive).length()]; random.nextBytes(fixed); return fixed; case BINARY: byte[] binary = new byte[random.nextInt(50)]; random.nextBytes(binary); return binary; case DECIMAL: Types.DecimalType type = (Types.DecimalType) primitive; BigInteger unscaled = randomUnscaled(type.precision(), random); return Decimal.apply(new BigDecimal(unscaled, type.scale())); default: throw new IllegalArgumentException( "Cannot generate random value for unknown type: " + primitive); } } private static final long FIFTY_YEARS_IN_MICROS = (50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4; private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365; private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L; private static final String CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?"; private static UTF8String randomString(Random random) { int length = random.nextInt(50); byte[] buffer = new byte[length]; for (int i = 0; i < length; i += 1) { buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length())); } return UTF8String.fromBytes(buffer); } private static final String DIGITS = "0123456789"; private static BigInteger randomUnscaled(int precision, Random random) { int length = random.nextInt(precision); if (length == 0) { return BigInteger.ZERO; } StringBuilder sb = new StringBuilder(); for (int i = 0; i < length; i += 1) { sb.append(DIGITS.charAt(random.nextInt(DIGITS.length()))); } return new BigInteger(sb.toString()); } }
2,228
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkAvroReader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.Lists; import com.netflix.iceberg.Files; import com.netflix.iceberg.Schema; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.avro.AvroIterable; import com.netflix.iceberg.io.FileAppender; import org.apache.avro.generic.GenericData.Record; import org.apache.spark.sql.catalyst.InternalRow; import org.junit.Assert; import java.io.File; import java.io.IOException; import java.util.List; import static com.netflix.iceberg.spark.data.TestHelpers.assertEqualsUnsafe; public class TestSparkAvroReader extends AvroDataTest { protected void writeAndValidate(Schema schema) throws IOException { List<Record> expected = RandomData.generateList(schema, 100, 0L); File testFile = temp.newFile(); Assert.assertTrue("Delete should succeed", testFile.delete()); try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile)) .schema(schema) .named("test") .build()) { for (Record rec : expected) { writer.add(rec); } } List<InternalRow> rows; try (AvroIterable<InternalRow> reader = Avro.read(Files.localInput(testFile)) .createReaderFunc(SparkAvroReader::new) .project(schema) .build()) { rows = Lists.newArrayList(reader); } for (int i = 0; i < expected.size(); i += 1) { assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.get(i)); } } }
2,229
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/AvroDataTest.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;

import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;

/**
 * Base class for Avro round-trip tests. Subclasses implement {@link #writeAndValidate(Schema)}
 * to write random data for a schema and verify what is read back; each test method here
 * exercises that hook with a different schema shape.
 */
public abstract class AvroDataTest {

  /** Writes randomly-generated data for the schema and validates the read path against it. */
  protected abstract void writeAndValidate(Schema schema) throws IOException;

  // struct covering every primitive type these tests exercise; reused as the
  // element/value type in the nested-type tests below
  protected static final StructType SUPPORTED_PRIMITIVES = StructType.of(
      required(100, "id", LongType.get()),
      optional(101, "data", Types.StringType.get()),
      required(102, "b", Types.BooleanType.get()),
      optional(103, "i", Types.IntegerType.get()),
      required(104, "l", LongType.get()),
      optional(105, "f", Types.FloatType.get()),
      required(106, "d", Types.DoubleType.get()),
      optional(107, "date", Types.DateType.get()),
      required(108, "ts", Types.TimestampType.withZone()),
      required(110, "s", Types.StringType.get()),
      //required(111, "uuid", Types.UUIDType.get()),
      //required(112, "fixed", Types.FixedType.ofLength(7)),
      optional(113, "bytes", Types.BinaryType.get()),
      required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
      required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
      required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // spark's maximum precision
  );

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  @Test
  public void testSimpleStruct() throws IOException {
    // flat schema: every supported primitive at the top level
    writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
  }

  @Test
  public void testArray() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testArrayOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMap() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testNumericMapKey() throws IOException {
    // non-string map keys take a different representation path in Avro
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.LongType.get(),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testComplexMapKey() throws IOException {
    // struct-typed map keys
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StructType.of(
                required(4, "i", Types.IntegerType.get()),
                optional(5, "s", Types.StringType.get())),
            Types.StringType.get())));

    writeAndValidate(schema);
  }

  @Test
  public void testMapOfStructs() throws IOException {
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "data", MapType.ofOptional(2, 3,
            Types.StringType.get(),
            SUPPORTED_PRIMITIVES)));

    writeAndValidate(schema);
  }

  @Test
  public void testMixedTypes() throws IOException {
    // every combination of list/map nesting, with primitives at the leaves
    Schema schema = new Schema(
        required(0, "id", LongType.get()),
        optional(1, "list_of_maps",
            ListType.ofOptional(2, MapType.ofOptional(3, 4,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))),
        optional(5, "map_of_lists",
            MapType.ofOptional(6, 7,
                Types.StringType.get(),
                ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
        required(9, "list_of_lists",
            ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
        required(12, "map_of_maps",
            MapType.ofOptional(13, 14,
                Types.StringType.get(),
                MapType.ofOptional(15, 16,
                    Types.StringType.get(),
                    SUPPORTED_PRIMITIVES))),
        required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
            Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
            Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
            Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
                Types.StringType.get(),
                SUPPORTED_PRIMITIVES))
        )))
    );

    writeAndValidate(schema);
  }
}
2,230
0
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/test/java/com/netflix/iceberg/spark/data/TestSparkDateTimes.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.netflix.iceberg.expressions.Literal; import com.netflix.iceberg.types.Types; import org.apache.spark.sql.catalyst.util.DateTimeUtils; import org.junit.Assert; import org.junit.Test; import java.util.TimeZone; public class TestSparkDateTimes { @Test public void testSparkDate() { // checkSparkDate("1582-10-14"); // -141428 checkSparkDate("1582-10-15"); // first day of the gregorian calendar checkSparkDate("1601-08-12"); checkSparkDate("1801-07-04"); checkSparkDate("1901-08-12"); checkSparkDate("1969-12-31"); checkSparkDate("1970-01-01"); checkSparkDate("2017-12-25"); checkSparkDate("2043-08-11"); checkSparkDate("2111-05-03"); checkSparkDate("2224-02-29"); checkSparkDate("3224-10-05"); } public void checkSparkDate(String dateString) { Literal<Integer> date = Literal.of(dateString).to(Types.DateType.get()); String sparkDate = DateTimeUtils.toJavaDate(date.value()).toString(); System.err.println(dateString + ": " + date.value()); Assert.assertEquals("Should be the same date (" + date.value() + ")", dateString, sparkDate); } @Test public void testSparkTimestamp() { TimeZone currentTz = TimeZone.getDefault(); try { TimeZone.setDefault(TimeZone.getTimeZone("UTC")); checkSparkTimestamp("1582-10-15T15:51:08.440219+00:00", "1582-10-15 15:51:08.440219"); checkSparkTimestamp("1970-01-01T00:00:00.000000+00:00", "1970-01-01 00:00:00"); 
checkSparkTimestamp("2043-08-11T12:30:01.000001+00:00", "2043-08-11 12:30:01.000001"); } finally { TimeZone.setDefault(currentTz); } } public void checkSparkTimestamp(String timestampString, String sparkRepr) { Literal<Long> ts = Literal.of(timestampString).to(Types.TimestampType.withZone()); String sparkTimestamp = DateTimeUtils.timestampToString(ts.value()); System.err.println(timestampString + ": " + ts.value()); Assert.assertEquals("Should be the same timestamp (" + ts.value() + ")", sparkRepr, sparkTimestamp); } }
2,231
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkSchemaUtil.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Binder; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.types.Type; import com.netflix.iceberg.types.TypeUtil; import com.netflix.iceberg.types.Types; import org.apache.spark.sql.AnalysisException; import org.apache.spark.sql.SparkSession; import org.apache.spark.sql.catalog.Column; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.StructType; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import static com.netflix.iceberg.spark.SparkTypeVisitor.visit; import static com.netflix.iceberg.types.TypeUtil.visit; /** * Helper methods for working with Spark/Hive metadata. */ public class SparkSchemaUtil { private SparkSchemaUtil() { } /** * Returns a {@link Schema} for the given table with fresh field ids. * <p> * This creates a Schema for an existing table by looking up the table's schema with Spark and * converting that schema. Spark/Hive partition columns are included in the schema. 
* * @param spark a Spark session * @param name a table name and (optional) database * @return a Schema for the table, if found */ public static Schema schemaForTable(SparkSession spark, String name) { StructType sparkType = spark.table(name).schema(); Type converted = visit(sparkType, new SparkTypeToType(sparkType)); return new Schema(converted.asNestedType().asStructType().fields()); } /** * Returns a {@link PartitionSpec} for the given table. * <p> * This creates a partition spec for an existing table by looking up the table's schema and * creating a spec with identity partitions for each partition column. * * @param spark a Spark session * @param name a table name and (optional) database * @return a PartitionSpec for the table, if found * @throws AnalysisException if thrown by the Spark catalog */ public static PartitionSpec specForTable(SparkSession spark, String name) throws AnalysisException { List<String> parts = Lists.newArrayList(Splitter.on('.').limit(2).split(name)); String db = parts.size() == 1 ? "default" : parts.get(0); String table = parts.get(parts.size() == 1 ? 0 : 1); return identitySpec( schemaForTable(spark, name), spark.catalog().listColumns(db, table).collectAsList()); } /** * Convert a {@link Schema} to a {@link DataType Spark type}. * * @param schema a Schema * @return the equivalent Spark type * @throws IllegalArgumentException if the type cannot be converted to Spark */ public static StructType convert(Schema schema) { return (StructType) visit(schema, new TypeToSparkType()); } /** * Convert a {@link Type} to a {@link DataType Spark type}. * * @param type a Type * @return the equivalent Spark type * @throws IllegalArgumentException if the type cannot be converted to Spark */ public static DataType convert(Type type) { return visit(type, new TypeToSparkType()); } /** * Convert a Spark {@link StructType struct} to a {@link Schema} with new field ids. * <p> * This conversion assigns fresh ids. 
* <p> * Some data types are represented as the same Spark type. These are converted to a default type. * <p> * To convert using a reference schema for field ids and ambiguous types, use * {@link #convert(Schema, StructType)}. * * @param sparkType a Spark StructType * @return the equivalent Schema * @throws IllegalArgumentException if the type cannot be converted */ public static Schema convert(StructType sparkType) { Type converted = visit(sparkType, new SparkTypeToType(sparkType)); return new Schema(converted.asNestedType().asStructType().fields()); } /** * Convert a Spark {@link DataType struct} to a {@link Type} with new field ids. * <p> * This conversion assigns fresh ids. * <p> * Some data types are represented as the same Spark type. These are converted to a default type. * <p> * To convert using a reference schema for field ids and ambiguous types, use * {@link #convert(Schema, StructType)}. * * @param sparkType a Spark DataType * @return the equivalent Type * @throws IllegalArgumentException if the type cannot be converted */ public static Type convert(DataType sparkType) { return visit(sparkType, new SparkTypeToType()); } /** * Convert a Spark {@link StructType struct} to a {@link Schema} based on the given schema. * <p> * This conversion does not assign new ids; it uses ids from the base schema. * <p> * Data types, field order, and nullability will match the spark type. This conversion may return * a schema that is not compatible with base schema. 
   * @param baseSchema a Schema on which conversion is based
   * @param sparkType a Spark StructType
   * @return the equivalent Schema
   * @throws IllegalArgumentException if the type cannot be converted or there are missing ids
   */
  public static Schema convert(Schema baseSchema, StructType sparkType) {
    // convert to a type with fresh ids
    Types.StructType struct = visit(sparkType, new SparkTypeToType(sparkType)).asStructType();
    // reassign ids to match the base schema
    Schema schema = TypeUtil.reassignIds(new Schema(struct.fields()), baseSchema);
    // fix types that can't be represented in Spark (UUID and Fixed)
    return FixupTypes.fixup(schema, baseSchema);
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType) {
    // no filters, so no extra columns need to be retained beyond the requested projection
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, ImmutableSet.of()))
        .asNestedType()
        .asStructType()
        .fields());
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   * <p>
   * The filters list of {@link Expression} is used to ensure that columns referenced by filters
   * are projected.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @param filters a list of filters
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType, List<Expression> filters) {
    // columns referenced by the filters must be bound, and therefore projected, even when the
    // requested Spark type does not include them
    Set<Integer> filterRefs = Binder.boundReferences(schema.asStruct(), filters);
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, filterRefs))
        .asNestedType()
        .asStructType()
        .fields());
  }

  /**
   * Prune columns from a {@link Schema} using a {@link StructType Spark type} projection.
   * <p>
   * This requires that the Spark type is a projection of the Schema. Nullability and types must
   * match.
   * <p>
   * The filter {@link Expression} is used to ensure that columns referenced by the filter
   * are projected.
   *
   * @param schema a Schema
   * @param requestedType a projection of the Spark representation of the Schema
   * @param filter a filter expression whose referenced columns must be projected
   * @return a Schema corresponding to the Spark projection
   * @throws IllegalArgumentException if the Spark type does not match the Schema
   */
  public static Schema prune(Schema schema, StructType requestedType, Expression filter) {
    // single-filter convenience overload of prune(Schema, StructType, List)
    Set<Integer> filterRefs = Binder.boundReferences(schema.asStruct(), Collections.singletonList(filter));
    return new Schema(visit(schema, new PruneColumnsWithoutReordering(requestedType, filterRefs))
        .asNestedType()
        .asStructType()
        .fields());
  }

  // Builds an identity partition spec from the columns flagged as partition columns.
  private static PartitionSpec identitySpec(Schema schema, Collection<Column> columns) {
    List<String> names = Lists.newArrayList();
    for (Column column : columns) {
      if (column.isPartition()) {
        names.add(column.name());
      }
    }
    return identitySpec(schema, names);
  }

  // Varargs convenience overload; delegates to the List-based overload.
  private static PartitionSpec identitySpec(Schema schema, String... partitionNames) {
    return identitySpec(schema, Lists.newArrayList(partitionNames));
  }

  // Builds an identity partition spec for the named columns.
  // Returns null (an unpartitioned signal to callers) when no partition names are given.
  private static PartitionSpec identitySpec(Schema schema, List<String> partitionNames) {
    if (partitionNames == null || partitionNames.isEmpty()) {
      return null;
    }
    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema);
    for (String partitionName : partitionNames) {
      builder.identity(partitionName);
    }
    return builder.build();
  }
}
2,232
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkExpressions.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.netflix.iceberg.Schema; import com.netflix.iceberg.expressions.Binder; import com.netflix.iceberg.expressions.BoundReference; import com.netflix.iceberg.expressions.Expression.Operation; import com.netflix.iceberg.expressions.ExpressionVisitors; import com.netflix.iceberg.types.Types.TimestampType; import com.netflix.iceberg.util.Pair; import org.apache.spark.sql.Column; import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute; import org.apache.spark.sql.catalyst.expressions.And; import org.apache.spark.sql.catalyst.expressions.And$; import org.apache.spark.sql.catalyst.expressions.Attribute; import org.apache.spark.sql.catalyst.expressions.AttributeReference; import org.apache.spark.sql.catalyst.expressions.BinaryExpression; import org.apache.spark.sql.catalyst.expressions.Cast; import org.apache.spark.sql.catalyst.expressions.EqualNullSafe; import org.apache.spark.sql.catalyst.expressions.EqualTo; import org.apache.spark.sql.catalyst.expressions.Expression; import org.apache.spark.sql.catalyst.expressions.GreaterThan; import org.apache.spark.sql.catalyst.expressions.GreaterThanOrEqual; import org.apache.spark.sql.catalyst.expressions.In; import org.apache.spark.sql.catalyst.expressions.InSet; import org.apache.spark.sql.catalyst.expressions.IsNotNull; 
import org.apache.spark.sql.catalyst.expressions.IsNull; import org.apache.spark.sql.catalyst.expressions.LessThan; import org.apache.spark.sql.catalyst.expressions.LessThanOrEqual; import org.apache.spark.sql.catalyst.expressions.Literal; import org.apache.spark.sql.catalyst.expressions.Not; import org.apache.spark.sql.catalyst.expressions.Not$; import org.apache.spark.sql.catalyst.expressions.Or; import org.apache.spark.sql.catalyst.expressions.Or$; import org.apache.spark.sql.catalyst.expressions.ParseToDate; import org.apache.spark.sql.catalyst.expressions.UnaryExpression; import org.apache.spark.sql.catalyst.expressions.Year; import org.apache.spark.sql.functions$; import org.apache.spark.sql.types.DateType$; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import java.time.Instant; import java.time.LocalDate; import java.time.LocalDateTime; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Function; import static com.netflix.iceberg.expressions.ExpressionVisitors.visit; import static com.netflix.iceberg.expressions.Expressions.alwaysFalse; import static com.netflix.iceberg.expressions.Expressions.and; import static com.netflix.iceberg.expressions.Expressions.equal; import static com.netflix.iceberg.expressions.Expressions.not; import static com.netflix.iceberg.expressions.Expressions.or; import static com.netflix.iceberg.expressions.Expressions.predicate; import static scala.collection.JavaConverters.seqAsJavaListConverter; import static scala.collection.JavaConverters.setAsJavaSetConverter; public class SparkExpressions { private SparkExpressions() { } private static final Map<Class<? extends Expression>, Operation> FILTERS = ImmutableMap .<Class<? 
extends Expression>, Operation>builder() .put(EqualTo.class, Operation.EQ) .put(EqualNullSafe.class, Operation.EQ) .put(GreaterThan.class, Operation.GT) .put(GreaterThanOrEqual.class, Operation.GT_EQ) .put(LessThan.class, Operation.LT) .put(LessThanOrEqual.class, Operation.LT_EQ) .put(In.class, Operation.IN) .put(InSet.class, Operation.IN) .put(IsNull.class, Operation.IS_NULL) .put(IsNotNull.class, Operation.NOT_NULL) .put(And.class, Operation.AND) .put(Or.class, Operation.OR) .put(Not.class, Operation.NOT) .build(); public static com.netflix.iceberg.expressions.Expression convert(Expression expr) { Class<? extends Expression> exprClass = expr.getClass(); Operation op = FILTERS.get(exprClass); if (op != null) { switch (op) { case IS_NULL: case NOT_NULL: UnaryExpression unary = (UnaryExpression) expr; if (unary.child() instanceof Attribute) { Attribute attr = (Attribute) unary.child(); return predicate(op, attr.name()); } return null; case LT: case LT_EQ: case GT: case GT_EQ: case EQ: case NOT_EQ: BinaryExpression binary = (BinaryExpression) expr; return convert(op, binary.left(), binary.right()); case NOT: com.netflix.iceberg.expressions.Expression child = convert(((Not) expr).child()); if (child != null) { return not(child); } return null; case AND: And andExpr = (And) expr; com.netflix.iceberg.expressions.Expression andLeft = convert(andExpr.left()); com.netflix.iceberg.expressions.Expression andRight = convert(andExpr.right()); if (andLeft != null && andRight != null) { return and(convert(andExpr.left()), convert(andExpr.right())); } return null; case OR: Or orExpr = (Or) expr; com.netflix.iceberg.expressions.Expression orLeft = convert(orExpr.left()); com.netflix.iceberg.expressions.Expression orRight = convert(orExpr.right()); if (orLeft != null && orRight != null) { return or(orLeft, orRight); } return null; case IN: if (expr instanceof In) { In inExpr = (In) expr; List<Object> literals = convertLiterals(seqAsJavaListConverter(inExpr.list()).asJava()); if 
(literals != null) { return convertIn(inExpr.value(), literals); } else { // if the list contained a non-literal, it can't be converted return null; } } else if (expr instanceof InSet) { InSet inExpr = (InSet) expr; // expressions are already converted to Java objects Set<Object> literals = setAsJavaSetConverter(inExpr.hset()).asJava(); return convertIn(inExpr.child(), literals); } default: } } return null; // can't convert } private enum Transform { IDENTITY, YEAR, // literal is an integer year, like 2018 DAY, // literal is an integer date } private static final Map<Class<? extends Expression>, Transform> TRANSFORMS = ImmutableMap .<Class<? extends Expression>, Transform>builder() .put(UnresolvedAttribute.class, Transform.IDENTITY) .put(AttributeReference.class, Transform.IDENTITY) .put(Year.class, Transform.YEAR) .put(ParseToDate.class, Transform.DAY) .put(Cast.class, Transform.DAY) .build(); private static com.netflix.iceberg.expressions.Expression convertIn(Expression expr, Collection<Object> values) { if (expr instanceof Attribute) { Attribute attr = (Attribute) expr; com.netflix.iceberg.expressions.Expression converted = alwaysFalse(); for (Object item : values) { converted = or(converted, equal(attr.name(), item)); } return converted; } return null; } private static List<Object> convertLiterals(List<Expression> values) { List<Object> converted = Lists.newArrayListWithExpectedSize(values.size()); for (Expression value : values) { if (value instanceof Literal) { Literal lit = (Literal) value; converted.add(valueFromSpark(lit)); } else { return null; } } return converted; } private static com.netflix.iceberg.expressions.Expression convert(Operation op, Expression left, Expression right) { Pair<Transform, String> attrPair = null; Operation leftOperation = null; Literal lit = null; if (right instanceof Literal) { lit = (Literal) right; attrPair = convertAttr(left); leftOperation = op; } else if (left instanceof Literal) { lit = (Literal) left; attrPair = 
convertAttr(right); leftOperation = op.flipLR(); } if (attrPair != null) { switch (attrPair.first()) { case IDENTITY: return predicate(leftOperation, attrPair.second(), valueFromSpark(lit)); case YEAR: return filter(leftOperation, attrPair.second(), (int) lit.value(), SparkExpressions::yearToTimestampMicros); case DAY: return filter(leftOperation, attrPair.second(), (int) lit.value(), SparkExpressions::dayToTimestampMicros); default: } } return null; } private static Object valueFromSpark(Literal lit) { if (lit.value() instanceof UTF8String) { return lit.value().toString(); } else if (lit.value() instanceof Decimal) { return ((Decimal) lit.value()).toJavaBigDecimal(); } return lit.value(); } private static Pair<Transform, String> convertAttr(Expression expr) { Transform type = TRANSFORMS.get(expr.getClass()); if (type == Transform.IDENTITY) { Attribute attr = (Attribute) expr; return Pair.of(type, attr.name()); } else if (expr instanceof Cast) { Cast cast = (Cast) expr; if (DateType$.MODULE$.sameType(cast.dataType()) && cast.child() instanceof Attribute) { Attribute attr = (Attribute) cast.child(); return Pair.of(Transform.DAY, attr.name()); } } else if (expr instanceof ParseToDate) { ParseToDate toDate = (ParseToDate) expr; if (toDate.left() instanceof Attribute) { Attribute attr = (Attribute) toDate.left(); return Pair.of(Transform.DAY, attr.name()); } } else if (expr instanceof UnaryExpression) { UnaryExpression func = (UnaryExpression) expr; if (func.child() instanceof Attribute) { Attribute attr = (Attribute) func.child(); return Pair.of(type, attr.name()); } } return null; } private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC); private static final LocalDate EPOCH_DAY = EPOCH.toLocalDate(); private static long yearToTimestampMicros(int year) { return ChronoUnit.MICROS.between(EPOCH, LocalDateTime.of(year, 1, 1, 0, 0).atOffset(ZoneOffset.UTC)); } private static long dayToTimestampMicros(int daysFromEpoch) { return 
ChronoUnit.MICROS.between(EPOCH, EPOCH_DAY.plusDays(daysFromEpoch).atStartOfDay().atOffset(ZoneOffset.UTC)); } private static com.netflix.iceberg.expressions.Literal<Long> tsLiteral(long timestampMicros) { return com.netflix.iceberg.expressions.Literal .of(timestampMicros) .to(TimestampType.withoutZone()); } private static com.netflix.iceberg.expressions.Expression filter( Operation op, String name, int value, Function<Integer, Long> startTsMicros) { switch (op) { case LT: return predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value))); case LT_EQ: return predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value + 1))); case GT: return predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value + 1))); case GT_EQ: return predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value))); case EQ: return and( predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value))), predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value + 1))) ); case NOT_EQ: return or( predicate(Operation.GT_EQ, name, tsLiteral(startTsMicros.apply(value + 1))), predicate(Operation.LT, name, tsLiteral(startTsMicros.apply(value))) ); case IN: case NOT_IN: default: throw new IllegalArgumentException("Cannot convert operation to year filter: " + op); } } public static Expression convert(com.netflix.iceberg.expressions.Expression filter, Schema schema) { return visit(Binder.bind(schema.asStruct(), filter), new ExpressionToSpark(schema)); } private static class ExpressionToSpark extends ExpressionVisitors. 
BoundExpressionVisitor<Expression> { private final Schema schema; public ExpressionToSpark(Schema schema) { this.schema = schema; } @Override public Expression alwaysTrue() { return functions$.MODULE$.lit(true).expr(); } @Override public Expression alwaysFalse() { return functions$.MODULE$.lit(false).expr(); } @Override public Expression not(Expression child) { return Not$.MODULE$.apply(child); } @Override public Expression and(Expression left, Expression right) { return And$.MODULE$.apply(left, right); } @Override public Expression or(Expression left, Expression right) { return Or$.MODULE$.apply(left, right); } @Override public <T> Expression isNull(BoundReference<T> ref) { return column(ref).isNull().expr(); } @Override public <T> Expression notNull(BoundReference<T> ref) { return column(ref).isNotNull().expr(); } @Override public <T> Expression lt(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).lt(lit.value()).expr(); } @Override public <T> Expression ltEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).leq(lit.value()).expr(); } @Override public <T> Expression gt(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).gt(lit.value()).expr(); } @Override public <T> Expression gtEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).geq(lit.value()).expr(); } @Override public <T> Expression eq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).equalTo(lit.value()).expr(); } @Override public <T> Expression notEq(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { return column(ref).notEqual(lit.value()).expr(); } @Override public <T> Expression in(BoundReference<T> ref, com.netflix.iceberg.expressions.Literal<T> lit) { throw new UnsupportedOperationException("Not implemented: in"); } @Override public <T> Expression notIn(BoundReference<T> 
ref, com.netflix.iceberg.expressions.Literal<T> lit) { throw new UnsupportedOperationException("Not implemented: notIn"); } private Column column(BoundReference ref) { return functions$.MODULE$.column(schema.findColumnName(ref.fieldId())); } } }
2,233
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkFilters.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.BoundReference;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expression.Operation;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.Literal;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.catalyst.expressions.And$;
import org.apache.spark.sql.catalyst.expressions.Not$;
import org.apache.spark.sql.catalyst.expressions.Or$;
import org.apache.spark.sql.catalyst.util.DateTimeUtils;
import org.apache.spark.sql.functions$;
import org.apache.spark.sql.sources.And;
import org.apache.spark.sql.sources.EqualNullSafe;
import org.apache.spark.sql.sources.EqualTo;
import org.apache.spark.sql.sources.Filter;
import org.apache.spark.sql.sources.GreaterThan;
import org.apache.spark.sql.sources.GreaterThanOrEqual;
import org.apache.spark.sql.sources.In;
import org.apache.spark.sql.sources.IsNotNull;
import org.apache.spark.sql.sources.IsNull;
import org.apache.spark.sql.sources.LessThan;
import org.apache.spark.sql.sources.LessThanOrEqual;
import org.apache.spark.sql.sources.Not;
import org.apache.spark.sql.sources.Or;
import java.sql.Date;
import java.sql.Timestamp;
import java.util.Map;

import static com.netflix.iceberg.expressions.ExpressionVisitors.visit;
import static com.netflix.iceberg.expressions.Expressions.alwaysFalse;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notNull;
import static com.netflix.iceberg.expressions.Expressions.or;

/**
 * Converts Spark data source {@link Filter filters} to Iceberg {@link Expression expressions}.
 * <p>
 * Conversion is best-effort: {@link #convert(Filter)} returns null when a filter (or one of its
 * children) cannot be expressed in Iceberg, which signals the caller not to push it down.
 */
public class SparkFilters {
  private SparkFilters() {
  }

  // maps Spark filter classes to the equivalent Iceberg operation
  private static final Map<Class<? extends Filter>, Operation> FILTERS = ImmutableMap
      .<Class<? extends Filter>, Operation>builder()
      .put(EqualTo.class, Operation.EQ)
      .put(EqualNullSafe.class, Operation.EQ)
      .put(GreaterThan.class, Operation.GT)
      .put(GreaterThanOrEqual.class, Operation.GT_EQ)
      .put(LessThan.class, Operation.LT)
      .put(LessThanOrEqual.class, Operation.LT_EQ)
      .put(In.class, Operation.IN)
      .put(IsNull.class, Operation.IS_NULL)
      .put(IsNotNull.class, Operation.NOT_NULL)
      .put(And.class, Operation.AND)
      .put(Or.class, Operation.OR)
      .put(Not.class, Operation.NOT)
      .build();

  /**
   * Converts a Spark data source filter to an Iceberg expression.
   *
   * @param filter a Spark filter
   * @return the equivalent Iceberg expression, or null if the filter cannot be converted
   */
  public static Expression convert(Filter filter) {
    // avoid using a chain of if instanceof statements by mapping to the expression enum.
    Operation op = FILTERS.get(filter.getClass());
    if (op != null) {
      switch (op) {
        case IS_NULL:
          IsNull isNullFilter = (IsNull) filter;
          return isNull(isNullFilter.attribute());

        case NOT_NULL:
          IsNotNull notNullFilter = (IsNotNull) filter;
          return notNull(notNullFilter.attribute());

        case LT:
          LessThan lt = (LessThan) filter;
          return lessThan(lt.attribute(), convertLiteral(lt.value()));

        case LT_EQ:
          LessThanOrEqual ltEq = (LessThanOrEqual) filter;
          return lessThanOrEqual(ltEq.attribute(), convertLiteral(ltEq.value()));

        case GT:
          GreaterThan gt = (GreaterThan) filter;
          return greaterThan(gt.attribute(), convertLiteral(gt.value()));

        case GT_EQ:
          GreaterThanOrEqual gtEq = (GreaterThanOrEqual) filter;
          return greaterThanOrEqual(gtEq.attribute(), convertLiteral(gtEq.value()));

        case EQ: // used for both eq and null-safe-eq
          if (filter instanceof EqualTo) {
            EqualTo eq = (EqualTo) filter;
            // comparison with null in normal equality is always null. this is probably a mistake.
            Preconditions.checkNotNull(eq.value(),
                "Expression is always false (eq is not null-safe): " + filter);
            return equal(eq.attribute(), convertLiteral(eq.value()));
          } else {
            // null-safe equality with a null value is an is-null check
            EqualNullSafe eq = (EqualNullSafe) filter;
            if (eq.value() == null) {
              return isNull(eq.attribute());
            } else {
              return equal(eq.attribute(), convertLiteral(eq.value()));
            }
          }

        case IN:
          // Iceberg has no native IN here; expand to an OR chain of equality predicates
          In inFilter = (In) filter;
          Expression in = alwaysFalse();
          for (Object value : inFilter.values()) {
            in = or(in, equal(inFilter.attribute(), convertLiteral(value)));
          }
          return in;

        case NOT:
          Not notFilter = (Not) filter;
          Expression child = convert(notFilter.child());
          if (child != null) {
            return not(child);
          }
          // child could not be converted, so the NOT cannot be pushed down either
          return null;

        case AND: {
          And andFilter = (And) filter;
          Expression left = convert(andFilter.left());
          Expression right = convert(andFilter.right());
          if (left != null && right != null) {
            return and(left, right);
          }
          return null;
        }

        case OR: {
          Or orFilter = (Or) filter;
          Expression left = convert(orFilter.left());
          Expression right = convert(orFilter.right());
          if (left != null && right != null) {
            return or(left, right);
          }
          return null;
        }
      }
    }

    return null;
  }

  // Converts Spark SQL date/timestamp values to the internal representations Iceberg literals
  // expect; other values pass through unchanged.
  private static Object convertLiteral(Object value) {
    if (value instanceof Timestamp) {
      return DateTimeUtils.fromJavaTimestamp((Timestamp) value);
    } else if (value instanceof Date) {
      return DateTimeUtils.fromJavaDate((Date) value);
    }
    return value;
  }
}
2,234
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/PruneColumnsWithReordering.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.types.ArrayType;
import org.apache.spark.sql.types.BinaryType;
import org.apache.spark.sql.types.BooleanType;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType;
import org.apache.spark.sql.types.DecimalType;
import org.apache.spark.sql.types.DoubleType;
import org.apache.spark.sql.types.FloatType;
import org.apache.spark.sql.types.IntegerType;
import org.apache.spark.sql.types.LongType;
import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.StringType;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.types.TimestampType;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;

/**
 * Schema visitor that prunes an Iceberg {@link Schema} to a requested Spark
 * {@link StructType} projection, reordering struct fields to match the Spark projection's
 * field order.
 * <p>
 * Fields referenced by filters ({@code filterRefs}) are kept even when they are not in the
 * requested projection; they are appended after the projected fields. Fields are matched by
 * name because Spark only sees the current table schema. The visitor is stateful: {@code current}
 * tracks the Spark type corresponding to the Iceberg type being visited and is saved/restored
 * around each nested visit, so instances are not thread-safe.
 */
public class PruneColumnsWithReordering extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final StructType requestedType;
  private final Set<Integer> filterRefs;
  // the Spark type paired with the Iceberg type currently being visited; null outside a visit
  private DataType current = null;

  PruneColumnsWithReordering(StructType requestedType, Set<Integer> filterRefs) {
    this.requestedType = requestedType;
    this.filterRefs = filterRefs;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> structResult) {
    // seed the visit with the requested projection, and reset when the visit completes
    this.current = requestedType;
    try {
      return structResult.get();
    } finally {
      this.current = null;
    }
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> fieldResults) {
    Preconditions.checkNotNull(struct, "Cannot prune null struct. Pruning must start with a schema.");
    Preconditions.checkArgument(current instanceof StructType, "Not a struct: %s", current);
    StructType s = (StructType) current;
    List<Types.NestedField> fields = struct.fields();
    List<Type> types = Lists.newArrayList(fieldResults);

    boolean changed = false;
    // use a LinkedHashMap to preserve the original order of filter fields that are not projected
    Map<String, Types.NestedField> projectedFields = Maps.newLinkedHashMap();
    for (int i = 0; i < fields.size(); i += 1) {
      Types.NestedField field = fields.get(i);
      Type type = types.get(i);
      if (type == null) {
        // field was pruned (not requested and not referenced by a filter)
        changed = true;
      } else if (field.type() == type) {
        projectedFields.put(field.name(), field);
      } else if (field.isOptional()) {
        changed = true;
        projectedFields.put(field.name(),
            Types.NestedField.optional(field.fieldId(), field.name(), type));
      } else {
        changed = true;
        projectedFields.put(field.name(),
            Types.NestedField.required(field.fieldId(), field.name(), type));
      }
    }

    // Construct a new struct with the projected struct's order
    boolean reordered = false;
    StructField[] requestedFields = s.fields();
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(requestedFields.length);
    for (int i = 0; i < requestedFields.length; i += 1) {
      // fields are resolved by name because Spark only sees the current table schema.
      String name = requestedFields[i].name();
      // NOTE(review): this indexes fields by the requested-field position; if the requested
      // projection has more fields than the source struct this would throw
      // IndexOutOfBoundsException — presumably field() already rejects unknown names. Confirm.
      if (!fields.get(i).name().equals(name)) {
        reordered = true;
      }
      newFields.add(projectedFields.remove(name));
    }

    // Add remaining filter fields that were not explicitly projected
    if (!projectedFields.isEmpty()) {
      newFields.addAll(projectedFields.values());
      changed = true; // order probably changed
    }

    if (reordered || changed) {
      return Types.StructType.of(newFields);
    }

    return struct;
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> fieldResult) {
    Preconditions.checkArgument(current instanceof StructType, "Not a struct: %s", current);
    StructType struct = (StructType) current;

    // fields are resolved by name because Spark only sees the current table schema.
    if (struct.getFieldIndex(field.name()).isEmpty()) {
      // make sure that filter fields are projected even if they aren't in the requested schema.
      if (filterRefs.contains(field.fieldId())) {
        return field.type();
      }
      // not requested and not needed by a filter: prune
      return null;
    }

    int fieldIndex = struct.fieldIndex(field.name());
    StructField f = struct.fields()[fieldIndex];

    Preconditions.checkArgument(f.nullable() || field.isRequired(),
        "Cannot project an optional field as non-null: %s", field.name());

    // descend with the matching Spark field type, restoring the struct type afterwards
    this.current = f.dataType();
    try {
      return fieldResult.get();
    } catch (IllegalArgumentException e) {
      // add the field name to projection errors raised by nested visits
      throw new IllegalArgumentException(
          "Invalid projection for field " + field.name() + ": " + e.getMessage(), e);
    } finally {
      this.current = struct;
    }
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> elementResult) {
    Preconditions.checkArgument(current instanceof ArrayType, "Not an array: %s", current);
    ArrayType array = (ArrayType) current;
    Preconditions.checkArgument(array.containsNull() || !list.isElementOptional(),
        "Cannot project an array of optional elements as required elements: %s", array);

    this.current = array.elementType();
    try {
      Type elementType = elementResult.get();
      if (list.elementType() == elementType) {
        return list;
      }

      // must be a projected element type, create a new list
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementType);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementType);
      }
    } finally {
      this.current = array;
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyResult, Supplier<Type> valueResult) {
    Preconditions.checkArgument(current instanceof MapType, "Not a map: %s", current);
    MapType m = (MapType) current;
    Preconditions.checkArgument(m.valueContainsNull() || !map.isValueOptional(),
        "Cannot project a map of optional values as required values: %s", map);
    Preconditions.checkArgument(StringType.class.isInstance(m.keyType()),
        "Invalid map key type (not string): %s", m.keyType());

    // only the value type can be projected; keys must be strings
    this.current = m.valueType();
    try {
      Type valueType = valueResult.get();
      if (map.valueType() == valueType) {
        return map;
      }

      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueType);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueType);
      }
    } finally {
      this.current = m;
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    Class<? extends DataType> expectedType = TYPES.get(primitive.typeId());
    Preconditions.checkArgument(expectedType != null && expectedType.isInstance(current),
        "Cannot project %s to incompatible type: %s", primitive, current);

    // additional checks based on type
    switch (primitive.typeId()) {
      case DECIMAL:
        // scale must match exactly; precision may only widen
        Types.DecimalType decimal = (Types.DecimalType) primitive;
        DecimalType d = (DecimalType) current;
        Preconditions.checkArgument(d.scale() == decimal.scale(),
            "Cannot project decimal with incompatible scale: %s != %s", d.scale(), decimal.scale());
        Preconditions.checkArgument(d.precision() >= decimal.precision(),
            "Cannot project decimal with incompatible precision: %s < %s",
            d.precision(), decimal.precision());
        break;
      case TIMESTAMP:
        // Spark's TimestampType is always adjusted to UTC
        Types.TimestampType timestamp = (Types.TimestampType) primitive;
        Preconditions.checkArgument(timestamp.shouldAdjustToUTC(),
            "Cannot project timestamp (without time zone) as timestamptz (with time zone)");
        break;
      default:
    }

    return primitive;
  }

  // Iceberg primitive type IDs to the compatible Spark type class.
  // UUID maps to string and fixed maps to binary because Spark has no equivalents.
  private static final Map<TypeID, Class<? extends DataType>> TYPES = ImmutableMap
      .<TypeID, Class<? extends DataType>>builder()
      .put(TypeID.BOOLEAN, BooleanType.class)
      .put(TypeID.INTEGER, IntegerType.class)
      .put(TypeID.LONG, LongType.class)
      .put(TypeID.FLOAT, FloatType.class)
      .put(TypeID.DOUBLE, DoubleType.class)
      .put(TypeID.DATE, DateType.class)
      .put(TypeID.TIMESTAMP, TimestampType.class)
      .put(TypeID.DECIMAL, DecimalType.class)
      .put(TypeID.UUID, StringType.class)
      .put(TypeID.STRING, StringType.class)
      .put(TypeID.FIXED, BinaryType.class)
      .put(TypeID.BINARY, BinaryType.class)
      .build();
}
2,235
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/FixupTypes.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.util.List;
import java.util.function.Supplier;

/**
 * This is used to fix primitive types to match a table schema. Some types, like binary and fixed,
 * are converted to the same Spark type. Conversion back can produce only one, which may not be
 * correct. This uses a reference schema to override types that were lost in round-trip conversion.
 * <p>
 * The visitor is stateful: {@code sourceType} tracks the reference type paired with the type
 * being visited and is saved/restored around each nested visit, so instances are not thread-safe.
 */
class FixupTypes extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  private final Schema referenceSchema;
  // the reference type for the node currently being visited
  private Type sourceType;

  /**
   * Returns a schema identical to {@code schema} except that primitive types lost in
   * Spark round-trip conversion (string vs UUID, binary vs fixed) are restored from
   * {@code referenceSchema}.
   */
  static Schema fixup(Schema schema, Schema referenceSchema) {
    return new Schema(TypeUtil.visit(schema,
        new FixupTypes(referenceSchema)).asStructType().fields());
  }

  private FixupTypes(Schema referenceSchema) {
    this.referenceSchema = referenceSchema;
    this.sourceType = referenceSchema.asStruct();
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> future) {
    // start (or restart) the visit at the reference schema's root struct
    this.sourceType = referenceSchema.asStruct();
    return future.get();
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> fieldTypes) {
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);

    List<Types.NestedField> fields = struct.fields();
    int length = fields.size();

    List<Type> types = Lists.newArrayList(fieldTypes);
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(length);
    boolean hasChange = false;
    for (int i = 0; i < length; i += 1) {
      Types.NestedField field = fields.get(i);
      Type resultType = types.get(i);
      if (field.type() == resultType) {
        // unchanged: keep the original field to avoid rebuilding
        newFields.add(field);
      } else if (field.isRequired()) {
        hasChange = true;
        newFields.add(Types.NestedField.required(field.fieldId(), field.name(), resultType));
      } else {
        hasChange = true;
        newFields.add(Types.NestedField.optional(field.fieldId(), field.name(), resultType));
      }
    }

    if (hasChange) {
      return Types.StructType.of(newFields);
    }
    return struct;
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> future) {
    Preconditions.checkArgument(sourceType.isStructType(), "Not a struct: " + sourceType);
    Types.StructType sourceStruct = sourceType.asStructType();
    // descend with the matching reference field type, restoring the struct afterwards.
    // NOTE(review): this assumes every field id exists in the reference struct; a missing id
    // would NPE here — presumably guaranteed by id reassignment upstream. Confirm.
    this.sourceType = sourceStruct.field(field.fieldId()).type();
    try {
      return future.get();
    } finally {
      sourceType = sourceStruct;
    }
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> elementTypeFuture) {
    Preconditions.checkArgument(sourceType.isListType(), "Not a list: " + sourceType);
    Types.ListType sourceList = sourceType.asListType();

    this.sourceType = sourceList.elementType();
    try {
      Type elementType = elementTypeFuture.get();
      if (list.elementType() == elementType) {
        return list;
      }

      // element type was fixed up: rebuild the list preserving optionality
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementType);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementType);
      }
    } finally {
      this.sourceType = sourceList;
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyTypeFuture, Supplier<Type> valueTypeFuture) {
    Preconditions.checkArgument(sourceType.isMapType(), "Not a map: " + sourceType);
    Types.MapType sourceMap = sourceType.asMapType();
    try {
      // visit the key and value with their respective reference types
      this.sourceType = sourceMap.keyType();
      Type keyType = keyTypeFuture.get();
      this.sourceType = sourceMap.valueType();
      Type valueType = valueTypeFuture.get();

      if (map.keyType() == keyType && map.valueType() == valueType) {
        return map;
      }

      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), keyType, valueType);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), keyType, valueType);
      }
    } finally {
      this.sourceType = sourceMap;
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    if (sourceType.equals(primitive)) {
      return primitive; // already correct
    }

    // restore types that Spark cannot represent and that round-trip as a wider type
    switch (primitive.typeId()) {
      case STRING:
        if (sourceType.typeId() == Type.TypeID.UUID) {
          return sourceType;
        }
        break;
      case BINARY:
        if (sourceType.typeId() == Type.TypeID.FIXED) {
          return sourceType;
        }
        break;
      default:
    }

    // nothing to fix up, let validation catch promotion errors
    return primitive;
  }
}
2,236
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/PruneColumnsWithoutReordering.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.types.ArrayType;
import org.apache.spark.sql.types.BinaryType;
import org.apache.spark.sql.types.BooleanType;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType;
import org.apache.spark.sql.types.DecimalType;
import org.apache.spark.sql.types.DoubleType;
import org.apache.spark.sql.types.FloatType;
import org.apache.spark.sql.types.IntegerType;
import org.apache.spark.sql.types.LongType;
import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.StringType;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.types.TimestampType;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;

/**
 * Prunes an Iceberg schema down to the columns present in a requested Spark
 * {@link StructType}, preserving the original field order of the Iceberg schema.
 * Fields referenced by pushed-down filters are kept even when the requested
 * projection omits them.
 */
public class PruneColumnsWithoutReordering extends TypeUtil.CustomOrderSchemaVisitor<Type> {
  // Spark projection that drives the pruning.
  private final StructType requestedType;
  // Iceberg field ids referenced by filter expressions; always projected.
  private final Set<Integer> filterRefs;
  // Cursor into requestedType tracking the Spark type for the node currently visited.
  // Mutated on descent and restored on the way back up around each Supplier.get().
  private DataType current = null;

  PruneColumnsWithoutReordering(StructType requestedType, Set<Integer> filterRefs) {
    this.requestedType = requestedType;
    this.filterRefs = filterRefs;
  }

  @Override
  public Type schema(Schema schema, Supplier<Type> structResult) {
    this.current = requestedType;
    try {
      return structResult.get();
    } finally {
      this.current = null;
    }
  }

  @Override
  public Type struct(Types.StructType struct, Iterable<Type> fieldResults) {
    Preconditions.checkNotNull(struct, "Cannot prune null struct. Pruning must start with a schema.");
    Preconditions.checkArgument(current instanceof StructType, "Not a struct: %s", current);
    List<Types.NestedField> fields = struct.fields();
    // materialize lazy results so each field() call runs before we compare types
    List<Type> types = Lists.newArrayList(fieldResults);
    boolean changed = false;
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(types.size());
    for (int i = 0; i < fields.size(); i += 1) {
      Types.NestedField field = fields.get(i);
      Type type = types.get(i);
      if (type == null) {
        // field() returned null: not requested and not a filter ref — drop it
        changed = true;
      } else if (field.type() == type) {
        newFields.add(field);
      } else if (field.isOptional()) {
        changed = true;
        newFields.add(Types.NestedField.optional(field.fieldId(), field.name(), type));
      } else {
        changed = true;
        newFields.add(Types.NestedField.required(field.fieldId(), field.name(), type));
      }
    }
    if (changed) {
      return Types.StructType.of(newFields);
    }
    // unchanged: return the original so parents detect "no change" by identity
    return struct;
  }

  @Override
  public Type field(Types.NestedField field, Supplier<Type> fieldResult) {
    Preconditions.checkArgument(current instanceof StructType, "Not a struct: %s", current);
    StructType struct = (StructType) current;
    // fields are resolved by name because Spark only sees the current table schema.
    if (struct.getFieldIndex(field.name()).isEmpty()) {
      // make sure that filter fields are projected even if they aren't in the requested schema.
      if (filterRefs.contains(field.fieldId())) {
        return field.type();
      }
      return null;
    }
    int fieldIndex = struct.fieldIndex(field.name());
    StructField f = struct.fields()[fieldIndex];
    Preconditions.checkArgument(f.nullable() || field.isRequired(),
        "Cannot project an optional field as non-null: %s", field.name());
    this.current = f.dataType();
    try {
      return fieldResult.get();
    } catch (IllegalArgumentException e) {
      // re-wrap to add the field name for context; original is preserved as the cause
      throw new IllegalArgumentException(
          "Invalid projection for field " + field.name() + ": " + e.getMessage(), e);
    } finally {
      this.current = struct;
    }
  }

  @Override
  public Type list(Types.ListType list, Supplier<Type> elementResult) {
    Preconditions.checkArgument(current instanceof ArrayType, "Not an array: %s", current);
    ArrayType array = (ArrayType) current;
    Preconditions.checkArgument(array.containsNull() || !list.isElementOptional(),
        "Cannot project an array of optional elements as required elements: %s", array);
    this.current = array.elementType();
    try {
      Type elementType = elementResult.get();
      if (list.elementType() == elementType) {
        return list;
      }
      // must be a projected element type, create a new list
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementType);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementType);
      }
    } finally {
      this.current = array;
    }
  }

  @Override
  public Type map(Types.MapType map, Supplier<Type> keyResult, Supplier<Type> valueResult) {
    Preconditions.checkArgument(current instanceof MapType, "Not a map: %s", current);
    MapType m = (MapType) current;
    Preconditions.checkArgument(m.valueContainsNull() || !map.isValueOptional(),
        "Cannot project a map of optional values as required values: %s", map);
    // NOTE: keyResult is never forced — map keys are kept as-is and are not pruned
    // or validated against the requested key type.
    this.current = m.valueType();
    try {
      Type valueType = valueResult.get();
      if (map.valueType() == valueType) {
        return map;
      }
      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueType);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueType);
      }
    } finally {
      this.current = m;
    }
  }

  @Override
  public Type primitive(Type.PrimitiveType primitive) {
    // first check the Spark class expected for this Iceberg type id
    Class<? extends DataType> expectedType = TYPES.get(primitive.typeId());
    Preconditions.checkArgument(expectedType != null && expectedType.isInstance(current),
        "Cannot project %s to incompatible type: %s", primitive, current);

    // additional checks based on type
    switch (primitive.typeId()) {
      case DECIMAL:
        Types.DecimalType decimal = (Types.DecimalType) primitive;
        DecimalType d = (DecimalType) current;
        // scale must match exactly; precision may only widen
        Preconditions.checkArgument(d.scale() == decimal.scale(),
            "Cannot project decimal with incompatible scale: %s != %s", d.scale(), decimal.scale());
        Preconditions.checkArgument(d.precision() >= decimal.precision(),
            "Cannot project decimal with incompatible precision: %s < %s",
            d.precision(), decimal.precision());
        break;
      case TIMESTAMP:
        Types.TimestampType timestamp = (Types.TimestampType) primitive;
        // Spark timestamps are always with time zone
        Preconditions.checkArgument(timestamp.shouldAdjustToUTC(),
            "Cannot project timestamp (without time zone) as timestamptz (with time zone)");
        break;
      default:
    }

    return primitive;
  }

  // Iceberg type id -> Spark type class used for compatibility checks in primitive().
  // UUID maps to StringType and FIXED to BinaryType because Spark has no direct equivalents.
  private static final Map<TypeID, Class<? extends DataType>> TYPES = ImmutableMap
      .<TypeID, Class<? extends DataType>>builder()
      .put(TypeID.BOOLEAN, BooleanType.class)
      .put(TypeID.INTEGER, IntegerType.class)
      .put(TypeID.LONG, LongType.class)
      .put(TypeID.FLOAT, FloatType.class)
      .put(TypeID.DOUBLE, DoubleType.class)
      .put(TypeID.DATE, DateType.class)
      .put(TypeID.TIMESTAMP, TimestampType.class)
      .put(TypeID.DECIMAL, DecimalType.class)
      .put(TypeID.UUID, StringType.class)
      .put(TypeID.STRING, StringType.class)
      .put(TypeID.FIXED, BinaryType.class)
      .put(TypeID.BINARY, BinaryType.class)
      .build();
}
2,237
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/TypeToSparkType.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.types.ArrayType$;
import org.apache.spark.sql.types.BinaryType$;
import org.apache.spark.sql.types.BooleanType$;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType$;
import org.apache.spark.sql.types.DecimalType$;
import org.apache.spark.sql.types.DoubleType$;
import org.apache.spark.sql.types.FloatType$;
import org.apache.spark.sql.types.IntegerType$;
import org.apache.spark.sql.types.LongType$;
import org.apache.spark.sql.types.MapType$;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StringType$;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType$;
import org.apache.spark.sql.types.TimestampType$;
import java.util.List;

/**
 * Schema visitor that converts Iceberg types to their Spark SQL equivalents.
 * Conversion is lossy for some types: UUID becomes a string and FIXED becomes
 * binary, because Spark has no direct counterparts.
 */
class TypeToSparkType extends TypeUtil.SchemaVisitor<DataType> {
  TypeToSparkType() {
  }

  @Override
  public DataType schema(Schema schema, DataType structType) {
    // the schema result is just its root struct's conversion
    return structType;
  }

  @Override
  public DataType struct(Types.StructType struct, List<DataType> fieldResults) {
    List<Types.NestedField> icebergFields = struct.fields();
    List<StructField> converted = Lists.newArrayListWithExpectedSize(fieldResults.size());

    for (int idx = 0; idx < icebergFields.size(); idx += 1) {
      Types.NestedField icebergField = icebergFields.get(idx);
      // optional Iceberg fields become nullable Spark fields
      converted.add(StructField.apply(
          icebergField.name(), fieldResults.get(idx), icebergField.isOptional(), Metadata.empty()));
    }

    return StructType$.MODULE$.apply(converted);
  }

  @Override
  public DataType field(Types.NestedField field, DataType fieldResult) {
    // nothing extra to do per field; the struct callback attaches name/nullability
    return fieldResult;
  }

  @Override
  public DataType list(Types.ListType list, DataType elementResult) {
    return ArrayType$.MODULE$.apply(elementResult, list.isElementOptional());
  }

  @Override
  public DataType map(Types.MapType map, DataType keyResult, DataType valueResult) {
    return MapType$.MODULE$.apply(keyResult, valueResult, map.isValueOptional());
  }

  @Override
  public DataType primitive(Type.PrimitiveType primitive) {
    switch (primitive.typeId()) {
      case BOOLEAN:
        return BooleanType$.MODULE$;
      case INTEGER:
        return IntegerType$.MODULE$;
      case LONG:
        return LongType$.MODULE$;
      case FLOAT:
        return FloatType$.MODULE$;
      case DOUBLE:
        return DoubleType$.MODULE$;
      case DATE:
        return DateType$.MODULE$;
      case TIME:
        throw new UnsupportedOperationException(
            "Spark does not support time fields");
      case TIMESTAMP:
        // only timestamps with a time zone have a Spark equivalent
        if (((Types.TimestampType) primitive).shouldAdjustToUTC()) {
          return TimestampType$.MODULE$;
        }
        throw new UnsupportedOperationException(
            "Spark does not support timestamp without time zone fields");
      case STRING:
      case UUID:
        // Spark has no UUID type; use String
        return StringType$.MODULE$;
      case FIXED:
      case BINARY:
        // Spark has no fixed-length type; both map to binary
        return BinaryType$.MODULE$;
      case DECIMAL: {
        Types.DecimalType decimal = (Types.DecimalType) primitive;
        return DecimalType$.MODULE$.apply(decimal.precision(), decimal.scale());
      }
      default:
        throw new UnsupportedOperationException(
            "Cannot convert unknown type to Spark: " + primitive);
    }
  }
}
2,238
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkTypeVisitor.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.collect.Lists;
import org.apache.spark.sql.types.ArrayType;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.types.UserDefinedType;
import java.util.List;

/**
 * Post-order visitor over Spark SQL {@link DataType} trees. Subclasses override
 * the callbacks they care about; the default implementations return null.
 *
 * @param <T> result type produced by each callback
 */
class SparkTypeVisitor<T> {
  /** Recursively visits {@code type}, invoking callbacks in post-order. */
  static <T> T visit(DataType type, SparkTypeVisitor<T> visitor) {
    if (type instanceof StructType) {
      StructType struct = (StructType) type;
      StructField[] structFields = struct.fields();
      List<T> childResults = Lists.newArrayListWithExpectedSize(structFields.length);

      // visit each field's type first, then give the visitor a chance to wrap it
      for (StructField structField : structFields) {
        T childResult = visit(structField.dataType(), visitor);
        childResults.add(visitor.field(structField, childResult));
      }

      return visitor.struct(struct, childResults);

    } else if (type instanceof MapType) {
      MapType map = (MapType) type;
      T keyResult = visit(map.keyType(), visitor);
      T valueResult = visit(map.valueType(), visitor);
      return visitor.map(map, keyResult, valueResult);

    } else if (type instanceof ArrayType) {
      ArrayType array = (ArrayType) type;
      return visitor.array(array, visit(array.elementType(), visitor));

    } else if (type instanceof UserDefinedType) {
      throw new UnsupportedOperationException(
          "User-defined types are not supported");

    } else {
      // any remaining type is a leaf (atomic) type
      return visitor.atomic(type);
    }
  }

  public T struct(StructType struct, List<T> fieldResults) {
    return null;
  }

  public T field(StructField field, T typeResult) {
    return null;
  }

  public T array(ArrayType array, T elementResult) {
    return null;
  }

  public T map(MapType map, T keyResult, T valueResult) {
    return null;
  }

  public T atomic(DataType atomic) {
    return null;
  }
}
2,239
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/SparkTypeToType.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark;

import com.google.common.collect.Lists;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.types.ArrayType;
import org.apache.spark.sql.types.BinaryType;
import org.apache.spark.sql.types.BooleanType;
import org.apache.spark.sql.types.ByteType;
import org.apache.spark.sql.types.CharType;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.DateType;
import org.apache.spark.sql.types.DecimalType;
import org.apache.spark.sql.types.DoubleType;
import org.apache.spark.sql.types.FloatType;
import org.apache.spark.sql.types.IntegerType;
import org.apache.spark.sql.types.LongType;
import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.ShortType;
import org.apache.spark.sql.types.StringType;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.apache.spark.sql.types.TimestampType;
import org.apache.spark.sql.types.VarcharType;
import java.util.List;

/**
 * Converts a Spark SQL type tree into the equivalent Iceberg type, assigning
 * Iceberg field ids along the way. When constructed with the root struct, the
 * root's fields get ordinal ids (0..n-1) and all nested ids follow; id values
 * therefore depend on the post-order visit order, so this class is stateful
 * and single-use.
 */
class SparkTypeToType extends SparkTypeVisitor<Type> {
  // the root struct, used to detect when ordinal ids should be assigned; may be null
  private final StructType root;
  // next id to hand out for nested fields/collections
  private int nextId = 0;

  SparkTypeToType() {
    this.root = null;
  }

  SparkTypeToType(StructType root) {
    this.root = root;
    // the root struct's fields use the first ids
    this.nextId = root.fields().length;
  }

  // returns the current id and advances the counter
  private int getNextId() {
    int next = nextId;
    nextId += 1;
    return next;
  }

  @Override
  public Type struct(StructType struct, List<Type> types) {
    StructField[] fields = struct.fields();
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fields.length);
    // identity check: only the exact root instance gets ordinal ids
    boolean isRoot = root == struct;
    for (int i = 0; i < fields.length; i += 1) {
      StructField field = fields[i];
      Type type = types.get(i);

      int id;
      if (isRoot) {
        // for new conversions, use ordinals for ids in the root struct
        id = i;
      } else {
        id = getNextId();
      }

      // nullable Spark fields become optional Iceberg fields
      if (field.nullable()) {
        newFields.add(Types.NestedField.optional(id, field.name(), type));
      } else {
        newFields.add(Types.NestedField.required(id, field.name(), type));
      }
    }

    return Types.StructType.of(newFields);
  }

  @Override
  public Type field(StructField field, Type typeResult) {
    // name/nullability are handled in struct(); pass the converted type through
    return typeResult;
  }

  @Override
  public Type array(ArrayType array, Type elementType) {
    if (array.containsNull()) {
      return Types.ListType.ofOptional(getNextId(), elementType);
    } else {
      return Types.ListType.ofRequired(getNextId(), elementType);
    }
  }

  @Override
  public Type map(MapType map, Type keyType, Type valueType) {
    // two ids: one for the key field, one for the value field
    if (map.valueContainsNull()) {
      return Types.MapType.ofOptional(getNextId(), getNextId(), keyType, valueType);
    } else {
      return Types.MapType.ofRequired(getNextId(), getNextId(), keyType, valueType);
    }
  }

  @Override
  public Type atomic(DataType atomic) {
    if (atomic instanceof BooleanType) {
      return Types.BooleanType.get();

    } else if (
        atomic instanceof IntegerType ||
        atomic instanceof ShortType ||
        atomic instanceof ByteType) {
      // Iceberg has no short/byte types; widen to int
      return Types.IntegerType.get();

    } else if (atomic instanceof LongType) {
      return Types.LongType.get();

    } else if (atomic instanceof FloatType) {
      return Types.FloatType.get();

    } else if (atomic instanceof DoubleType) {
      return Types.DoubleType.get();

    } else if (
        atomic instanceof StringType ||
        atomic instanceof CharType ||
        atomic instanceof VarcharType) {
      // char/varchar lose their length bound in the conversion
      return Types.StringType.get();

    } else if (atomic instanceof DateType) {
      return Types.DateType.get();

    } else if (atomic instanceof TimestampType) {
      // Spark timestamps are always zone-adjusted
      return Types.TimestampType.withZone();

    } else if (atomic instanceof DecimalType) {
      return Types.DecimalType.of(
          ((DecimalType) atomic).precision(),
          ((DecimalType) atomic).scale());

    } else if (atomic instanceof BinaryType) {
      return Types.BinaryType.get();
    }

    throw new UnsupportedOperationException(
        "Not a supported type: " + atomic.catalogString());
  }
}
2,240
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/source/Stats.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import org.apache.spark.sql.sources.v2.reader.Statistics; import java.util.OptionalLong; class Stats implements Statistics { private final OptionalLong sizeInBytes; private final OptionalLong numRows; Stats(long sizeInBytes, long numRows) { this.sizeInBytes = OptionalLong.of(sizeInBytes); this.numRows = OptionalLong.of(numRows); } @Override public OptionalLong sizeInBytes() { return sizeInBytes; } @Override public OptionalLong numRows() { return numRows; } }
2,241
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/source/Writer.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.netflix.iceberg.AppendFiles; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.DataFiles; import com.netflix.iceberg.FileFormat; import com.netflix.iceberg.Metrics; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.Table; import com.netflix.iceberg.TableProperties; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.hadoop.HadoopOutputFile; import com.netflix.iceberg.io.FileAppender; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.io.OutputFile; import com.netflix.iceberg.orc.ORC; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.spark.data.SparkAvroWriter; import com.netflix.iceberg.spark.data.SparkOrcWriter; import com.netflix.iceberg.transforms.Transform; import com.netflix.iceberg.transforms.Transforms; import com.netflix.iceberg.types.Types.StringType; import com.netflix.iceberg.util.Tasks; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport; import org.apache.spark.sql.sources.v2.writer.DataSourceWriter; import org.apache.spark.sql.sources.v2.writer.DataWriter; import org.apache.spark.sql.sources.v2.writer.DataWriterFactory; import org.apache.spark.sql.sources.v2.writer.SupportsWriteInternalRow; import org.apache.spark.sql.sources.v2.writer.WriterCommitMessage; import org.apache.spark.util.SerializableConfiguration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.Closeable; import java.io.IOException; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.function.Function; import static com.google.common.collect.Iterables.concat; import static com.google.common.collect.Iterables.transform; import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS; import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT; import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS; import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT; import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES; import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT; import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS; import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT; import static com.netflix.iceberg.TableProperties.OBJECT_STORE_ENABLED; import static com.netflix.iceberg.TableProperties.OBJECT_STORE_ENABLED_DEFAULT; import static com.netflix.iceberg.TableProperties.OBJECT_STORE_PATH; import static com.netflix.iceberg.spark.SparkSchemaUtil.convert; // TODO: parameterize DataSourceWriter with subclass of WriterCommitMessage class Writer implements DataSourceWriter, 
SupportsWriteInternalRow { private static final Transform<String, Integer> HASH_FUNC = Transforms .bucket(StringType.get(), Integer.MAX_VALUE); private static final Logger LOG = LoggerFactory.getLogger(Writer.class); private final Table table; private final Configuration conf; private final FileFormat format; Writer(Table table, Configuration conf, FileFormat format) { this.table = table; this.conf = conf; this.format = format; } @Override public DataWriterFactory<InternalRow> createInternalRowWriterFactory() { return new WriterFactory(table.spec(), format, dataLocation(), table.properties(), conf); } @Override public void commit(WriterCommitMessage[] messages) { AppendFiles append = table.newAppend(); int numFiles = 0; for (DataFile file : files(messages)) { numFiles += 1; append.appendFile(file); } LOG.info("Appending {} files to {}", numFiles, table); long start = System.currentTimeMillis(); append.commit(); // abort is automatically called if this fails long duration = System.currentTimeMillis() - start; LOG.info("Committed in {} ms", duration); } @Override public void abort(WriterCommitMessage[] messages) { FileSystem fs; try { fs = new Path(table.location()).getFileSystem(conf); } catch (IOException e) { throw new RuntimeIOException(e); } Tasks.foreach(files(messages)) .retry(propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT)) .exponentialBackoff( propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT), propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT), propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT), 2.0 /* exponential */ ) .throwFailureWhenFinished() .run(file -> { try { fs.delete(new Path(file.path().toString()), false /* not recursive */ ); } catch (IOException e) { throw new RuntimeIOException(e); } }); } private Iterable<DataFile> files(WriterCommitMessage[] messages) { if (messages.length > 0) { return concat(transform(Arrays.asList(messages), message -> message != null 
? ImmutableList.copyOf(((TaskCommit) message).files()) : ImmutableList.of())); } return ImmutableList.of(); } private int propertyAsInt(String property, int defaultValue) { Map<String, String> properties = table.properties(); String value = properties.get(property); if (value != null) { return Integer.parseInt(properties.get(property)); } return defaultValue; } private String dataLocation() { return table.properties().getOrDefault( TableProperties.WRITE_NEW_DATA_LOCATION, new Path(new Path(table.location()), "data").toString()); } @Override public String toString() { return String.format("IcebergWrite(table=%s, type=%s, format=%s)", table, table.schema().asStruct(), format); } private static class TaskCommit implements WriterCommitMessage { private final DataFile[] files; TaskCommit() { this.files = new DataFile[0]; } TaskCommit(DataFile file) { this.files = new DataFile[] { file }; } TaskCommit(List<DataFile> files) { this.files = files.toArray(new DataFile[files.size()]); } DataFile[] files() { return files; } } private static class WriterFactory implements DataWriterFactory<InternalRow> { private final PartitionSpec spec; private final FileFormat format; private final String dataLocation; private final Map<String, String> properties; private final SerializableConfiguration conf; private final String uuid = UUID.randomUUID().toString(); private transient Path dataPath = null; WriterFactory(PartitionSpec spec, FileFormat format, String dataLocation, Map<String, String> properties, Configuration conf) { this.spec = spec; this.format = format; this.dataLocation = dataLocation; this.properties = properties; this.conf = new SerializableConfiguration(conf); } @Override public DataWriter<InternalRow> createDataWriter(int partitionId, int attemptNumber) { String filename = format.addExtension(String.format("%05d-%d-%s", partitionId, attemptNumber, uuid)); AppenderFactory<InternalRow> factory = new SparkAppenderFactory(); if (spec.fields().isEmpty()) { return new 
UnpartitionedWriter(lazyDataPath(), filename, format, conf.value(), factory); } else { Path baseDataPath = lazyDataPath(); // avoid calling this in the output path function Function<PartitionKey, Path> outputPathFunc = key -> new Path(new Path(baseDataPath, key.toPath()), filename); boolean useObjectStorage = ( Boolean.parseBoolean(properties.get(OBJECT_STORE_ENABLED)) || OBJECT_STORE_ENABLED_DEFAULT ); if (useObjectStorage) { // try to get db and table portions of the path for context in the object store String context = pathContext(baseDataPath); String objectStore = properties.get(OBJECT_STORE_PATH); Preconditions.checkNotNull(objectStore, "Cannot use object storage, missing location: " + OBJECT_STORE_PATH); Path objectStorePath = new Path(objectStore); outputPathFunc = key -> { String partitionAndFilename = key.toPath() + "/" + filename; int hash = HASH_FUNC.apply(partitionAndFilename); return new Path(objectStorePath, String.format("%08x/%s/%s", hash, context, partitionAndFilename)); }; } return new PartitionedWriter(spec, format, conf.value(), factory, outputPathFunc); } } private static String pathContext(Path dataPath) { Path parent = dataPath.getParent(); if (parent != null) { // remove the data folder if (dataPath.getName().equals("data")) { return pathContext(parent); } return parent.getName() + "/" + dataPath.getName(); } return dataPath.getName(); } private Path lazyDataPath() { if (dataPath == null) { this.dataPath = new Path(dataLocation); } return dataPath; } private class SparkAppenderFactory implements AppenderFactory<InternalRow> { public FileAppender<InternalRow> newAppender(OutputFile file, FileFormat format) { Schema schema = spec.schema(); try { switch (format) { case PARQUET: String jsonSchema = convert(schema).json(); return Parquet.write(file) .writeSupport(new ParquetWriteSupport()) .set("org.apache.spark.sql.parquet.row.attributes", jsonSchema) .set("spark.sql.parquet.writeLegacyFormat", "false") .set("spark.sql.parquet.binaryAsString", 
"false") .set("spark.sql.parquet.int96AsTimestamp", "false") .set("spark.sql.parquet.outputTimestampType", "TIMESTAMP_MICROS") .setAll(properties) .schema(schema) .build(); case AVRO: return Avro.write(file) .createWriterFunc(ignored -> new SparkAvroWriter(schema)) .setAll(properties) .schema(schema) .build(); case ORC: { @SuppressWarnings("unchecked") SparkOrcWriter writer = new SparkOrcWriter(ORC.write(file) .schema(schema) .build()); return writer; } default: throw new UnsupportedOperationException("Cannot write unknown format: " + format); } } catch (IOException e) { throw new RuntimeIOException(e); } } } } private interface AppenderFactory<T> { FileAppender<T> newAppender(OutputFile file, FileFormat format); } private static class UnpartitionedWriter implements DataWriter<InternalRow>, Closeable { private final Path file; private final Configuration conf; private FileAppender<InternalRow> appender = null; private Metrics metrics = null; UnpartitionedWriter(Path dataPath, String filename, FileFormat format, Configuration conf, AppenderFactory<InternalRow> factory) { this.file = new Path(dataPath, filename); this.appender = factory.newAppender(HadoopOutputFile.fromPath(file, conf), format); this.conf = conf; } @Override public void write(InternalRow record) { appender.add(record); } @Override public WriterCommitMessage commit() throws IOException { Preconditions.checkArgument(appender != null, "Commit called on a closed writer: %s", this); close(); if (metrics.recordCount() == 0L) { FileSystem fs = file.getFileSystem(conf); fs.delete(file, false); return new TaskCommit(); } InputFile inFile = HadoopInputFile.fromPath(file, conf); DataFile dataFile = DataFiles.fromInputFile(inFile, null, metrics); return new TaskCommit(dataFile); } @Override public void abort() throws IOException { Preconditions.checkArgument(appender != null, "Abort called on a closed writer: %s", this); close(); FileSystem fs = file.getFileSystem(conf); fs.delete(file, false); } @Override 
public void close() throws IOException { if (this.appender != null) { this.appender.close(); this.metrics = appender.metrics(); this.appender = null; } } } private static class PartitionedWriter implements DataWriter<InternalRow> { private final Set<PartitionKey> completedPartitions = Sets.newHashSet(); private final List<DataFile> completedFiles = Lists.newArrayList(); private final PartitionSpec spec; private final FileFormat format; private final Configuration conf; private final AppenderFactory<InternalRow> factory; private final Function<PartitionKey, Path> outputPathFunc; private final PartitionKey key; private PartitionKey currentKey = null; private FileAppender<InternalRow> currentAppender = null; private Path currentPath = null; PartitionedWriter(PartitionSpec spec, FileFormat format, Configuration conf, AppenderFactory<InternalRow> factory, Function<PartitionKey, Path> outputPathFunc) { this.spec = spec; this.format = format; this.conf = conf; this.factory = factory; this.outputPathFunc = outputPathFunc; this.key = new PartitionKey(spec); } @Override public void write(InternalRow row) throws IOException { key.partition(row); if (!key.equals(currentKey)) { closeCurrent(); if (completedPartitions.contains(key)) { // if rows are not correctly grouped, detect and fail the write PartitionKey existingKey = Iterables.find(completedPartitions, key::equals, null); LOG.warn("Duplicate key: {} == {}", existingKey, key); throw new IllegalStateException("Already closed file for partition: " + key.toPath()); } this.currentKey = key.copy(); this.currentPath = outputPathFunc.apply(currentKey); OutputFile file = HadoopOutputFile.fromPath(currentPath, conf); this.currentAppender = factory.newAppender(file, format); } currentAppender.add(row); } @Override public WriterCommitMessage commit() throws IOException { closeCurrent(); return new TaskCommit(completedFiles); } @Override public void abort() throws IOException { FileSystem fs = currentPath.getFileSystem(conf); // clean 
up files created by this writer Tasks.foreach(completedFiles) .throwFailureWhenFinished() .noRetry() .run(file -> fs.delete(new Path(file.path().toString())), IOException.class); if (currentAppender != null) { currentAppender.close(); this.currentAppender = null; fs.delete(currentPath); } } private void closeCurrent() throws IOException { if (currentAppender != null) { currentAppender.close(); // metrics are only valid after the appender is closed Metrics metrics = currentAppender.metrics(); this.currentAppender = null; InputFile inFile = HadoopInputFile.fromPath(currentPath, conf); DataFile dataFile = DataFiles.builder(spec) .withInputFile(inFile) .withPartition(currentKey) .withMetrics(metrics) .build(); completedPartitions.add(currentKey); completedFiles.add(dataFile); } } } }
2,242
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/source/PartitionKey.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.collect.Maps;
import com.netflix.iceberg.PartitionField;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.StructLike;
import com.netflix.iceberg.transforms.Transform;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;
import org.apache.spark.sql.types.Decimal;
import org.apache.spark.unsafe.types.UTF8String;
import java.lang.reflect.Array;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import static com.netflix.iceberg.spark.SparkSchemaUtil.convert;

/**
 * A reusable partition key for Spark {@link InternalRow}s.
 * <p>
 * {@link #partition(InternalRow)} extracts each partition source column from a row (via a
 * position-based accessor) and applies the spec's transform, storing the results in a tuple
 * that implements {@link StructLike}. The same instance is reused across rows; call
 * {@link #copy()} to capture a stable snapshot of the current key.
 */
class PartitionKey implements StructLike {
  private final PartitionSpec spec;
  private final int size;
  // one slot per partition field, holding the transformed partition value
  private final Object[] partitionTuple;
  // transform for each partition field, parallel to partitionTuple
  private final Transform[] transforms;
  // accessor that reads each field's source column from an InternalRow
  private final Accessor<InternalRow>[] accessors;

  @SuppressWarnings("unchecked")
  PartitionKey(PartitionSpec spec) {
    this.spec = spec;

    List<PartitionField> fields = spec.fields();
    this.size = fields.size();
    this.partitionTuple = new Object[size];
    this.transforms = new Transform[size];
    this.accessors = (Accessor<InternalRow>[]) Array.newInstance(Accessor.class, size);

    Schema schema = spec.schema();
    // local map (by source field id) intentionally shadows the accessor array field above
    Map<Integer, Accessor<InternalRow>> accessors = buildAccessors(schema);
    for (int i = 0; i < size; i += 1) {
      PartitionField field = fields.get(i);
      Accessor<InternalRow> accessor = accessors.get(field.sourceId());
      if (accessor == null) {
        throw new RuntimeException(
            "Cannot build accessor for field: " + schema.findField(field.sourceId()));
      }
      this.accessors[i] = accessor;
      this.transforms[i] = field.transform();
    }
  }

  // copy constructor used by copy(); shares immutable parts, deep-copies the value tuple
  private PartitionKey(PartitionKey toCopy) {
    this.spec = toCopy.spec;
    this.size = toCopy.size;
    this.partitionTuple = new Object[toCopy.partitionTuple.length];
    this.transforms = toCopy.transforms;
    this.accessors = toCopy.accessors;

    for (int i = 0; i < partitionTuple.length; i += 1) {
      this.partitionTuple[i] = defensiveCopyIfNeeded(toCopy.partitionTuple[i]);
    }
  }

  private Object defensiveCopyIfNeeded(Object obj) {
    if (obj instanceof UTF8String) {
      // bytes backing the UTF8 string might be reused
      byte[] bytes = ((UTF8String) obj).getBytes();
      return UTF8String.fromBytes(Arrays.copyOf(bytes, bytes.length));
    }
    return obj;
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("[");
    for (int i = 0; i < partitionTuple.length; i += 1) {
      if (i > 0) {
        sb.append(", ");
      }
      sb.append(partitionTuple[i]);
    }
    sb.append("]");
    return sb.toString();
  }

  /** Returns a stable snapshot of this key; the original continues to be mutated per row. */
  PartitionKey copy() {
    return new PartitionKey(this);
  }

  /** Returns the partition path (e.g. "a=1/b=2") for the current key values. */
  String toPath() {
    return spec.partitionToPath(this);
  }

  /** Recomputes this key in place from the given row. */
  @SuppressWarnings("unchecked")
  void partition(InternalRow row) {
    for (int i = 0; i < partitionTuple.length; i += 1) {
      Transform<Object, Object> transform = transforms[i];
      partitionTuple[i] = transform.apply(accessors[i].get(row));
    }
  }

  @Override
  public int size() {
    return size;
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(partitionTuple[pos]);
  }

  @Override
  public <T> void set(int pos, T value) {
    partitionTuple[pos] = value;
  }

  // equality is by partition values only, so a copy() equals the live key it was taken from
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    PartitionKey that = (PartitionKey) o;
    return Arrays.equals(partitionTuple, that.partitionTuple);
  }

  @Override
  public int hashCode() {
    return Arrays.hashCode(partitionTuple);
  }

  /** Reads one value out of a container (an InternalRow or a nested struct within it). */
  private interface Accessor<T> {
    Object get(T container);
  }

  // builds a map from source field id to an accessor that reads that field from a row
  private static Map<Integer, Accessor<InternalRow>> buildAccessors(Schema schema) {
    return TypeUtil.visit(schema, new BuildPositionAccessors());
  }

  // leaf accessor: strings and decimals are converted to Java types, others read as-is
  private static Accessor<InternalRow> newAccessor(int p, Type type) {
    switch (type.typeId()) {
      case STRING:
        return new StringAccessor(p, convert(type));
      case DECIMAL:
        return new DecimalAccessor(p, convert(type));
      default:
        return new PositionAccessor(p, convert(type));
    }
  }

  // wraps a child accessor to reach through one struct level; optional structs go through
  // WrappedPositionAccessor so nulls at any layer are handled
  private static Accessor<InternalRow> newAccessor(int p, boolean isOptional, Types.StructType type,
                                                   Accessor<InternalRow> accessor) {
    int size = type.fields().size();
    if (isOptional) {
      // the wrapped position handles null layers
      return new WrappedPositionAccessor(p, size, accessor);
    } else if (accessor instanceof PositionAccessor) {
      return new Position2Accessor(p, size, (PositionAccessor) accessor);
    } else if (accessor instanceof Position2Accessor) {
      return new Position3Accessor(p, size, (Position2Accessor) accessor);
    } else {
      return new WrappedPositionAccessor(p, size, accessor);
    }
  }

  /**
   * Schema visitor that produces, for each leaf field id, an accessor able to read that
   * field from a top-level InternalRow, composing struct wrappers on the way up.
   */
  private static class BuildPositionAccessors
      extends TypeUtil.SchemaVisitor<Map<Integer, Accessor<InternalRow>>> {
    @Override
    public Map<Integer, Accessor<InternalRow>> schema(
        Schema schema, Map<Integer, Accessor<InternalRow>> structResult) {
      return structResult;
    }

    @Override
    public Map<Integer, Accessor<InternalRow>> struct(
        Types.StructType struct, List<Map<Integer, Accessor<InternalRow>>> fieldResults) {
      Map<Integer, Accessor<InternalRow>> accessors = Maps.newHashMap();
      List<Types.NestedField> fields = struct.fields();
      for (int i = 0; i < fieldResults.size(); i += 1) {
        Types.NestedField field = fields.get(i);
        Map<Integer, Accessor<InternalRow>> result = fieldResults.get(i);
        if (result != null) {
          // nested struct: wrap each child accessor to go through this field's position
          for (Map.Entry<Integer, Accessor<InternalRow>> entry : result.entrySet()) {
            accessors.put(entry.getKey(),
                newAccessor(i, field.isOptional(), field.type().asNestedType().asStructType(),
                    entry.getValue()));
          }
        } else {
          // leaf field: direct position accessor
          accessors.put(field.fieldId(), newAccessor(i, field.type()));
        }
      }

      if (accessors.isEmpty()) {
        return null;
      }

      return accessors;
    }

    @Override
    public Map<Integer, Accessor<InternalRow>> field(
        Types.NestedField field, Map<Integer, Accessor<InternalRow>> fieldResult) {
      return fieldResult;
    }
  }

  // reads a value at a top-level position, returning null for null fields
  private static class PositionAccessor implements Accessor<InternalRow> {
    protected final DataType type;
    protected int p;

    private PositionAccessor(int p, DataType type) {
      this.p = p;
      this.type = type;
    }

    @Override
    public Object get(InternalRow row) {
      if (row.isNullAt(p)) {
        return null;
      }
      return row.get(p, type);
    }
  }

  // converts Spark's UTF8String to a Java String
  private static class StringAccessor extends PositionAccessor {
    private StringAccessor(int p, DataType type) {
      super(p, type);
    }

    @Override
    public Object get(InternalRow row) {
      if (row.isNullAt(p)) {
        return null;
      }
      return row.get(p, type).toString();
    }
  }

  // converts Spark's Decimal to java.math.BigDecimal
  private static class DecimalAccessor extends PositionAccessor {
    private DecimalAccessor(int p, DataType type) {
      super(p, type);
    }

    @Override
    public Object get(InternalRow row) {
      if (row.isNullAt(p)) {
        return null;
      }
      return ((Decimal) row.get(p, type)).toJavaBigDecimal();
    }
  }

  // reads row.getStruct(p0).get(p1) for a required (non-null) one-level struct
  private static class Position2Accessor implements Accessor<InternalRow> {
    private final int p0;
    private final int size0;
    private final int p1;
    private final DataType type;

    private Position2Accessor(int p, int size, PositionAccessor wrapped) {
      this.p0 = p;
      this.size0 = size;
      this.p1 = wrapped.p;
      this.type = wrapped.type;
    }

    @Override
    public Object get(InternalRow row) {
      return row.getStruct(p0, size0).get(p1, type);
    }
  }

  // reads row.getStruct(p0).getStruct(p1).get(p2) for required two-level structs
  private static class Position3Accessor implements Accessor<InternalRow> {
    private final int p0;
    private final int size0;
    private final int p1;
    private final int size1;
    private final int p2;
    private final DataType type;

    private Position3Accessor(int p, int size, Position2Accessor wrapped) {
      this.p0 = p;
      this.size0 = size;
      this.p1 = wrapped.p0;
      this.size1 = wrapped.size0;
      this.p2 = wrapped.p1;
      this.type = wrapped.type;
    }

    @Override
    public Object get(InternalRow row) {
      return row.getStruct(p0, size0).getStruct(p1, size1).get(p2, type);
    }
  }

  // reaches through an optional struct level, returning null if the struct itself is null
  private static class WrappedPositionAccessor implements Accessor<InternalRow> {
    private final int p;
    private final int size;
    private final Accessor<InternalRow> accessor;

    private WrappedPositionAccessor(int p, int size, Accessor<InternalRow> accessor) {
      this.p = p;
      this.size = size;
      this.accessor = accessor;
    }

    @Override
    public Object get(InternalRow row) {
      InternalRow inner = row.getStruct(p, size);
      if (inner != null) {
        return accessor.get(inner);
      }
      return null;
    }
  }
}
2,243
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/source/Reader.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.source; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import com.netflix.iceberg.CombinedScanTask; import com.netflix.iceberg.DataFile; import com.netflix.iceberg.FileScanTask; import com.netflix.iceberg.PartitionField; import com.netflix.iceberg.PartitionSpec; import com.netflix.iceberg.Schema; import com.netflix.iceberg.SchemaParser; import com.netflix.iceberg.StructLike; import com.netflix.iceberg.Table; import com.netflix.iceberg.TableScan; import com.netflix.iceberg.avro.Avro; import com.netflix.iceberg.common.DynMethods; import com.netflix.iceberg.exceptions.RuntimeIOException; import com.netflix.iceberg.expressions.Expression; import com.netflix.iceberg.hadoop.HadoopInputFile; import com.netflix.iceberg.io.CloseableIterable; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.parquet.Parquet; import com.netflix.iceberg.spark.SparkExpressions; import com.netflix.iceberg.spark.SparkSchemaUtil; import com.netflix.iceberg.spark.data.SparkAvroReader; import com.netflix.iceberg.spark.data.SparkOrcReader; import com.netflix.iceberg.spark.data.SparkParquetReaders; import com.netflix.iceberg.types.TypeUtil; import com.netflix.iceberg.types.Types; import org.apache.hadoop.conf.Configuration; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.Attribute; import 
org.apache.spark.sql.catalyst.expressions.AttributeReference; import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; import org.apache.spark.sql.catalyst.expressions.JoinedRow; import org.apache.spark.sql.catalyst.expressions.UnsafeProjection; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import org.apache.spark.sql.sources.v2.reader.DataReader; import org.apache.spark.sql.sources.v2.reader.DataSourceReader; import org.apache.spark.sql.sources.v2.reader.DataReaderFactory; import org.apache.spark.sql.sources.v2.reader.Statistics; import org.apache.spark.sql.sources.v2.reader.SupportsPushDownCatalystFilters; import org.apache.spark.sql.sources.v2.reader.SupportsPushDownRequiredColumns; import org.apache.spark.sql.sources.v2.reader.SupportsReportStatistics; import org.apache.spark.sql.sources.v2.reader.SupportsScanUnsafeRow; import org.apache.spark.sql.types.BinaryType; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.Decimal; import org.apache.spark.sql.types.DecimalType; import org.apache.spark.sql.types.StringType; import org.apache.spark.sql.types.StructField; import org.apache.spark.sql.types.StructType; import org.apache.spark.unsafe.types.UTF8String; import org.apache.spark.util.SerializableConfiguration; import java.io.Closeable; import java.io.IOException; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.function.Function; import static com.google.common.collect.Iterators.transform; import static com.netflix.iceberg.spark.SparkSchemaUtil.convert; import static com.netflix.iceberg.spark.SparkSchemaUtil.prune; import static scala.collection.JavaConverters.asScalaBufferConverter; import static scala.collection.JavaConverters.seqAsJavaListConverter; class Reader implements DataSourceReader, SupportsScanUnsafeRow, SupportsPushDownCatalystFilters, SupportsPushDownRequiredColumns, SupportsReportStatistics { private 
static final org.apache.spark.sql.catalyst.expressions.Expression[] NO_EXPRS = new org.apache.spark.sql.catalyst.expressions.Expression[0]; private final Table table; private final SerializableConfiguration conf; private StructType requestedSchema = null; private List<Expression> filterExpressions = null; private org.apache.spark.sql.catalyst.expressions.Expression[] pushedExprs = NO_EXPRS; // lazy variables private Schema schema = null; private StructType type = null; // cached because Spark accesses it multiple times private List<CombinedScanTask> tasks = null; // lazy cache of tasks Reader(Table table, Configuration conf) { this.table = table; this.conf = new SerializableConfiguration(conf); this.schema = table.schema(); } private Schema lazySchema() { if (schema == null) { if (requestedSchema != null) { this.schema = prune(table.schema(), requestedSchema); } else { this.schema = table.schema(); } } return schema; } private StructType lazyType() { if (type == null) { this.type = convert(lazySchema()); } return type; } @Override public StructType readSchema() { return lazyType(); } @Override public List<DataReaderFactory<UnsafeRow>> createUnsafeRowReaderFactories() { String tableSchemaString = SchemaParser.toJson(table.schema()); String expectedSchemaString = SchemaParser.toJson(lazySchema()); List<DataReaderFactory<UnsafeRow>> readTasks = Lists.newArrayList(); for (CombinedScanTask task : tasks()) { readTasks.add(new ReadTask(task, tableSchemaString, expectedSchemaString, conf)); } return readTasks; } @Override public org.apache.spark.sql.catalyst.expressions.Expression[] pushCatalystFilters( org.apache.spark.sql.catalyst.expressions.Expression[] filters) { this.tasks = null; // invalidate cached tasks, if present List<Expression> expressions = Lists.newArrayListWithExpectedSize(filters.length); List<org.apache.spark.sql.catalyst.expressions.Expression> pushed = Lists.newArrayListWithExpectedSize(filters.length); for 
(org.apache.spark.sql.catalyst.expressions.Expression filter : filters) { Expression expr = SparkExpressions.convert(filter); if (expr != null) { expressions.add(expr); pushed.add(filter); } } this.filterExpressions = expressions; this.pushedExprs = pushed.toArray(new org.apache.spark.sql.catalyst.expressions.Expression[0]); // invalidate the schema that will be projected this.schema = null; this.type = null; // Spark doesn't support residuals per task, so return all filters // to get Spark to handle record-level filtering return filters; } @Override public org.apache.spark.sql.catalyst.expressions.Expression[] pushedCatalystFilters() { return pushedExprs; } @Override public void pruneColumns(StructType requestedSchema) { this.requestedSchema = requestedSchema; // invalidate the schema that will be projected this.schema = null; this.type = null; } @Override public Statistics getStatistics() { long sizeInBytes = 0L; long numRows = 0L; for (CombinedScanTask task : tasks()) { for (FileScanTask file : task.files()) { sizeInBytes += file.length(); numRows += file.file().recordCount(); } } return new Stats(sizeInBytes, numRows); } private List<CombinedScanTask> tasks() { if (tasks == null) { TableScan scan = table.newScan().project(lazySchema()); if (filterExpressions != null) { for (Expression filter : filterExpressions) { scan = scan.filter(filter); } } try (CloseableIterable<CombinedScanTask> tasksIterable = scan.planTasks()) { this.tasks = Lists.newArrayList(tasksIterable); } catch (IOException e) { throw new RuntimeIOException(e, "Failed to close table scan: %s", scan); } } return tasks; } @Override public String toString() { return String.format( "IcebergScan(table=%s, type=%s, filters=%s)", table, lazySchema().asStruct(), filterExpressions); } private static class ReadTask implements DataReaderFactory<UnsafeRow>, Serializable { private final CombinedScanTask task; private final String tableSchemaString; private final String expectedSchemaString; private final 
SerializableConfiguration conf; private transient Schema tableSchema = null; private transient Schema expectedSchema = null; private ReadTask(CombinedScanTask task, String tableSchemaString, String expectedSchemaString, SerializableConfiguration conf) { this.task = task; this.tableSchemaString = tableSchemaString; this.expectedSchemaString = expectedSchemaString; this.conf = conf; } @Override public DataReader<UnsafeRow> createDataReader() { return new TaskDataReader(task, lazyTableSchema(), lazyExpectedSchema(), conf.value()); } private Schema lazyTableSchema() { if (tableSchema == null) { this.tableSchema = SchemaParser.fromJson(tableSchemaString); } return tableSchema; } private Schema lazyExpectedSchema() { if (expectedSchema == null) { this.expectedSchema = SchemaParser.fromJson(expectedSchemaString); } return expectedSchema; } } private static class TaskDataReader implements DataReader<UnsafeRow> { // for some reason, the apply method can't be called from Java without reflection private static final DynMethods.UnboundMethod APPLY_PROJECTION = DynMethods.builder("apply") .impl(UnsafeProjection.class, InternalRow.class) .build(); private final Iterator<FileScanTask> tasks; private final Schema tableSchema; private final Schema expectedSchema; private final Configuration conf; private Iterator<UnsafeRow> currentIterator = null; private Closeable currentCloseable = null; private UnsafeRow current = null; public TaskDataReader(CombinedScanTask task, Schema tableSchema, Schema expectedSchema, Configuration conf) { this.tasks = task.files().iterator(); this.tableSchema = tableSchema; this.expectedSchema = expectedSchema; this.conf = conf; // open last because the schemas and conf must be set this.currentIterator = open(tasks.next()); } @Override public boolean next() throws IOException { while (true) { if (currentIterator.hasNext()) { this.current = currentIterator.next(); return true; } else if (tasks.hasNext()) { this.currentCloseable.close(); this.currentIterator 
= open(tasks.next()); } else { return false; } } } @Override public UnsafeRow get() { return current; } @Override public void close() throws IOException { // close the current iterator this.currentCloseable.close(); // exhaust the task iterator while (tasks.hasNext()) { tasks.next(); } } private Iterator<UnsafeRow> open(FileScanTask task) { DataFile file = task.file(); // schema or rows returned by readers Schema finalSchema = expectedSchema; PartitionSpec spec = task.spec(); Set<Integer> idColumns = identitySourceIds(spec); // schema needed for the projection and filtering Schema requiredSchema = prune(tableSchema, convert(finalSchema), task.residual()); boolean hasJoinedPartitionColumns = !idColumns.isEmpty(); boolean hasExtraFilterColumns = requiredSchema.columns().size() != finalSchema.columns().size(); Schema iterSchema; Iterator<InternalRow> iter; if (hasJoinedPartitionColumns) { // schema used to read data files Schema readSchema = TypeUtil.selectNot(requiredSchema, idColumns); Schema partitionSchema = TypeUtil.select(requiredSchema, idColumns); PartitionRowConverter convertToRow = new PartitionRowConverter(partitionSchema, spec); JoinedRow joined = new JoinedRow(); InternalRow partition = convertToRow.apply(file.partition()); joined.withRight(partition); // create joined rows and project from the joined schema to the final schema iterSchema = TypeUtil.join(readSchema, partitionSchema); iter = transform(open(task, readSchema, conf), joined::withLeft); } else if (hasExtraFilterColumns) { // add projection to the final schema iterSchema = requiredSchema; iter = open(task, requiredSchema, conf); } else { // return the base iterator iterSchema = finalSchema; iter = open(task, finalSchema, conf); } return transform(iter, APPLY_PROJECTION.bind(projection(finalSchema, iterSchema))::invoke); } private static UnsafeProjection projection(Schema finalSchema, Schema readSchema) { StructType struct = convert(readSchema); List<AttributeReference> refs = 
seqAsJavaListConverter(struct.toAttributes()).asJava(); List<Attribute> attrs = Lists.newArrayListWithExpectedSize(struct.fields().length); List<org.apache.spark.sql.catalyst.expressions.Expression> exprs = Lists.newArrayListWithExpectedSize(struct.fields().length); for (AttributeReference ref : refs) { attrs.add(ref.toAttribute()); } for (Types.NestedField field : finalSchema.columns()) { int indexInReadSchema = struct.fieldIndex(field.name()); exprs.add(refs.get(indexInReadSchema)); } return UnsafeProjection.create( asScalaBufferConverter(exprs).asScala().toSeq(), asScalaBufferConverter(attrs).asScala().toSeq()); } private static Set<Integer> identitySourceIds(PartitionSpec spec) { Set<Integer> sourceIds = Sets.newHashSet(); List<PartitionField> fields = spec.fields(); for (int i = 0; i < fields.size(); i += 1) { PartitionField field = fields.get(i); if ("identity".equals(field.transform().toString())) { sourceIds.add(field.sourceId()); } } return sourceIds; } private Iterator<InternalRow> open(FileScanTask task, Schema readSchema, Configuration conf) { InputFile location = HadoopInputFile.fromLocation(task.file().path(), conf); CloseableIterable<InternalRow> iter; switch (task.file().format()) { case ORC: SparkOrcReader reader = new SparkOrcReader(location, task, readSchema); this.currentCloseable = reader; return reader; case PARQUET: iter = newParquetIterable(location, task, readSchema); break; case AVRO: iter = newAvroIterable(location, task, readSchema); break; default: throw new UnsupportedOperationException( "Cannot read unknown format: " + task.file().format()); } this.currentCloseable = iter; return iter.iterator(); } private CloseableIterable<InternalRow> newAvroIterable(InputFile location, FileScanTask task, Schema readSchema) { return Avro.read(location) .reuseContainers() .project(readSchema) .split(task.start(), task.length()) .createReaderFunc(SparkAvroReader::new) .build(); } private CloseableIterable<InternalRow> newParquetIterable(InputFile 
location, FileScanTask task, Schema readSchema) { return Parquet.read(location) .project(readSchema) .split(task.start(), task.length()) .createReaderFunc(fileSchema -> SparkParquetReaders.buildReader(readSchema, fileSchema)) .filter(task.residual()) .build(); } } private static class PartitionRowConverter implements Function<StructLike, InternalRow> { private final DataType[] types; private final int[] positions; private final Class<?>[] javaTypes; private final GenericInternalRow reusedRow; PartitionRowConverter(Schema partitionSchema, PartitionSpec spec) { StructType partitionType = SparkSchemaUtil.convert(partitionSchema); StructField[] fields = partitionType.fields(); this.types = new DataType[fields.length]; this.positions = new int[types.length]; this.javaTypes = new Class<?>[types.length]; this.reusedRow = new GenericInternalRow(types.length); List<PartitionField> partitionFields = spec.fields(); for (int rowIndex = 0; rowIndex < fields.length; rowIndex += 1) { this.types[rowIndex] = fields[rowIndex].dataType(); int sourceId = partitionSchema.columns().get(rowIndex).fieldId(); for (int specIndex = 0; specIndex < partitionFields.size(); specIndex += 1) { PartitionField field = spec.fields().get(specIndex); if (field.sourceId() == sourceId && "identity".equals(field.transform().toString())) { positions[rowIndex] = specIndex; javaTypes[rowIndex] = spec.javaClasses()[specIndex]; break; } } } } @Override public InternalRow apply(StructLike tuple) { for (int i = 0; i < types.length; i += 1) { reusedRow.update(i, convert(tuple.get(positions[i], javaTypes[i]), types[i])); } return reusedRow; } /** * Converts the objects into instances used by Spark's InternalRow. * * @param value a data value * @param type the Spark data type * @return the value converted to the representation expected by Spark's InternalRow. 
*/ private static Object convert(Object value, DataType type) { if (type instanceof StringType) { return UTF8String.fromString(value.toString()); } else if (type instanceof BinaryType) { ByteBuffer buffer = (ByteBuffer) value; return buffer.get(new byte[buffer.remaining()]); } else if (type instanceof DecimalType) { return Decimal.fromDecimal(value); } return value; } } private static class StructLikeInternalRow implements StructLike { private final DataType[] types; private InternalRow row = null; StructLikeInternalRow(StructType struct) { this.types = new DataType[struct.size()]; StructField[] fields = struct.fields(); for (int i = 0; i < fields.length; i += 1) { types[i] = fields[i].dataType(); } } public StructLikeInternalRow setRow(InternalRow row) { this.row = row; return this; } @Override public int size() { return types.length; } @Override @SuppressWarnings("unchecked") public <T> T get(int pos, Class<T> javaClass) { return javaClass.cast(row.get(pos, types[pos])); } @Override public <T> void set(int pos, T value) { throw new UnsupportedOperationException("Not implemented: set"); } } }
2,244
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/source/IcebergSource.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.source;

import com.google.common.base.Preconditions;
import com.netflix.iceberg.FileFormat;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.spark.SparkSchemaUtil;
import com.netflix.iceberg.types.CheckCompatibility;
import org.apache.hadoop.conf.Configuration;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.sources.DataSourceRegister;
import org.apache.spark.sql.sources.v2.DataSourceV2;
import org.apache.spark.sql.sources.v2.DataSourceOptions;
import org.apache.spark.sql.sources.v2.ReadSupport;
import org.apache.spark.sql.sources.v2.WriteSupport;
import org.apache.spark.sql.sources.v2.reader.DataSourceReader;
import org.apache.spark.sql.sources.v2.writer.DataSourceWriter;
import org.apache.spark.sql.types.StructType;
import java.util.List;
import java.util.Locale;
import java.util.Optional;

import static com.netflix.iceberg.TableProperties.DEFAULT_FILE_FORMAT;
import static com.netflix.iceberg.TableProperties.DEFAULT_FILE_FORMAT_DEFAULT;

/**
 * Spark DataSourceV2 entry point for Iceberg tables, registered under the short name
 * "iceberg". Supports scans via {@link Reader} and append-only writes via {@link Writer}.
 */
public class IcebergSource implements DataSourceV2, ReadSupport, WriteSupport, DataSourceRegister {
  private SparkSession lazySpark = null;
  private Configuration lazyConf = null;

  @Override
  public String shortName() {
    return "iceberg";
  }

  @Override
  public DataSourceReader createReader(DataSourceOptions options) {
    return new Reader(findTable(options), lazyConf());
  }

  @Override
  public Optional<DataSourceWriter> createWriter(String jobId, StructType dfStruct, SaveMode mode,
                                                 DataSourceOptions options) {
    // only appends are implemented; other save modes are rejected up front
    Preconditions.checkArgument(mode == SaveMode.Append, "Save mode %s is not supported", mode);

    Table table = findTable(options);
    Schema dfSchema = SparkSchemaUtil.convert(table.schema(), dfStruct);
    validateWriteSchema(table, dfSchema);

    return Optional.of(new Writer(table, lazyConf(), writeFormat(table, options)));
  }

  /**
   * Throws IllegalArgumentException when the dataframe schema cannot be written to the table,
   * listing each incompatibility found.
   */
  private static void validateWriteSchema(Table table, Schema dfSchema) {
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(table.schema(), dfSchema);
    if (errors.isEmpty()) {
      return;
    }

    StringBuilder sb = new StringBuilder();
    sb.append("Cannot write incompatible dataframe to table with schema:\n")
        .append(table.schema()).append("\nProblems:");
    for (String error : errors) {
      sb.append("\n* ").append(error);
    }
    throw new IllegalArgumentException(sb.toString());
  }

  /**
   * Resolves the file format for a write: the "iceberg.write.format" option wins,
   * otherwise the table's write.format.default property (or its default) is used.
   */
  private static FileFormat writeFormat(Table table, DataSourceOptions options) {
    String formatName = options.get("iceberg.write.format").orElseGet(() -> table.properties()
        .getOrDefault(DEFAULT_FILE_FORMAT, DEFAULT_FILE_FORMAT_DEFAULT));
    return FileFormat.valueOf(formatName.toUpperCase(Locale.ENGLISH));
  }

  /** Loads the table identified by the "path" option from the Hadoop catalog. */
  protected Table findTable(DataSourceOptions options) {
    Optional<String> location = options.get("path");
    Preconditions.checkArgument(location.isPresent(),
        "Cannot open table without a location: path is not set");

    return new HadoopTables(lazyConf()).load(location.get());
  }

  protected SparkSession lazySparkSession() {
    if (lazySpark == null) {
      this.lazySpark = SparkSession.builder().getOrCreate();
    }
    return lazySpark;
  }

  protected Configuration lazyConf() {
    if (lazyConf == null) {
      this.lazyConf = lazySparkSession().sparkContext().hadoopConfiguration();
    }
    return lazyConf;
  }
}
2,245
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/hacks/Hive.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.hacks;

import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.catalyst.catalog.CatalogTablePartition;
import org.apache.spark.sql.hive.HiveUtils$;
import org.apache.spark.sql.hive.client.HiveClient;
import scala.Option;
import scala.collection.Seq;
import java.util.List;

/**
 * Workarounds for accessing the Hive metastore through Spark's internal client.
 */
public class Hive {
  private Hive() {
    // static utility class; not meant to be instantiated
  }

  /**
   * Returns all partitions of the named Hive table.
   *
   * @param spark the active Spark session, used for conf and Hadoop configuration
   * @param name a table name, either "table" (uses the "default" database) or "db.table"
   * @return the table's partitions from the Hive metastore
   */
  public static Seq<CatalogTablePartition> partitions(SparkSession spark, String name) {
    // Split "db.table" into at most two parts; a bare name maps to the default database.
    List<String> parts = Lists.newArrayList(Splitter.on('.').limit(2).split(name));
    String db = parts.size() == 1 ? "default" : parts.get(0);
    String table = parts.get(parts.size() == 1 ? 0 : 1);
    HiveClient client = HiveUtils$.MODULE$.newClientForMetadata(
        spark.sparkContext().conf(), spark.sparkContext().hadoopConfiguration());
    // Fix: the original called getPartitions twice and discarded the first
    // result, issuing a redundant metastore RPC. Fetch once and return it.
    return client.getPartitions(db, table, Option.empty());
  }
}
2,246
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkOrcWriter.java
/*
 * Copyright 2018 Hortonworks
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.orc.OrcFileAppender;
import org.apache.orc.TypeDescription;
import org.apache.orc.storage.common.type.HiveDecimal;
import org.apache.orc.storage.ql.exec.vector.BytesColumnVector;
import org.apache.orc.storage.ql.exec.vector.ColumnVector;
import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector;
import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.storage.ql.exec.vector.ListColumnVector;
import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.StructColumnVector;
import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector;
import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.catalyst.expressions.SpecializedGetters;
import org.apache.spark.sql.catalyst.util.ArrayData;
import org.apache.spark.sql.catalyst.util.DateTimeUtils;
import org.apache.spark.sql.catalyst.util.MapData;
import org.apache.spark.unsafe.types.UTF8String;
import java.io.IOException;
import java.sql.Timestamp;
import java.util.List;

/**
 * This class acts as an adaptor from an OrcFileAppender to a
 * FileAppender&lt;InternalRow&gt;.
 *
 * Rows are buffered into an ORC {@link VectorizedRowBatch} and flushed to the
 * underlying appender every {@link #BATCH_SIZE} rows and on {@link #close()}.
 */
public class SparkOrcWriter implements FileAppender<InternalRow> {
  // Number of rows buffered before a batch is handed to the ORC writer.
  // (Fix: modifier order was "final static"; Java convention is "static final".)
  private static final int BATCH_SIZE = 1024;

  private final VectorizedRowBatch batch;
  private final OrcFileAppender writer;
  private final Converter[] converters;

  public SparkOrcWriter(OrcFileAppender writer) {
    TypeDescription schema = writer.getSchema();
    this.batch = schema.createRowBatch(BATCH_SIZE);
    this.writer = writer;
    this.converters = buildConverters(schema);
  }

  /**
   * The interface for the conversion from Spark's SpecializedGetters to
   * ORC's ColumnVectors.
   */
  interface Converter {
    /**
     * Take a value from the Spark data value and add it to the ORC output.
     * @param rowId the row in the ColumnVector
     * @param column either the column number or element number
     * @param data either an InternalRow or ArrayData
     * @param output the ColumnVector to put the value into
     */
    void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output);
  }

  /**
   * Shared null handling for all converters (previously duplicated in every
   * converter class). Marks the output slot null when the input is null.
   *
   * @return true if the input value was null and has been fully handled
   */
  private static boolean writeNullIfAbsent(int rowId, int column, SpecializedGetters data,
                                           ColumnVector output) {
    if (data.isNullAt(column)) {
      output.noNulls = false;
      output.isNull[rowId] = true;
      return true;
    }
    output.isNull[rowId] = false;
    return false;
  }

  static class BooleanConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // ORC stores booleans as longs (1/0)
        ((LongColumnVector) output).vector[rowId] = data.getBoolean(column) ? 1 : 0;
      }
    }
  }

  static class ByteConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ((LongColumnVector) output).vector[rowId] = data.getByte(column);
      }
    }
  }

  static class ShortConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ((LongColumnVector) output).vector[rowId] = data.getShort(column);
      }
    }
  }

  static class IntConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ((LongColumnVector) output).vector[rowId] = data.getInt(column);
      }
    }
  }

  static class LongConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ((LongColumnVector) output).vector[rowId] = data.getLong(column);
      }
    }
  }

  static class FloatConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // ORC stores floats in a DoubleColumnVector
        ((DoubleColumnVector) output).vector[rowId] = data.getFloat(column);
      }
    }
  }

  static class DoubleConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ((DoubleColumnVector) output).vector[rowId] = data.getDouble(column);
      }
    }
  }

  static class StringConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // NOTE(review): getBytes may return the UTF8String's backing array;
        // setRef aliases it without copying — assumes the row is not reused
        // before the batch is flushed. TODO confirm against caller.
        byte[] value = data.getUTF8String(column).getBytes();
        ((BytesColumnVector) output).setRef(rowId, value, 0, value.length);
      }
    }
  }

  static class BytesConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // getBinary always makes a copy, so we don't need to worry about it
        // being changed behind our back.
        byte[] value = data.getBinary(column);
        ((BytesColumnVector) output).setRef(rowId, value, 0, value.length);
      }
    }
  }

  static class TimestampConverter implements Converter {
    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        TimestampColumnVector cv = (TimestampColumnVector) output;
        // Spark timestamps are microseconds since epoch; ORC wants
        // milliseconds (time) plus nanoseconds-within-second (nanos).
        long micros = data.getLong(column);
        cv.time[rowId] = (micros / 1_000_000) * 1000;
        int nanos = (int) (micros % 1_000_000) * 1000;
        if (nanos < 0) {
          // pre-epoch values: normalize nanos to [0, 1e9) and borrow a second
          nanos += 1_000_000_000;
          cv.time[rowId] -= 1000;
        }
        cv.nanos[rowId] = nanos;
      }
    }
  }

  static class Decimal18Converter implements Converter {
    private final int precision;
    private final int scale;

    Decimal18Converter(TypeDescription schema) {
      precision = schema.getPrecision();
      scale = schema.getScale();
    }

    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // precision <= 18 fits in a long; write the unscaled value directly
        ((DecimalColumnVector) output).vector[rowId].setFromLongAndScale(
            data.getDecimal(column, precision, scale).toUnscaledLong(), scale);
      }
    }
  }

  static class Decimal38Converter implements Converter {
    private final int precision;
    private final int scale;

    Decimal38Converter(TypeDescription schema) {
      precision = schema.getPrecision();
      scale = schema.getScale();
    }

    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        // wide decimals go through BigDecimal/HiveDecimal
        ((DecimalColumnVector) output).vector[rowId].set(
            HiveDecimal.create(data.getDecimal(column, precision, scale)
                .toJavaBigDecimal()));
      }
    }
  }

  static class StructConverter implements Converter {
    private final Converter[] children;

    StructConverter(TypeDescription schema) {
      children = new Converter[schema.getChildren().size()];
      for (int c = 0; c < children.length; ++c) {
        children[c] = buildConverter(schema.getChildren().get(c));
      }
    }

    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        InternalRow value = data.getStruct(column, children.length);
        StructColumnVector cv = (StructColumnVector) output;
        for (int c = 0; c < children.length; ++c) {
          children[c].addValue(rowId, c, value, cv.fields[c]);
        }
      }
    }
  }

  static class ListConverter implements Converter {
    private final Converter children;

    ListConverter(TypeDescription schema) {
      children = buildConverter(schema.getChildren().get(0));
    }

    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        ArrayData value = data.getArray(column);
        ListColumnVector cv = (ListColumnVector) output;
        // record the length and start of the list elements
        cv.lengths[rowId] = value.numElements();
        cv.offsets[rowId] = cv.childCount;
        cv.childCount += cv.lengths[rowId];
        // make sure the child is big enough
        cv.child.ensureSize(cv.childCount, true);
        // Add each element
        for (int e = 0; e < cv.lengths[rowId]; ++e) {
          children.addValue((int) (e + cv.offsets[rowId]), e, value, cv.child);
        }
      }
    }
  }

  static class MapConverter implements Converter {
    private final Converter keyConverter;
    private final Converter valueConverter;

    MapConverter(TypeDescription schema) {
      keyConverter = buildConverter(schema.getChildren().get(0));
      valueConverter = buildConverter(schema.getChildren().get(1));
    }

    public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) {
      if (!writeNullIfAbsent(rowId, column, data, output)) {
        MapData map = data.getMap(column);
        ArrayData key = map.keyArray();
        ArrayData value = map.valueArray();
        MapColumnVector cv = (MapColumnVector) output;
        // record the length and start of the list elements
        cv.lengths[rowId] = value.numElements();
        cv.offsets[rowId] = cv.childCount;
        cv.childCount += cv.lengths[rowId];
        // make sure the child is big enough
        cv.keys.ensureSize(cv.childCount, true);
        cv.values.ensureSize(cv.childCount, true);
        // Add each element
        for (int e = 0; e < cv.lengths[rowId]; ++e) {
          int pos = (int) (e + cv.offsets[rowId]);
          keyConverter.addValue(pos, e, key, cv.keys);
          valueConverter.addValue(pos, e, value, cv.values);
        }
      }
    }
  }

  /** Builds a converter for a single ORC type, recursing into compound types. */
  private static Converter buildConverter(TypeDescription schema) {
    switch (schema.getCategory()) {
      case BOOLEAN:
        return new BooleanConverter();
      case BYTE:
        return new ByteConverter();
      case SHORT:
        return new ShortConverter();
      case DATE: // Spark dates are days-since-epoch ints, same as ORC
      case INT:
        return new IntConverter();
      case LONG:
        return new LongConverter();
      case FLOAT:
        return new FloatConverter();
      case DOUBLE:
        return new DoubleConverter();
      case BINARY:
        return new BytesConverter();
      case STRING:
      case CHAR:
      case VARCHAR:
        return new StringConverter();
      case DECIMAL:
        return schema.getPrecision() <= 18 ?
            new Decimal18Converter(schema) :
            new Decimal38Converter(schema);
      case TIMESTAMP:
        return new TimestampConverter();
      case STRUCT:
        return new StructConverter(schema);
      case LIST:
        return new ListConverter(schema);
      case MAP:
        return new MapConverter(schema);
    }
    throw new IllegalArgumentException("Unhandled type " + schema);
  }

  /** Builds one converter per top-level column of the (struct) file schema. */
  private static Converter[] buildConverters(TypeDescription schema) {
    if (schema.getCategory() != TypeDescription.Category.STRUCT) {
      throw new IllegalArgumentException("Top level must be a struct " + schema);
    }
    List<TypeDescription> children = schema.getChildren();
    Converter[] result = new Converter[children.size()];
    for (int c = 0; c < children.size(); ++c) {
      result[c] = buildConverter(children.get(c));
    }
    return result;
  }

  @Override
  public void add(InternalRow datum) {
    int row = batch.size++;
    for (int c = 0; c < converters.length; ++c) {
      converters[c].addValue(row, c, datum, batch.cols[c]);
    }
    // flush a full batch to the underlying appender
    if (batch.size == BATCH_SIZE) {
      writer.add(batch);
      batch.reset();
    }
  }

  @Override
  public Metrics metrics() {
    return writer.metrics();
  }

  @Override
  public void close() throws IOException {
    // flush any partially-filled batch before closing the appender
    if (batch.size > 0) {
      writer.add(batch);
      batch.reset();
    }
    writer.close();
  }
}
2,247
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkAvroWriter.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.ValueWriter;
import com.netflix.iceberg.avro.ValueWriters;
import com.netflix.iceberg.types.Type;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.types.DataType;
import java.io.IOException;
import java.util.List;

import static com.netflix.iceberg.avro.AvroSchemaUtil.getFieldId;
import static com.netflix.iceberg.avro.AvroSchemaVisitor.visit;
import static com.netflix.iceberg.spark.SparkSchemaUtil.convert;

/**
 * Avro {@link DatumWriter} that writes Spark {@link InternalRow}s using value
 * writers built by walking the Avro schema alongside the Iceberg schema.
 */
public class SparkAvroWriter implements DatumWriter<InternalRow> {
  private final com.netflix.iceberg.Schema schema;
  // built in setSchema, which Avro calls before the first write
  private ValueWriter<InternalRow> writer = null;

  public SparkAvroWriter(com.netflix.iceberg.Schema schema) {
    this.schema = schema;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema schema) {
    this.writer = (ValueWriter<InternalRow>) visit(schema, new WriteBuilder(this.schema));
  }

  @Override
  public void write(InternalRow datum, Encoder out) throws IOException {
    writer.write(datum, out);
  }

  /**
   * Builds a tree of value writers by visiting the Avro schema; the Iceberg
   * schema supplies the Spark type for each field via its field ID.
   */
  private static class WriteBuilder extends AvroSchemaVisitor<ValueWriter<?>> {
    private final com.netflix.iceberg.Schema schema;

    private WriteBuilder(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
    }

    @Override
    public ValueWriter<?> record(Schema record, List<String> names, List<ValueWriter<?>> fields) {
      // look up each field's Spark type by its Iceberg field ID
      List<DataType> types = Lists.newArrayList();
      for (Schema.Field field : record.getFields()) {
        types.add(convert(schema.findType(getFieldId(field))));
      }
      return SparkValueWriters.struct(fields, types);
    }

    @Override
    public ValueWriter<?> union(Schema union, List<ValueWriter<?>> options) {
      // Fix: use Preconditions format args instead of eager string
      // concatenation, which built the message even when the check passed.
      Preconditions.checkArgument(options.contains(ValueWriters.nulls()),
          "Cannot create writer for non-option union: %s", union);
      Preconditions.checkArgument(options.size() == 2,
          "Cannot create writer for non-option union: %s", union);
      // option writers need the index of the non-null branch
      if (union.getTypes().get(0).getType() == Schema.Type.NULL) {
        return ValueWriters.option(0, options.get(1));
      } else {
        return ValueWriters.option(1, options.get(0));
      }
    }

    @Override
    public ValueWriter<?> array(Schema array, ValueWriter<?> elementWriter) {
      // an array with the "map" logical type is a key/value list, written as a Spark map
      LogicalType logical = array.getLogicalType();
      if (logical != null && "map".equals(logical.getName())) {
        Type keyType = schema.findType(getFieldId(array.getElementType().getField("key")));
        Type valueType = schema.findType(getFieldId(array.getElementType().getField("value")));
        ValueWriter<?>[] writers = ((SparkValueWriters.StructWriter) elementWriter).writers;
        return SparkValueWriters.arrayMap(
            writers[0], convert(keyType), writers[1], convert(valueType));
      }
      Type elementType = schema.findType(AvroSchemaUtil.getElementId(array));
      return SparkValueWriters.array(elementWriter, convert(elementType));
    }

    @Override
    public ValueWriter<?> map(Schema map, ValueWriter<?> valueReader) {
      Type keyType = schema.findType(AvroSchemaUtil.getKeyId(map));
      Type valueType = schema.findType(AvroSchemaUtil.getValueId(map));
      return SparkValueWriters.map(
          SparkValueWriters.strings(), convert(keyType), valueReader, convert(valueType));
    }

    @Override
    public ValueWriter<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            // Spark uses the same representation
            return ValueWriters.ints();
          case "timestamp-micros":
            // Spark uses the same representation
            return ValueWriters.longs();
          case "decimal":
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return SparkValueWriters.decimal(decimal.getPrecision(), decimal.getScale());
          case "uuid":
            return ValueWriters.uuids();
          default:
            throw new IllegalArgumentException("Unsupported logical type: " + logicalType);
        }
      }
      switch (primitive.getType()) {
        case NULL:
          return ValueWriters.nulls();
        case BOOLEAN:
          return ValueWriters.booleans();
        case INT:
          return ValueWriters.ints();
        case LONG:
          return ValueWriters.longs();
        case FLOAT:
          return ValueWriters.floats();
        case DOUBLE:
          return ValueWriters.doubles();
        case STRING:
          return SparkValueWriters.strings();
        case FIXED:
          return ValueWriters.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueWriters.bytes();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
2,248
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkAvroReader.java
/*
 * Copyright 2017 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.netflix.iceberg.spark.data;

import com.google.common.collect.MapMaker;
import com.netflix.iceberg.avro.AvroSchemaVisitor;
import com.netflix.iceberg.avro.ValueReader;
import com.netflix.iceberg.avro.ValueReaders;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.spark.sql.catalyst.InternalRow;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Avro {@link DatumReader} that produces Spark {@link InternalRow}s.
 *
 * Schema resolution (file schema vs. read schema) is done with Avro's
 * {@link ResolvingDecoder}, which is expensive to build, so resolvers are
 * cached per thread keyed by (read schema, file schema).
 */
public class SparkAvroReader implements DatumReader<InternalRow> {
  // Per-thread cache: read schema -> (file schema -> resolver). The outer map
  // uses weak keys so cached resolvers can be collected when a read schema is
  // no longer referenced. ResolvingDecoder is not thread-safe, hence the
  // ThreadLocal.
  private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
      ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());

  private final Schema readSchema;
  private final ValueReader<InternalRow> reader;
  // set by setSchema before reading; aliases from the read schema are applied
  private Schema fileSchema = null;

  @SuppressWarnings("unchecked")
  public SparkAvroReader(Schema readSchema) {
    this.readSchema = readSchema;
    // build the value-reader tree once by visiting the read schema
    this.reader = (ValueReader<InternalRow>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder());
  }

  @Override
  public void setSchema(Schema fileSchema) {
    this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
  }

  @Override
  public InternalRow read(InternalRow reuse, Decoder decoder) throws IOException {
    ResolvingDecoder resolver = resolve(decoder);
    InternalRow row = reader.read(resolver, reuse);
    // drain any trailing fields in the file schema that the read schema skips
    resolver.drain();
    return row;
  }

  /**
   * Returns a cached (or newly built) resolving decoder for the current
   * (read schema, file schema) pair, configured to read from the given decoder.
   */
  private ResolvingDecoder resolve(Decoder decoder) throws IOException {
    Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
    Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
        .computeIfAbsent(readSchema, k -> new HashMap<>());
    ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
    if (resolver == null) {
      resolver = newResolver();
      fileSchemaToResolver.put(fileSchema, resolver);
    }
    resolver.configure(decoder);
    return resolver;
  }

  // Builds a resolver from the current file/read schemas; IOException is
  // wrapped because this is called from a non-throwing context.
  private ResolvingDecoder newResolver() {
    try {
      return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Builds the tree of value readers by visiting the Avro read schema.
   */
  private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
    private ReadBuilder() {
    }

    @Override
    public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
      return SparkValueReaders.struct(fields);
    }

    @Override
    public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
      return ValueReaders.union(options);
    }

    @Override
    public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
      // an array with the "map" logical type is a key/value list read as a map
      LogicalType logical = array.getLogicalType();
      if (logical != null && "map".equals(logical.getName())) {
        ValueReader<?>[] keyValueReaders = ((SparkValueReaders.StructReader) elementReader).readers;
        return SparkValueReaders.arrayMap(keyValueReaders[0], keyValueReaders[1]);
      }
      return SparkValueReaders.array(elementReader);
    }

    @Override
    public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
      return SparkValueReaders.map(SparkValueReaders.strings(), valueReader);
    }

    @Override
    public ValueReader<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            // Spark uses the same representation
            return ValueReaders.ints();
          case "timestamp-millis":
            // adjust to microseconds
            ValueReader<Long> longs = ValueReaders.longs();
            return (ValueReader<Long>) (decoder, ignored) -> longs.read(decoder, null) * 1000L;
          case "timestamp-micros":
            // Spark uses the same representation
            return ValueReaders.longs();
          case "decimal":
            // the underlying bytes come from either a fixed or a bytes field
            ValueReader<byte[]> inner;
            switch (primitive.getType()) {
              case FIXED:
                inner = ValueReaders.fixed(primitive.getFixedSize());
                break;
              case BYTES:
                inner = ValueReaders.bytes();
                break;
              default:
                throw new IllegalArgumentException(
                    "Invalid primitive type for decimal: " + primitive.getType());
            }
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return SparkValueReaders.decimal(inner, decimal.getScale());
          case "uuid":
            return SparkValueReaders.uuids();
          default:
            throw new IllegalArgumentException("Unknown logical type: " + logicalType);
        }
      }
      switch (primitive.getType()) {
        case NULL:
          return ValueReaders.nulls();
        case BOOLEAN:
          return ValueReaders.booleans();
        case INT:
          return ValueReaders.ints();
        case LONG:
          return ValueReaders.longs();
        case FLOAT:
          return ValueReaders.floats();
        case DOUBLE:
          return ValueReaders.doubles();
        case STRING:
          return SparkValueReaders.strings();
        case FIXED:
          return ValueReaders.fixed(primitive.getFixedSize());
        case BYTES:
          return ValueReaders.bytes();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
2,249
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkOrcReader.java
/* * Copyright 2018 Hortonworks * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.netflix.iceberg.FileScanTask; import com.netflix.iceberg.Schema; import com.netflix.iceberg.io.InputFile; import com.netflix.iceberg.orc.ColumnIdMap; import com.netflix.iceberg.orc.ORC; import com.netflix.iceberg.orc.OrcIterator; import com.netflix.iceberg.orc.TypeConversion; import org.apache.orc.TypeDescription; import org.apache.orc.storage.common.type.FastHiveDecimal; import org.apache.orc.storage.ql.exec.vector.BytesColumnVector; import org.apache.orc.storage.ql.exec.vector.ColumnVector; import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector; import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector; import org.apache.orc.storage.ql.exec.vector.ListColumnVector; import org.apache.orc.storage.ql.exec.vector.LongColumnVector; import org.apache.orc.storage.ql.exec.vector.MapColumnVector; import org.apache.orc.storage.ql.exec.vector.StructColumnVector; import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import org.apache.orc.storage.serde2.io.DateWritable; import org.apache.orc.storage.serde2.io.HiveDecimalWritable; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.expressions.UnsafeRow; import 
org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder; import org.apache.spark.sql.catalyst.expressions.codegen.UnsafeArrayWriter; import org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.Platform; import org.apache.spark.unsafe.array.ByteArrayMethods; import java.io.Closeable; import java.io.IOException; import java.math.BigDecimal; import java.sql.Timestamp; import java.util.Iterator; import java.util.List; /** * Converts the OrcInterator, which returns ORC's VectorizedRowBatch to a * set of Spark's UnsafeRows. * * It minimizes allocations by reusing most of the objects in the implementation. */ public class SparkOrcReader implements Iterator<InternalRow>, Closeable { private final static int INITIAL_SIZE = 128 * 1024; private final OrcIterator reader; private final TypeDescription orcSchema; private final UnsafeRow row; private final BufferHolder holder; private final UnsafeRowWriter writer; private int nextRow = 0; private VectorizedRowBatch current = null; private Converter[] converter; public SparkOrcReader(InputFile location, FileScanTask task, Schema readSchema) { ColumnIdMap columnIds = new ColumnIdMap(); orcSchema = TypeConversion.toOrc(readSchema, columnIds); reader = ORC.read(location) .split(task.start(), task.length()) .schema(readSchema) .build(); int numFields = readSchema.columns().size(); row = new UnsafeRow(numFields); holder = new BufferHolder(row, INITIAL_SIZE); writer = new UnsafeRowWriter(holder, numFields); converter = new Converter[numFields]; for(int c=0; c < numFields; ++c) { converter[c] = buildConverter(holder, orcSchema.getChildren().get(c)); } } @Override public boolean hasNext() { return (current != null && nextRow < current.size) || reader.hasNext(); } @Override public UnsafeRow next() { if (current == null || nextRow >= 
current.size) { current = reader.next(); nextRow = 0; } // Reset the holder to start the buffer over again. // BufferHolder.reset does the wrong thing... holder.cursor = Platform.BYTE_ARRAY_OFFSET; writer.reset(); for(int c=0; c < current.cols.length; ++c) { converter[c].convert(writer, c, current.cols[c], nextRow); } nextRow++; return row; } @Override public void close() throws IOException { reader.close(); } private static void printRow(SpecializedGetters row, TypeDescription schema) { List<TypeDescription> children = schema.getChildren(); System.out.print("{"); for(int c = 0; c < children.size(); ++c) { System.out.print("\"" + schema.getFieldNames().get(c) + "\": "); printRow(row, c, children.get(c)); } System.out.print("}"); } private static void printRow(SpecializedGetters row, int ord, TypeDescription schema) { switch (schema.getCategory()) { case BOOLEAN: System.out.print(row.getBoolean(ord)); break; case BYTE: System.out.print(row.getByte(ord)); break; case SHORT: System.out.print(row.getShort(ord)); break; case INT: System.out.print(row.getInt(ord)); break; case LONG: System.out.print(row.getLong(ord)); break; case FLOAT: System.out.print(row.getFloat(ord)); break; case DOUBLE: System.out.print(row.getDouble(ord)); break; case CHAR: case VARCHAR: case STRING: System.out.print("\"" + row.getUTF8String(ord) + "\""); break; case BINARY: { byte[] bin = row.getBinary(ord); if (bin == null) { System.out.print("null"); } else { System.out.print("["); for (int i = 0; i < bin.length; ++i) { if (i != 0) { System.out.print(", "); } int v = bin[i] & 0xff; if (v < 16) { System.out.print("0" + Integer.toHexString(v)); } else { System.out.print(Integer.toHexString(v)); } } System.out.print("]"); } break; } case DECIMAL: System.out.print(row.getDecimal(ord, schema.getPrecision(), schema.getScale())); break; case DATE: System.out.print("\"" + new DateWritable(row.getInt(ord)) + "\""); break; case TIMESTAMP: System.out.print("\"" + new Timestamp(row.getLong(ord)) + "\""); 
break; case STRUCT: printRow(row.getStruct(ord, schema.getChildren().size()), schema); break; case LIST: { TypeDescription child = schema.getChildren().get(0); System.out.print("["); ArrayData list = row.getArray(ord); for(int e=0; e < list.numElements(); ++e) { if (e != 0) { System.out.print(", "); } printRow(list, e, child); } System.out.print("]"); break; } case MAP: { TypeDescription keyType = schema.getChildren().get(0); TypeDescription valueType = schema.getChildren().get(1); MapData map = row.getMap(ord); ArrayData keys = map.keyArray(); ArrayData values = map.valueArray(); System.out.print("["); for(int e=0; e < map.numElements(); ++e) { if (e != 0) { System.out.print(", "); } printRow(keys, e, keyType); System.out.print(": "); printRow(values, e, valueType); } System.out.print("]"); break; } default: throw new IllegalArgumentException("Unhandled type " + schema); } } static int getArrayElementSize(TypeDescription type) { switch (type.getCategory()) { case BOOLEAN: case BYTE: return 1; case SHORT: return 2; case INT: case FLOAT: return 4; default: return 8; } } /** * The common interface for converting from a ORC ColumnVector to a Spark * UnsafeRow. UnsafeRows need two different interfaces for writers and thus * we have two methods the first is for structs (UnsafeRowWriter) and the * second is for lists and maps (UnsafeArrayWriter). If Spark adds a common * interface similar to SpecializedGetters we could that and a single set of * methods. 
*/ interface Converter { void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row); void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row); } private static class BooleanConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, ((LongColumnVector) vector).vector[row] != 0); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, ((LongColumnVector) vector).vector[row] != 0); } } } private static class ByteConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, (byte) ((LongColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, (byte) ((LongColumnVector) vector).vector[row]); } } } private static class ShortConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, (short) ((LongColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && 
vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, (short) ((LongColumnVector) vector).vector[row]); } } } private static class IntConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, (int) ((LongColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, (int) ((LongColumnVector) vector).vector[row]); } } } private static class LongConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, ((LongColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, ((LongColumnVector) vector).vector[row]); } } } private static class FloatConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, (float) ((DoubleColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, (float) 
((DoubleColumnVector) vector).vector[row]); } } } private static class DoubleConverter implements Converter { @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, ((DoubleColumnVector) vector).vector[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, ((DoubleColumnVector) vector).vector[row]); } } } private static class TimestampConverter implements Converter { private long convert(TimestampColumnVector vector, int row) { // compute microseconds past 1970. long micros = (vector.time[row]/1000) * 1_000_000 + vector.nanos[row] / 1000; return micros; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { writer.write(column, convert((TimestampColumnVector) vector, row)); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { writer.write(element, convert((TimestampColumnVector) vector, row)); } } } /** * UnsafeArrayWriter doesn't have a binary form that lets the user pass an * offset and length, so I've added one here. It is the minor tweak of the * UnsafeArrayWriter.write(int, byte[]) method. 
* @param holder the BufferHolder where the bytes are being written * @param writer the UnsafeArrayWriter * @param ordinal the element that we are writing into * @param input the input bytes * @param offset the first byte from input * @param length the number of bytes to write */ static void write(BufferHolder holder, UnsafeArrayWriter writer, int ordinal, byte[] input, int offset, int length) { final int roundedSize = ByteArrayMethods.roundNumberOfBytesToNearestWord(length); // grow the global buffer before writing data. holder.grow(roundedSize); if ((length & 0x07) > 0) { Platform.putLong(holder.buffer, holder.cursor + ((length >> 3) << 3), 0L); } // Write the bytes to the variable length portion. Platform.copyMemory(input, Platform.BYTE_ARRAY_OFFSET + offset, holder.buffer, holder.cursor, length); writer.setOffsetAndSize(ordinal, holder.cursor, length); // move the cursor forward. holder.cursor += roundedSize; } private static class BinaryConverter implements Converter { private final BufferHolder holder; BinaryConverter(BufferHolder holder) { this.holder = holder; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { BytesColumnVector v = (BytesColumnVector) vector; writer.write(column, v.vector[row], v.start[row], v.length[row]); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { BytesColumnVector v = (BytesColumnVector) vector; write(holder, writer, element, v.vector[row], v.start[row], v.length[row]); } } } /** * This hack is to get the unscaled value (for precision <= 18) quickly. * This can be replaced when we upgrade to storage-api 2.5.0. 
*/ static class DecimalHack extends FastHiveDecimal { long unscaledLong(FastHiveDecimal value) { fastSet(value); return fastSignum * fast1 * 10_000_000_000_000_000L + fast0; } } private static class Decimal18Converter implements Converter { final DecimalHack hack = new DecimalHack(); final int precision; final int scale; Decimal18Converter(int precision, int scale) { this.precision = precision; this.scale = scale; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { HiveDecimalWritable v = ((DecimalColumnVector) vector).vector[row]; writer.write(column, new Decimal().set(hack.unscaledLong(v), precision, v.scale()), precision, scale); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { HiveDecimalWritable v = ((DecimalColumnVector) vector).vector[row]; writer.write(element, new Decimal().set(hack.unscaledLong(v), precision, v.scale()), precision, scale); } } } private static class Decimal38Converter implements Converter { final int precision; final int scale; Decimal38Converter(int precision, int scale) { this.precision = precision; this.scale = scale; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { BigDecimal v = ((DecimalColumnVector) vector).vector[row] .getHiveDecimal().bigDecimalValue(); writer.write(column, new Decimal().set(new scala.math.BigDecimal(v), precision, scale), precision, scale); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && 
vector.isNull[row]) { writer.setNull(element); } else { BigDecimal v = ((DecimalColumnVector) vector).vector[row] .getHiveDecimal().bigDecimalValue(); writer.write(element, new Decimal().set(new scala.math.BigDecimal(v), precision, scale), precision, scale); } } } private static class StructConverter implements Converter { private final BufferHolder holder; private final Converter[] children; private final UnsafeRowWriter childWriter; StructConverter(BufferHolder holder, TypeDescription schema) { this.holder = holder; children = new Converter[schema.getChildren().size()]; for(int c=0; c < children.length; ++c) { children[c] = buildConverter(holder, schema.getChildren().get(c)); } childWriter = new UnsafeRowWriter(holder, children.length); } int writeStruct(StructColumnVector vector, int row) { int start = holder.cursor; childWriter.reset(); for(int c=0; c < children.length; ++c) { children[c].convert(childWriter, c, vector.fields[c], row); } return start; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { int start = writeStruct((StructColumnVector) vector, row); writer.setOffsetAndSize(column, start, holder.cursor - start); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { int start = writeStruct((StructColumnVector) vector, row); writer.setOffsetAndSize(element, start, holder.cursor - start); } } } private static class ListConverter implements Converter { private final BufferHolder holder; private final Converter children; private final UnsafeArrayWriter childWriter; private final int elementSize; ListConverter(BufferHolder holder, TypeDescription schema) { this.holder = holder; TypeDescription child = schema.getChildren().get(0); 
children = buildConverter(holder, child); childWriter = new UnsafeArrayWriter(); elementSize = getArrayElementSize(child); } int writeList(ListColumnVector v, int row) { int offset = (int) v.offsets[row]; int length = (int) v.lengths[row]; int start = holder.cursor; childWriter.initialize(holder, length, elementSize); for(int c=0; c < length; ++c) { children.convert(childWriter, c, v.child, offset + c); } return start; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { int start = writeList((ListColumnVector) vector, row); writer.setOffsetAndSize(column, start, holder.cursor - start); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { int start = writeList((ListColumnVector) vector, row); writer.setOffsetAndSize(element, start, holder.cursor - start); } } } private static class MapConverter implements Converter { private final BufferHolder holder; private final Converter keyConvert; private final Converter valueConvert; private final UnsafeArrayWriter childWriter; private final int keySize; private final int valueSize; MapConverter(BufferHolder holder, TypeDescription schema) { this.holder = holder; TypeDescription keyType = schema.getChildren().get(0); TypeDescription valueType = schema.getChildren().get(1); keyConvert = buildConverter(holder, keyType); keySize = getArrayElementSize(keyType); valueConvert = buildConverter(holder, valueType); valueSize = getArrayElementSize(valueType); childWriter = new UnsafeArrayWriter(); } int writeMap(MapColumnVector v, int row) { int offset = (int) v.offsets[row]; int length = (int) v.lengths[row]; int start = holder.cursor; // save room for the key size final int KEY_SIZE_BYTES = 8; 
holder.grow(KEY_SIZE_BYTES); holder.cursor += KEY_SIZE_BYTES; // serialize the keys childWriter.initialize(holder, length, keySize); for(int c=0; c < length; ++c) { keyConvert.convert(childWriter, c, v.keys, offset + c); } // store the serialized size of the keys Platform.putLong(holder.buffer, start, holder.cursor - start - KEY_SIZE_BYTES); // serialize the values childWriter.initialize(holder, length, valueSize); for(int c=0; c < length; ++c) { valueConvert.convert(childWriter, c, v.values, offset + c); } return start; } @Override public void convert(UnsafeRowWriter writer, int column, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNullAt(column); } else { int start = writeMap((MapColumnVector) vector, row); writer.setOffsetAndSize(column, start, holder.cursor - start); } } @Override public void convert(UnsafeArrayWriter writer, int element, ColumnVector vector, int row) { if (vector.isRepeating) { row = 0; } if (!vector.noNulls && vector.isNull[row]) { writer.setNull(element); } else { int start = writeMap((MapColumnVector) vector, row); writer.setOffsetAndSize(element, start, holder.cursor - start); } } } static Converter buildConverter(BufferHolder holder, TypeDescription schema) { switch (schema.getCategory()) { case BOOLEAN: return new BooleanConverter(); case BYTE: return new ByteConverter(); case SHORT: return new ShortConverter(); case DATE: case INT: return new IntConverter(); case LONG: return new LongConverter(); case FLOAT: return new FloatConverter(); case DOUBLE: return new DoubleConverter(); case TIMESTAMP: return new TimestampConverter(); case DECIMAL: if (schema.getPrecision() <= Decimal.MAX_LONG_DIGITS()) { return new Decimal18Converter(schema.getPrecision(), schema.getScale()); } else { return new Decimal38Converter(schema.getPrecision(), schema.getScale()); } case BINARY: case STRING: case CHAR: case VARCHAR: return new BinaryConverter(holder); case STRUCT: return new 
StructConverter(holder, schema); case LIST: return new ListConverter(holder, schema); case MAP: return new MapConverter(holder, schema); default: throw new IllegalArgumentException("Unhandled type " + schema); } } }
2,250
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkValueWriters.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.base.Preconditions; import com.netflix.iceberg.avro.ValueWriter; import com.netflix.iceberg.types.TypeUtil; import org.apache.avro.io.Encoder; import org.apache.avro.util.Utf8; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import java.io.IOException; import java.lang.reflect.Array; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.List; import java.util.UUID; public class SparkValueWriters { static ValueWriter<UTF8String> strings() { return StringWriter.INSTANCE; } static ValueWriter<UTF8String> uuids() { return UUIDWriter.INSTANCE; } static ValueWriter<Decimal> decimal(int precision, int scale) { return new DecimalWriter(precision, scale); } static <T> ValueWriter<ArrayData> array(ValueWriter<T> elementWriter, DataType elementType) { return new ArrayWriter<>(elementWriter, elementType); } static <K, V> ValueWriter<MapData> arrayMap( ValueWriter<K> keyWriter, DataType keyType, ValueWriter<V> valueWriter, DataType valueType) { return new ArrayMapWriter<>(keyWriter, keyType, valueWriter, valueType); } static <K, V> ValueWriter<MapData> map( 
ValueWriter<K> keyWriter, DataType keyType, ValueWriter<V> valueWriter, DataType valueType) { return new MapWriter<>(keyWriter, keyType, valueWriter, valueType); } static ValueWriter<InternalRow> struct(List<ValueWriter<?>> writers, List<DataType> types) { return new StructWriter(writers, types); } private static class StringWriter implements ValueWriter<UTF8String> { private static StringWriter INSTANCE = new StringWriter(); private StringWriter() { } @Override public void write(UTF8String s, Encoder encoder) throws IOException { // use getBytes because it may return the backing byte array if available. // otherwise, it copies to a new byte array, which is still cheaper than Avro // calling toString, which incurs encoding costs encoder.writeString(new Utf8(s.getBytes())); } } private static class UUIDWriter implements ValueWriter<UTF8String> { private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> { ByteBuffer buffer = ByteBuffer.allocate(16); buffer.order(ByteOrder.BIG_ENDIAN); return buffer; }); private static UUIDWriter INSTANCE = new UUIDWriter(); private UUIDWriter() { } @Override public void write(UTF8String s, Encoder encoder) throws IOException { // TODO: direct conversion from string to byte buffer UUID uuid = UUID.fromString(s.toString()); ByteBuffer buffer = BUFFER.get(); buffer.rewind(); buffer.putLong(uuid.getMostSignificantBits()); buffer.putLong(uuid.getLeastSignificantBits()); encoder.writeFixed(buffer.array()); } } private static class DecimalWriter implements ValueWriter<Decimal> { private final int precision; private final int scale; private final int length; private final ThreadLocal<byte[]> bytes; private DecimalWriter(int precision, int scale) { this.precision = precision; this.scale = scale; this.length = TypeUtil.decimalRequriedBytes(precision); this.bytes = ThreadLocal.withInitial(() -> new byte[length]); } @Override public void write(Decimal d, Encoder encoder) throws IOException { 
Preconditions.checkArgument(d.scale() == scale, "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, d); Preconditions.checkArgument(d.precision() <= precision, "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, d); BigDecimal decimal = d.toJavaBigDecimal(); byte fillByte = (byte) (decimal.signum() < 0 ? 0xFF : 0x00); byte[] unscaled = decimal.unscaledValue().toByteArray(); byte[] buf = bytes.get(); int offset = length - unscaled.length; for (int i = 0; i < length; i += 1) { if (i < offset) { buf[i] = fillByte; } else { buf[i] = unscaled[i - offset]; } } encoder.writeFixed(buf); } } private static class ArrayWriter<T> implements ValueWriter<ArrayData> { private final ValueWriter<T> elementWriter; private final DataType elementType; private ArrayWriter(ValueWriter<T> elementWriter, DataType elementType) { this.elementWriter = elementWriter; this.elementType = elementType; } @Override @SuppressWarnings("unchecked") public void write(ArrayData array, Encoder encoder) throws IOException { encoder.writeArrayStart(); int numElements = array.numElements(); encoder.setItemCount(numElements); for (int i = 0; i < numElements; i += 1) { encoder.startItem(); elementWriter.write((T) array.get(i, elementType), encoder); } encoder.writeArrayEnd(); } } private static class ArrayMapWriter<K, V> implements ValueWriter<MapData> { private final ValueWriter<K> keyWriter; private final ValueWriter<V> valueWriter; private final DataType keyType; private final DataType valueType; private ArrayMapWriter(ValueWriter<K> keyWriter, DataType keyType, ValueWriter<V> valueWriter, DataType valueType) { this.keyWriter = keyWriter; this.keyType = keyType; this.valueWriter = valueWriter; this.valueType = valueType; } @Override @SuppressWarnings("unchecked") public void write(MapData map, Encoder encoder) throws IOException { encoder.writeArrayStart(); int numElements = map.numElements(); encoder.setItemCount(numElements); ArrayData keyArray = 
map.keyArray(); ArrayData valueArray = map.valueArray(); for (int i = 0; i < numElements; i += 1) { encoder.startItem(); keyWriter.write((K) keyArray.get(i, keyType), encoder); valueWriter.write((V) valueArray.get(i, valueType), encoder); } encoder.writeArrayEnd(); } } private static class MapWriter<K, V> implements ValueWriter<MapData> { private final ValueWriter<K> keyWriter; private final ValueWriter<V> valueWriter; private final DataType keyType; private final DataType valueType; private MapWriter(ValueWriter<K> keyWriter, DataType keyType, ValueWriter<V> valueWriter, DataType valueType) { this.keyWriter = keyWriter; this.keyType = keyType; this.valueWriter = valueWriter; this.valueType = valueType; } @Override @SuppressWarnings("unchecked") public void write(MapData map, Encoder encoder) throws IOException { encoder.writeMapStart(); int numElements = map.numElements(); encoder.setItemCount(numElements); ArrayData keyArray = map.keyArray(); ArrayData valueArray = map.valueArray(); for (int i = 0; i < numElements; i += 1) { encoder.startItem(); keyWriter.write((K) keyArray.get(i, keyType), encoder); valueWriter.write((V) valueArray.get(i, valueType), encoder); } encoder.writeMapEnd(); } } static class StructWriter implements ValueWriter<InternalRow> { final ValueWriter<?>[] writers; private final DataType[] types; @SuppressWarnings("unchecked") private StructWriter(List<ValueWriter<?>> writers, List<DataType> types) { this.writers = (ValueWriter<?>[]) Array.newInstance(ValueWriter.class, writers.size()); this.types = new DataType[writers.size()]; for (int i = 0; i < writers.size(); i += 1) { this.writers[i] = writers.get(i); this.types[i] = types.get(i); } } @Override public void write(InternalRow row, Encoder encoder) throws IOException { for (int i = 0; i < types.length; i += 1) { if (row.isNullAt(i)) { writers[i].write(null, encoder); } else { write(row, i, writers[i], encoder); } } } @SuppressWarnings("unchecked") private <T> void write(InternalRow row, int 
pos, ValueWriter<T> writer, Encoder encoder) throws IOException { writer.write((T) row.get(pos, types[pos]), encoder); } } }
2,251
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkValueReaders.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.Lists; import com.netflix.iceberg.avro.ValueReader; import org.apache.avro.Schema; import org.apache.avro.io.Decoder; import org.apache.avro.io.ResolvingDecoder; import org.apache.avro.util.Utf8; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.GenericInternalRow; import org.apache.spark.sql.catalyst.util.ArrayBasedMapData; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.GenericArrayData; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.UTF8String; import java.io.IOException; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.List; import java.util.UUID; public class SparkValueReaders { static ValueReader<UTF8String> strings() { return StringReader.INSTANCE; } static ValueReader<UTF8String> uuids() { return UUIDReader.INSTANCE; } static ValueReader<Decimal> decimal(ValueReader<byte[]> unscaledReader, int scale) { return new DecimalReader(unscaledReader, scale); } static ValueReader<ArrayData> array(ValueReader<?> elementReader) { return new ArrayReader(elementReader); } static ValueReader<ArrayBasedMapData> arrayMap(ValueReader<?> keyReader, ValueReader<?> valueReader) { return new ArrayMapReader(keyReader, valueReader); } 
static ValueReader<ArrayBasedMapData> map(ValueReader<?> keyReader, ValueReader<?> valueReader) { return new MapReader(keyReader, valueReader); } static ValueReader<InternalRow> struct(List<ValueReader<?>> readers) { return new StructReader(readers); } private static class StringReader implements ValueReader<UTF8String> { private static StringReader INSTANCE = new StringReader(); private StringReader() { } @Override public UTF8String read(Decoder decoder, Object reuse) throws IOException { // use the decoder's readString(Utf8) method because it may be a resolving decoder Utf8 utf8 = null; if (reuse instanceof UTF8String) { utf8 = new Utf8(((UTF8String) reuse).getBytes()); } Utf8 string = decoder.readString(utf8); return UTF8String.fromBytes(string.getBytes(), 0, string.getByteLength()); // int length = decoder.readInt(); // byte[] bytes = new byte[length]; // decoder.readFixed(bytes, 0, length); // return UTF8String.fromBytes(bytes); } } private static class UUIDReader implements ValueReader<UTF8String> { private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> { ByteBuffer buffer = ByteBuffer.allocate(16); buffer.order(ByteOrder.BIG_ENDIAN); return buffer; }); private static UUIDReader INSTANCE = new UUIDReader(); private UUIDReader() { } @Override public UTF8String read(Decoder decoder, Object reuse) throws IOException { ByteBuffer buffer = BUFFER.get(); buffer.rewind(); decoder.readFixed(buffer.array(), 0, 16); long mostSigBits = buffer.getLong(); long leastSigBits = buffer.getLong(); return UTF8String.fromString(new UUID(mostSigBits, leastSigBits).toString()); } } private static class DecimalReader implements ValueReader<Decimal> { private final ValueReader<byte[]> bytesReader; private final int scale; private DecimalReader(ValueReader<byte[]> bytesReader, int scale) { this.bytesReader = bytesReader; this.scale = scale; } @Override public Decimal read(Decoder decoder, Object reuse) throws IOException { byte[] bytes = 
bytesReader.read(decoder, null); return Decimal.apply(new BigDecimal(new BigInteger(bytes), scale)); } } private static class ArrayReader implements ValueReader<ArrayData> { private final ValueReader<?> elementReader; private final List<Object> reusedList = Lists.newArrayList(); private ArrayReader(ValueReader<?> elementReader) { this.elementReader = elementReader; } @Override public GenericArrayData read(Decoder decoder, Object reuse) throws IOException { reusedList.clear(); long chunkLength = decoder.readArrayStart(); while (chunkLength > 0) { for (int i = 0; i < chunkLength; i += 1) { reusedList.add(elementReader.read(decoder, null)); } chunkLength = decoder.arrayNext(); } // this will convert the list to an array so it is okay to reuse the list return new GenericArrayData(reusedList.toArray()); } } private static class ArrayMapReader implements ValueReader<ArrayBasedMapData> { private final ValueReader<?> keyReader; private final ValueReader<?> valueReader; private final List<Object> reusedKeyList = Lists.newArrayList(); private final List<Object> reusedValueList = Lists.newArrayList(); private ArrayMapReader(ValueReader<?> keyReader, ValueReader<?> valueReader) { this.keyReader = keyReader; this.valueReader = valueReader; } @Override public ArrayBasedMapData read(Decoder decoder, Object reuse) throws IOException { reusedKeyList.clear(); reusedValueList.clear(); long chunkLength = decoder.readArrayStart(); while (chunkLength > 0) { for (int i = 0; i < chunkLength; i += 1) { reusedKeyList.add(keyReader.read(decoder, null)); reusedValueList.add(valueReader.read(decoder, null)); } chunkLength = decoder.arrayNext(); } return new ArrayBasedMapData( new GenericArrayData(reusedKeyList.toArray()), new GenericArrayData(reusedValueList.toArray())); } } private static class MapReader implements ValueReader<ArrayBasedMapData> { private final ValueReader<?> keyReader; private final ValueReader<?> valueReader; private final List<Object> reusedKeyList = Lists.newArrayList(); 
private final List<Object> reusedValueList = Lists.newArrayList(); private MapReader(ValueReader<?> keyReader, ValueReader<?> valueReader) { this.keyReader = keyReader; this.valueReader = valueReader; } @Override public ArrayBasedMapData read(Decoder decoder, Object reuse) throws IOException { reusedKeyList.clear(); reusedValueList.clear(); long chunkLength = decoder.readMapStart(); while (chunkLength > 0) { for (int i = 0; i < chunkLength; i += 1) { reusedKeyList.add(keyReader.read(decoder, null)); reusedValueList.add(valueReader.read(decoder, null)); } chunkLength = decoder.mapNext(); } return new ArrayBasedMapData( new GenericArrayData(reusedKeyList.toArray()), new GenericArrayData(reusedValueList.toArray())); } } static class StructReader implements ValueReader<InternalRow> { final ValueReader<?>[] readers; private StructReader(List<ValueReader<?>> readers) { this.readers = new ValueReader[readers.size()]; for (int i = 0; i < this.readers.length; i += 1) { this.readers[i] = readers.get(i); } } @Override public InternalRow read(Decoder decoder, Object reuse) throws IOException { GenericInternalRow row = new GenericInternalRow(readers.length); if (decoder instanceof ResolvingDecoder) { // this may not set all of the fields. nulls are set by default. for (Schema.Field field : ((ResolvingDecoder) decoder).readFieldOrder()) { Object value = readers[field.pos()].read(decoder, null); if (value != null) { row.update(field.pos(), value); } else { row.setNullAt(field.pos()); } } } else { for (int i = 0; i < readers.length; i += 1) { Object value = readers[i].read(decoder, null); if (value != null) { row.update(i, value); } else { row.setNullAt(i); } } } return row; } } }
2,252
0
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark
Create_ds/iceberg/spark/src/main/java/com/netflix/iceberg/spark/data/SparkParquetReaders.java
/* * Copyright 2017 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.netflix.iceberg.spark.data; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.netflix.iceberg.Schema; import com.netflix.iceberg.parquet.ParquetValueReader; import com.netflix.iceberg.parquet.ParquetValueReaders; import com.netflix.iceberg.parquet.ParquetValueReaders.FloatAsDoubleReader; import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader; import com.netflix.iceberg.parquet.ParquetValueReaders.PrimitiveReader; import com.netflix.iceberg.parquet.ParquetValueReaders.RepeatedKeyValueReader; import com.netflix.iceberg.parquet.ParquetValueReaders.RepeatedReader; import com.netflix.iceberg.parquet.ParquetValueReaders.ReusableEntry; import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader; import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader; import com.netflix.iceberg.parquet.TypeWithSchemaVisitor; import com.netflix.iceberg.types.Type.TypeID; import com.netflix.iceberg.types.Types; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.io.api.Binary; import org.apache.parquet.schema.DecimalMetadata; import org.apache.parquet.schema.GroupType; import org.apache.parquet.schema.MessageType; import org.apache.parquet.schema.PrimitiveType; import org.apache.parquet.schema.Type; import org.apache.spark.sql.catalyst.InternalRow; import 
org.apache.spark.sql.catalyst.expressions.GenericInternalRow; import org.apache.spark.sql.catalyst.util.ArrayBasedMapData; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.GenericArrayData; import org.apache.spark.sql.catalyst.util.MapData; import org.apache.spark.sql.types.DataType; import org.apache.spark.sql.types.Decimal; import org.apache.spark.unsafe.types.CalendarInterval; import org.apache.spark.unsafe.types.UTF8String; import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds; import static com.netflix.iceberg.parquet.ParquetValueReaders.option; public class SparkParquetReaders { private SparkParquetReaders() { } @SuppressWarnings("unchecked") public static ParquetValueReader<InternalRow> buildReader(Schema expectedSchema, MessageType fileSchema) { if (hasIds(fileSchema)) { return (ParquetValueReader<InternalRow>) TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema, new ReadBuilder(fileSchema)); } else { return (ParquetValueReader<InternalRow>) TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema, new FallbackReadBuilder(fileSchema)); } } private static class FallbackReadBuilder extends ReadBuilder { FallbackReadBuilder(MessageType type) { super(type); } @Override public ParquetValueReader<?> message(Types.StructType expected, MessageType message, List<ParquetValueReader<?>> fieldReaders) { // the top level matches by ID, but the remaining IDs are missing return super.struct(expected, message, fieldReaders); } @Override public ParquetValueReader<?> struct(Types.StructType ignored, GroupType struct, List<ParquetValueReader<?>> fieldReaders) { // the expected struct is ignored because nested fields are never found when the List<ParquetValueReader<?>> newFields = Lists.newArrayListWithExpectedSize( 
fieldReaders.size()); List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size()); List<Type> fields = struct.getFields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i); int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()))-1; newFields.add(option(fieldType, fieldD, fieldReaders.get(i))); types.add(fieldType); } return new InternalRowReader(types, newFields); } } private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> { protected final MessageType type; ReadBuilder(MessageType type) { this.type = type; } @Override public ParquetValueReader<?> message(Types.StructType expected, MessageType message, List<ParquetValueReader<?>> fieldReaders) { return struct(expected, message.asGroupType(), fieldReaders); } @Override public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct, List<ParquetValueReader<?>> fieldReaders) { // match the expected struct's order Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap(); Map<Integer, Type> typesById = Maps.newHashMap(); List<Type> fields = struct.getFields(); for (int i = 0; i < fields.size(); i += 1) { Type fieldType = fields.get(i); int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName()))-1; int id = fieldType.getId().intValue(); readersById.put(id, option(fieldType, fieldD, fieldReaders.get(i))); typesById.put(id, fieldType); } List<Types.NestedField> expectedFields = expected != null ? 
expected.fields() : ImmutableList.of(); List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize( expectedFields.size()); List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size()); for (Types.NestedField field : expectedFields) { int id = field.fieldId(); ParquetValueReader<?> reader = readersById.get(id); if (reader != null) { reorderedFields.add(reader); types.add(typesById.get(id)); } else { reorderedFields.add(ParquetValueReaders.nulls()); types.add(null); } } return new InternalRowReader(types, reorderedFields); } @Override public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array, ParquetValueReader<?> elementReader) { GroupType repeated = array.getFields().get(0).asGroupType(); String[] repeatedPath = currentPath(); int repeatedD = type.getMaxDefinitionLevel(repeatedPath)-1; int repeatedR = type.getMaxRepetitionLevel(repeatedPath)-1; Type elementType = repeated.getType(0); int elementD = type.getMaxDefinitionLevel(path(elementType.getName()))-1; return new ArrayReader<>(repeatedD, repeatedR, option(elementType, elementD, elementReader)); } @Override public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map, ParquetValueReader<?> keyReader, ParquetValueReader<?> valueReader) { GroupType repeatedKeyValue = map.getFields().get(0).asGroupType(); String[] repeatedPath = currentPath(); int repeatedD = type.getMaxDefinitionLevel(repeatedPath)-1; int repeatedR = type.getMaxRepetitionLevel(repeatedPath)-1; Type keyType = repeatedKeyValue.getType(0); int keyD = type.getMaxDefinitionLevel(path(keyType.getName()))-1; Type valueType = repeatedKeyValue.getType(1); int valueD = type.getMaxDefinitionLevel(path(valueType.getName()))-1; return new MapReader<>(repeatedD, repeatedR, option(keyType, keyD, keyReader), option(valueType, valueD, valueReader)); } @Override public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected, PrimitiveType primitive) { 
ColumnDescriptor desc = type.getColumnDescription(currentPath()); if (primitive.getOriginalType() != null) { switch (primitive.getOriginalType()) { case ENUM: case JSON: case UTF8: return new StringReader(desc); case INT_8: case INT_16: case INT_32: if (expected != null && expected.typeId() == Types.LongType.get().typeId()) { return new IntAsLongReader(desc); } else { return new UnboxedReader(desc); } case DATE: case INT_64: case TIMESTAMP_MICROS: return new UnboxedReader<>(desc); case TIMESTAMP_MILLIS: return new TimestampMillisReader(desc); case DECIMAL: DecimalMetadata decimal = primitive.getDecimalMetadata(); switch (primitive.getPrimitiveTypeName()) { case BINARY: case FIXED_LEN_BYTE_ARRAY: return new BinaryDecimalReader(desc, decimal.getScale()); case INT64: return new LongDecimalReader(desc, decimal.getPrecision(), decimal.getScale()); case INT32: return new IntegerDecimalReader(desc, decimal.getPrecision(), decimal.getScale()); default: throw new UnsupportedOperationException( "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName()); } case BSON: return new BytesReader(desc); default: throw new UnsupportedOperationException( "Unsupported logical type: " + primitive.getOriginalType()); } } switch (primitive.getPrimitiveTypeName()) { case FIXED_LEN_BYTE_ARRAY: case BINARY: return new BytesReader(desc); case INT32: if (expected != null && expected.typeId() == TypeID.LONG) { return new IntAsLongReader(desc); } else { return new UnboxedReader<>(desc); } case FLOAT: if (expected != null && expected.typeId() == TypeID.DOUBLE) { return new FloatAsDoubleReader(desc); } else { return new UnboxedReader<>(desc); } case BOOLEAN: case INT64: case DOUBLE: return new UnboxedReader<>(desc); default: throw new UnsupportedOperationException("Unsupported type: " + primitive); } } private String[] currentPath() { String[] path = new String[fieldNames.size()]; if (!fieldNames.isEmpty()) { Iterator<String> iter = fieldNames.descendingIterator(); for (int i = 0; 
iter.hasNext(); i += 1) { path[i] = iter.next(); } } return path; } protected String[] path(String name) { String[] path = new String[fieldNames.size() + 1]; path[fieldNames.size()] = name; if (!fieldNames.isEmpty()) { Iterator<String> iter = fieldNames.descendingIterator(); for (int i = 0; iter.hasNext(); i += 1) { path[i] = iter.next(); } } return path; } } private static class BinaryDecimalReader extends PrimitiveReader<Decimal> { private final int scale; BinaryDecimalReader(ColumnDescriptor desc, int scale) { super(desc); this.scale = scale; } @Override public Decimal read(Decimal ignored) { Binary binary = column.nextBinary(); return Decimal.fromDecimal(new BigDecimal(new BigInteger(binary.getBytes()), scale)); } } private static class IntegerDecimalReader extends PrimitiveReader<Decimal> { private final int precision; private final int scale; IntegerDecimalReader(ColumnDescriptor desc, int precision, int scale) { super(desc); this.precision = precision; this.scale = scale; } @Override public Decimal read(Decimal ignored) { return Decimal.apply(column.nextInteger(), precision, scale); } } private static class LongDecimalReader extends PrimitiveReader<Decimal> { private final int precision; private final int scale; LongDecimalReader(ColumnDescriptor desc, int precision, int scale) { super(desc); this.precision = precision; this.scale = scale; } @Override public Decimal read(Decimal ignored) { return Decimal.apply(column.nextLong(), precision, scale); } } private static class TimestampMillisReader extends UnboxedReader<Long> { TimestampMillisReader(ColumnDescriptor desc) { super(desc); } @Override public Long read(Long ignored) { return readLong(); } @Override public long readLong() { return 1000 * column.nextLong(); } } private static class StringReader extends PrimitiveReader<UTF8String> { StringReader(ColumnDescriptor desc) { super(desc); } @Override public UTF8String read(UTF8String ignored) { Binary binary = column.nextBinary(); ByteBuffer buffer = 
binary.toByteBuffer(); if (buffer.hasArray()) { return UTF8String.fromBytes( buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); } else { return UTF8String.fromBytes(binary.getBytes()); } } } private static class BytesReader extends PrimitiveReader<byte[]> { BytesReader(ColumnDescriptor desc) { super(desc); } @Override public byte[] read(byte[] ignored) { return column.nextBinary().getBytes(); } } private static class ArrayReader<E> extends RepeatedReader<ArrayData, ReusableArrayData, E> { private int readPos = 0; private int writePos = 0; ArrayReader(int definitionLevel, int repetitionLevel, ParquetValueReader<E> reader) { super(definitionLevel, repetitionLevel, reader); } @Override @SuppressWarnings("unchecked") protected ReusableArrayData newListData(ArrayData reuse) { this.readPos = 0; this.writePos = 0; if (reuse instanceof ReusableArrayData) { return (ReusableArrayData) reuse; } else { return new ReusableArrayData(); } } @Override @SuppressWarnings("unchecked") protected E getElement(ReusableArrayData list) { E value = null; if (readPos < list.capacity()) { value = (E) list.values[readPos]; } readPos += 1; return value; } @Override protected void addElement(ReusableArrayData reused, E element) { if (writePos >= reused.capacity()) { reused.grow(); } reused.values[writePos] = element; writePos += 1; } @Override protected ArrayData buildList(ReusableArrayData list) { list.setNumElements(writePos); return list; } } private static class MapReader<K, V> extends RepeatedKeyValueReader<MapData, ReusableMapData, K, V> { private int readPos = 0; private int writePos = 0; private final ReusableEntry<K, V> entry = new ReusableEntry<>(); private final ReusableEntry<K, V> nullEntry = new ReusableEntry<>(); MapReader(int definitionLevel, int repetitionLevel, ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) { super(definitionLevel, repetitionLevel, keyReader, valueReader); } @Override @SuppressWarnings("unchecked") protected 
ReusableMapData newMapData(MapData reuse) { this.readPos = 0; this.writePos = 0; if (reuse instanceof ReusableMapData) { return (ReusableMapData) reuse; } else { return new ReusableMapData(); } } @Override @SuppressWarnings("unchecked") protected Map.Entry<K, V> getPair(ReusableMapData map) { Map.Entry<K, V> kv = nullEntry; if (readPos < map.capacity()) { entry.set((K) map.keys.values[readPos], (V) map.values.values[readPos]); kv = entry; } readPos += 1; return kv; } @Override protected void addPair(ReusableMapData map, K key, V value) { if (writePos >= map.capacity()) { map.grow(); } map.keys.values[writePos] = key; map.values.values[writePos] = value; writePos += 1; } @Override protected MapData buildMap(ReusableMapData map) { map.setNumElements(writePos); return map; } } private static class InternalRowReader extends StructReader<InternalRow, GenericInternalRow> { private final int numFields; InternalRowReader(List<Type> types, List<ParquetValueReader<?>> readers) { super(types, readers); this.numFields = readers.size(); } @Override protected GenericInternalRow newStructData(InternalRow reuse) { if (reuse instanceof GenericInternalRow) { return (GenericInternalRow) reuse; } else { return new GenericInternalRow(numFields); } } @Override protected Object getField(GenericInternalRow intermediate, int pos) { return intermediate.genericGet(pos); } @Override protected InternalRow buildStruct(GenericInternalRow struct) { return struct; } @Override protected void set(GenericInternalRow row, int pos, Object value) { row.update(pos, value); } @Override protected void setNull(GenericInternalRow row, int pos) { row.setNullAt(pos); } @Override protected void setBoolean(GenericInternalRow row, int pos, boolean value) { row.setBoolean(pos, value); } @Override protected void setInteger(GenericInternalRow row, int pos, int value) { row.setInt(pos, value); } @Override protected void setLong(GenericInternalRow row, int pos, long value) { row.setLong(pos, value); } @Override 
protected void setFloat(GenericInternalRow row, int pos, float value) { row.setFloat(pos, value); } @Override protected void setDouble(GenericInternalRow row, int pos, double value) { row.setDouble(pos, value); } } private static class ReusableMapData extends MapData { private final ReusableArrayData keys; private final ReusableArrayData values; private int numElements; private ReusableMapData() { this.keys = new ReusableArrayData(); this.values = new ReusableArrayData(); } private void grow() { keys.grow(); values.grow(); } private int capacity() { return keys.capacity(); } public void setNumElements(int numElements) { this.numElements = numElements; keys.setNumElements(numElements); values.setNumElements(numElements); } @Override public int numElements() { return numElements; } @Override public MapData copy() { return new ArrayBasedMapData(keyArray().copy(), valueArray().copy()); } @Override public ReusableArrayData keyArray() { return keys; } @Override public ReusableArrayData valueArray() { return values; } } private static class ReusableArrayData extends ArrayData { private static final Object[] EMPTY = new Object[0]; private Object[] values = EMPTY; private int numElements = 0; private void grow() { if (values.length == 0) { this.values = new Object[20]; } else { Object[] old = values; this.values = new Object[old.length << 2]; // copy the old array in case it has values that can be reused System.arraycopy(old, 0, values, 0, old.length); } } private int capacity() { return values.length; } public void setNumElements(int numElements) { this.numElements = numElements; } @Override public Object get(int ordinal, DataType dataType) { return values[ordinal]; } @Override public int numElements() { return numElements; } @Override public ArrayData copy() { return new GenericArrayData(array()); } @Override public Object[] array() { return Arrays.copyOfRange(values, 0, numElements); } // @Override public void setNullAt(int i) { values[i] = null; } // @Override public 
void update(int ordinal, Object value) { values[ordinal] = value; } @Override public boolean isNullAt(int ordinal) { return null == values[ordinal]; } @Override public boolean getBoolean(int ordinal) { return (boolean) values[ordinal]; } @Override public byte getByte(int ordinal) { return (byte) values[ordinal]; } @Override public short getShort(int ordinal) { return (short) values[ordinal]; } @Override public int getInt(int ordinal) { return (int) values[ordinal]; } @Override public long getLong(int ordinal) { return (long) values[ordinal]; } @Override public float getFloat(int ordinal) { return (float) values[ordinal]; } @Override public double getDouble(int ordinal) { return (double) values[ordinal]; } @Override public Decimal getDecimal(int ordinal, int precision, int scale) { return (Decimal) values[ordinal]; } @Override public UTF8String getUTF8String(int ordinal) { return (UTF8String) values[ordinal]; } @Override public byte[] getBinary(int ordinal) { return (byte[]) values[ordinal]; } @Override public CalendarInterval getInterval(int ordinal) { return (CalendarInterval) values[ordinal]; } @Override public InternalRow getStruct(int ordinal, int numFields) { return (InternalRow) values[ordinal]; } @Override public ArrayData getArray(int ordinal) { return (ArrayData) values[ordinal]; } @Override public MapData getMap(int ordinal) { return (MapData) values[ordinal]; } } }
2,253
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/GeronimoConfigExtension.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.test; import org.jboss.arquillian.container.test.spi.client.deployment.ApplicationArchiveProcessor; import org.jboss.arquillian.core.spi.LoadableExtension; /** * @author <a href="mailto:struberg@yahoo.de">Mark Struberg</a> */ public class GeronimoConfigExtension implements LoadableExtension { @Override public void register(ExtensionBuilder extensionBuilder) { extensionBuilder.service(ApplicationArchiveProcessor.class, GeronimoConfigArchiveProcessor.class); } }
2,254
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/GeronimoConfigArchiveProcessor.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.test; import org.apache.geronimo.config.ConfigImpl; import org.apache.geronimo.config.DefaultConfigProvider; import org.apache.geronimo.config.cdi.ConfigExtension; import org.apache.geronimo.config.configsource.BaseConfigSource; import org.apache.geronimo.config.converters.BooleanConverter; import org.eclipse.microprofile.config.spi.ConfigProviderResolver; import org.jboss.arquillian.container.test.spi.client.deployment.ApplicationArchiveProcessor; import org.jboss.arquillian.test.spi.TestClass; import org.jboss.shrinkwrap.api.Archive; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.asset.EmptyAsset; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.jboss.shrinkwrap.api.spec.WebArchive; /** * Adds the whole Config implementation classes and resources to the * Arqillian deployment archive. This is needed to have the container * pick up the beans from within the impl for the TCK tests. 
* * @author <a href="mailto:struberg@yahoo.de">Mark Struberg</a> */ public class GeronimoConfigArchiveProcessor implements ApplicationArchiveProcessor { @Override public void process(Archive<?> applicationArchive, TestClass testClass) { if (applicationArchive instanceof WebArchive) { JavaArchive configJar = ShrinkWrap .create(JavaArchive.class, "geronimo-config-impl.jar") .addPackage(ConfigImpl.class.getPackage()) .addPackage(BooleanConverter.class.getPackage()) .addPackage(BaseConfigSource.class.getPackage()) .addPackage(ConfigExtension.class.getPackage()) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml") .addAsServiceProvider(ConfigProviderResolver.class, DefaultConfigProvider.class); ((WebArchive) applicationArchive).addAsLibraries(configJar); } } }
2,255
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/testng/SystemPropertiesLeakProtector.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.test.testng; import org.testng.ITestContext; import org.testng.TestListenerAdapter; import java.util.Properties; public class SystemPropertiesLeakProtector extends TestListenerAdapter { private Properties props; @Override public void onStart(final ITestContext context) { props = new Properties(); props.putAll(System.getProperties()); props.put("org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy", "false"); } @Override public void onFinish(final ITestContext testContext) { System.getProperties().clear(); // keep the same ref System.getProperties().putAll(props); } }
2,256
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/SupplierTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.geronimo.config.test.internal;

import org.apache.geronimo.config.test.testng.SystemPropertiesLeakProtector;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.testng.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.testng.Assert;
import org.testng.annotations.Test;

import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import java.util.function.Supplier;

/**
 * Arquillian test verifying that {@code Supplier<T>} injection points annotated with
 * {@link ConfigProperty} resolve their value on each {@code get()} call (lazy lookup),
 * honor {@code defaultValue} for missing keys, and yield null for undefined keys
 * without a default.
 */
public class SupplierTest extends Arquillian {
    private static final String SOME_KEY = "org.apache.geronimo.config.test.internal.somekey";
    private static final String SUPPLIER_DEFAULT_VALUE = "supplierDefaultValue";
    private static final String SOME_INT_KEY = "some.supplier.int.key";

    /**
     * Builds the test deployment: the test bean plus a
     * microprofile-config.properties that defines SOME_KEY=someval.
     */
    @Deployment
    public static WebArchive deploy() {
        JavaArchive testJar = ShrinkWrap
                .create(JavaArchive.class, "configSupplierTest.jar")
                .addClasses(SupplierTest.class, SomeBean.class)
                .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml")
                .addAsManifestResource(
                        new StringAsset(SOME_KEY + "=someval\n"),
                        "microprofile-config.properties")
                .as(JavaArchive.class);

        return ShrinkWrap
                .create(WebArchive.class, "supplierTest.war")
                .addAsLibrary(testJar);
    }

    private @Inject SomeBean someBean;

    @Test
    public void testConfigProvider() {
        // lazy way to reset all the system props manipulated by this test;
        // the listener's onStart/onFinish are called manually with null contexts
        final SystemPropertiesLeakProtector fixer = new SystemPropertiesLeakProtector();
        fixer.onStart(null);

        String someval = "someval";
        System.setProperty(SOME_KEY, someval);
        String myconfig = someBean.getMyconfig();
        Assert.assertEquals(myconfig, someval);

        // re-setting the system property must be visible through the same supplier,
        // demonstrating that each get() re-resolves the value
        String otherval = "otherval";
        System.setProperty(SOME_KEY, otherval);
        myconfig = someBean.getMyconfig();
        Assert.assertEquals(myconfig, otherval);

        // missing key with a defaultValue falls back to the default
        Assert.assertEquals(someBean.getAnotherconfig().get(), SUPPLIER_DEFAULT_VALUE);

        // suppliers also support converted types such as Integer
        System.setProperty(SOME_INT_KEY, "42");
        Assert.assertEquals(someBean.getSomeInt(), 42);

        // undefined key without a default yields null from get()
        Assert.assertNull(someBean.getUndefinedValue().get());

        fixer.onFinish(null);
    }

    @RequestScoped
    public static class SomeBean {

        @Inject
        @ConfigProperty(name=SOME_KEY)
        private Supplier<String> myconfig;

        @Inject
        @ConfigProperty(name = SOME_INT_KEY)
        private Supplier<Integer> someIntValue;

        @Inject
        @ConfigProperty(name="missing.key", defaultValue = SUPPLIER_DEFAULT_VALUE)
        private Supplier<String> anotherconfig;

        @Inject
        @ConfigProperty(name = "UNDEFINED_VALUE")
        private Supplier<Integer> undefinedValue;

        public int getSomeInt() {
            return someIntValue.get();
        }

        public String getMyconfig() {
            return myconfig.get();
        }

        public Supplier<String> getAnotherconfig() {
            return anotherconfig;
        }

        public Supplier<Integer> getUndefinedValue() {
            return undefinedValue;
        }
    }
}
2,257
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/DefaultNullValueTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.geronimo.config.test.internal;

import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.testng.Arquillian;
import org.jboss.shrinkwrap.api.Archive;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.testng.annotations.Test;

import javax.enterprise.context.ApplicationScoped;
import javax.inject.Inject;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.Optional;

import static org.testng.Assert.assertNull;

/**
 * Verifies that {@code Optional<T>} injection points whose config keys are
 * absent resolve to an empty {@code Optional} (so {@code orElse(null)} yields
 * {@code null}) for every supported target type.
 *
 * @author <a href="mailto:danielsoro@apache.org">Daniel 'soro' Cunha</a>
 */
public class DefaultNullValueTest extends Arquillian {

    @Deployment
    public static Archive<?> archive() {
        return ShrinkWrap.create(WebArchive.class, DefaultNullValueTest.class.getSimpleName() + ".war")
                .addAsWebInfResource(EmptyAsset.INSTANCE, "classes/META-INF/beans.xml")
                .addClasses(DefaultNullValueTest.class, DefaultNullValueTest.Injected.class);
    }

    @Inject
    private Injected injected;

    @Test
    public void testDefaultNullValue() {
        assertNull(injected.getBooleanNullValue());
        assertNull(injected.getStringNullValue());
        assertNull(injected.getByteNullValue());
        assertNull(injected.getIntegerNullValue());
        assertNull(injected.getLongNullValue());
        assertNull(injected.getShortNullValue());
        assertNull(injected.getListNullValue());
        assertNull(injected.getClassNullValue());
        assertNull(injected.getDoubleNullValue());
        assertNull(injected.getDurationNullValue());
        // These three types are injected below but were previously never asserted.
        assertNull(injected.getFloatNullValue());
        assertNull(injected.getCharacterNullValue());
        assertNull(injected.getUrlNullValue());
    }

    /**
     * Bean exposing one {@code Optional} injection point per converter type,
     * each pointing at a key that is intentionally not configured.
     */
    @ApplicationScoped
    public static class Injected {

        @Inject
        @ConfigProperty(name = "boolean.nullvalue.default")
        private Optional<Boolean> booleanNullValue;

        @Inject
        @ConfigProperty(name = "string.nullvalue.default")
        private Optional<String> stringNullValue;

        @Inject
        @ConfigProperty(name = "long.nullvalue.default")
        private Optional<Long> longNullValue;

        @Inject
        @ConfigProperty(name = "integer.nullvalue.default")
        private Optional<Integer> integerNullValue;

        @Inject
        @ConfigProperty(name = "float.nullvalue.default")
        private Optional<Float> floatNullValue;

        @Inject
        @ConfigProperty(name = "double.nullvalue.default")
        private Optional<Double> doubleNullValue;

        @Inject
        @ConfigProperty(name = "character.nullvalue.default")
        private Optional<Character> characterNullValue;

        @Inject
        @ConfigProperty(name = "short.nullvalue.default")
        private Optional<Short> shortNullValue;

        @Inject
        @ConfigProperty(name = "byte.nullvalue.default")
        private Optional<Byte> byteNullValue;

        @Inject
        @ConfigProperty(name = "list.nullvalue.default")
        private Optional<List<String>> listNullValue;

        @Inject
        @ConfigProperty(name = "class.nullvalue.default")
        private Optional<Class> classNullValue;

        @Inject
        @ConfigProperty(name = "url.nullvalue.default")
        private Optional<URL> urlNullValue;

        @Inject
        @ConfigProperty(name = "duration.nullvalue.default")
        private Optional<Duration> durationNullValue;

        public Boolean getBooleanNullValue() {
            return booleanNullValue.orElse(null);
        }

        public String getStringNullValue() {
            return stringNullValue.orElse(null);
        }

        public Long getLongNullValue() {
            return longNullValue.orElse(null);
        }

        public Integer getIntegerNullValue() {
            return integerNullValue.orElse(null);
        }

        public Float getFloatNullValue() {
            return floatNullValue.orElse(null);
        }

        public Double getDoubleNullValue() {
            return doubleNullValue.orElse(null);
        }

        public Character getCharacterNullValue() {
            return characterNullValue.orElse(null);
        }

        public Short getShortNullValue() {
            return shortNullValue.orElse(null);
        }

        public Byte getByteNullValue() {
            return byteNullValue.orElse(null);
        }

        public List<String> getListNullValue() {
            return listNullValue.orElse(null);
        }

        public Class getClassNullValue() {
            return classNullValue.orElse(null);
        }

        public URL getUrlNullValue() {
            return urlNullValue.orElse(null);
        }

        public Duration getDurationNullValue() {
            return durationNullValue.orElse(null);
        }
    }
}
2,258
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/ProxyTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.test.internal;

import static java.util.Arrays.asList;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNotNull;
import static org.testng.Assert.assertNull;

import java.net.URL;
import java.time.Duration;
import java.util.Collection;
import java.util.List;

import javax.inject.Inject;

import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.testng.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.testng.annotations.Test;

/**
 * Exercises {@code @ConfigProperty}-annotated proxy interfaces: key lookup,
 * defaults, list/class conversion, null defaults (object and primitive),
 * variable interpolation, and interface-level key prefixes.
 */
public class ProxyTest extends Arquillian {
    private static final String LIST_KEY = SomeProxy.class.getName() + ".list";
    private static final String SOME_KEY = SomeProxy.class.getName() + ".key";
    private static final String SOME_OTHER_KEY = SomeProxy.class.getName() + ".key2";

    @Deployment
    public static WebArchive deploy() {
        // "ProxyTest.jar" — fixed typo in the archive name (was "PoxyTest.jar").
        JavaArchive testJar = ShrinkWrap
                .create(JavaArchive.class, "ProxyTest.jar")
                .addClasses(ProxyTest.class, SomeProxy.class, PrefixedSomeProxy.class)
                .addAsManifestResource(
                        new StringAsset(""
                                + "interpolated=a,${my_int_property},${MY_STRING_PROPERTY},${my.string.property}\n"
                                + "list.interpolated=a,${my_int_property},${MY_STRING_PROPERTY},${my.string.property}\n"
                                + "my.string.property=haha\n"
                                + "prefix.val=yes\n"
                                + LIST_KEY + "=a,b,1\n"
                                + SOME_KEY + "=yeah\n"
                                + SOME_OTHER_KEY + "=123\n"
                        ), "microprofile-config.properties")
                .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml");

        return ShrinkWrap
                .create(WebArchive.class, "providerTest.war")
                .addAsLibrary(testJar);
    }

    @Inject
    private PrefixedSomeProxy prefixed;

    @Inject
    private SomeProxy proxy;

    @Test
    public void test() {
        assertEquals(proxy.key(), "yeah");
        assertEquals(proxy.renamed(), "yeah");
        assertEquals(proxy.key2(), 123);
        assertEquals(proxy.key3(), "def");
        assertEquals(proxy.list(), asList("a", "b", "1"));
        assertEquals(proxy.listDefaults(), asList(1, 2, 1));
        assertEquals(proxy.listClasses(), asList(String.class, Integer.class));

        // Missing keys with no default resolve to null for wrapper/object types.
        assertNull(proxy.booleanNullValue());
        assertNull(proxy.stringNullValue());
        assertNull(proxy.byteNullValue());
        assertNull(proxy.integerNullValue());
        assertNull(proxy.longNullValue());
        assertNull(proxy.shortNullValue());
        assertNull(proxy.listNullValue());
        assertNull(proxy.classNullValue());
        assertNull(proxy.doubleNullValue());
        assertNull(proxy.durationNullValue());
        // These accessors existed on the proxy but were previously never asserted.
        assertNull(proxy.floatNullValue());
        assertNull(proxy.characterNullValue());
        assertNull(proxy.urlNullValue());

        // Missing keys resolve to the zero value for primitive return types.
        // NOTE: TestNG's assertEquals signature is (actual, expected); the
        // previous (expected, actual) order produced misleading failure messages.
        assertFalse(proxy.primitiveBooleanNullValue());
        assertEquals(proxy.primitiveLongNullValue(), 0L);
        assertEquals(proxy.primitiveIntegerNullValue(), 0);
        assertEquals(proxy.primitiveShortNullValue(), (short) 0);
        assertEquals(proxy.primitiveByteNullValue(), (byte) 0);
        assertEquals(proxy.primitiveFloatNullValue(), 0.0F);
        assertEquals(proxy.primitiveDoubleNullValue(), 0.0D);
        assertEquals(proxy.primitiveCharacterNullValue(), '\u0000');

        assertEquals(proxy.interpolated(), "a,45,woohoo,haha");
        assertEquals(proxy.listInterpolatedValue(), asList("a", "45", "woohoo", "haha"));
    }

    @Test
    public void prefix() {
        assertEquals(prefixed.val(), "yes");
    }

    /** Interface-level name acts as a prefix for the method-level names. */
    @ConfigProperty(name = "prefix.")
    public interface PrefixedSomeProxy {
        @ConfigProperty(name = "val")
        String val();
    }

    public interface SomeProxy {
        @ConfigProperty
        int key2();

        @ConfigProperty(defaultValue = "def")
        String key3();

        @ConfigProperty
        String key();

        @ConfigProperty(name = "org.apache.geronimo.config.test.internal.ProxyTest$SomeProxy.key")
        String renamed();

        @ConfigProperty
        Collection<String> list();

        @ConfigProperty(defaultValue = "java.lang.String,java.lang.Integer")
        Collection<Class<?>> listClasses();

        @ConfigProperty(defaultValue = "1,2,1")
        Collection<Integer> listDefaults();

        @ConfigProperty(name = "boolean.nullvalue.default")
        Boolean booleanNullValue();

        @ConfigProperty(name = "boolean.nullvalue.default")
        boolean primitiveBooleanNullValue();

        @ConfigProperty(name = "string.nullvalue.default")
        String stringNullValue();

        @ConfigProperty(name = "long.nullvalue.default")
        Long longNullValue();

        @ConfigProperty(name = "long.nullvalue.default")
        long primitiveLongNullValue();

        @ConfigProperty(name = "integer.nullvalue.default")
        Integer integerNullValue();

        @ConfigProperty(name = "integer.nullvalue.default")
        int primitiveIntegerNullValue();

        @ConfigProperty(name = "float.nullvalue.default")
        Float floatNullValue();

        @ConfigProperty(name = "float.nullvalue.default")
        float primitiveFloatNullValue();

        @ConfigProperty(name = "double.nullvalue.default")
        Double doubleNullValue();

        @ConfigProperty(name = "double.nullvalue.default")
        double primitiveDoubleNullValue();

        @ConfigProperty(name = "character.nullvalue.default")
        Character characterNullValue();

        @ConfigProperty(name = "character.nullvalue.default")
        char primitiveCharacterNullValue();

        @ConfigProperty(name = "short.nullvalue.default")
        Short shortNullValue();

        @ConfigProperty(name = "short.nullvalue.default")
        short primitiveShortNullValue();

        @ConfigProperty(name = "byte.nullvalue.default")
        Byte byteNullValue();

        @ConfigProperty(name = "byte.nullvalue.default")
        byte primitiveByteNullValue();

        @ConfigProperty(name = "list.nullvalue.default")
        List<String> listNullValue();

        @ConfigProperty(name = "list.interpolated")
        List<String> listInterpolatedValue();

        @ConfigProperty(name = "interpolated")
        String interpolated();

        @ConfigProperty(name = "class.nullvalue.default")
        Class classNullValue();

        @ConfigProperty(name = "url.nullvalue.default")
        URL urlNullValue();

        @ConfigProperty(name = "duration.nullvalue.default")
        Duration durationNullValue();
    }
}
2,259
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/SystemEnvConfigSourceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.test.internal; import org.eclipse.microprofile.config.Config; import org.eclipse.microprofile.config.ConfigProvider; import org.testng.Assert; import org.testng.annotations.Test; /** * This needs to have some ENV settings set up. * This is usually done via maven. * For running the test yourself you have to set the following environment properties: * * A_b_c=1 * A_B_C=2 * A_B_D=3 * A_B_e=4 */ public class SystemEnvConfigSourceTest { @Test public void testEnvReplacement() { Config config = ConfigProvider.getConfig(); Assert.assertEquals(config.getValue("A.b#c", Integer.class), Integer.valueOf(1)); Assert.assertEquals(config.getValue("a.b.c", Integer.class), Integer.valueOf(2)); Assert.assertEquals(config.getValue("a.b.d", Integer.class), Integer.valueOf(3)); Assert.assertEquals(config.getValue("a.b.e", Integer.class), Integer.valueOf(4)); } }
2,260
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/SystemPropertyConfigSourceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.test.internal; import static org.testng.Assert.assertTrue; import javax.enterprise.context.ApplicationScoped; import javax.enterprise.event.Observes; import javax.enterprise.inject.spi.AfterBeanDiscovery; import javax.enterprise.inject.spi.BeforeBeanDiscovery; import javax.enterprise.inject.spi.BeforeShutdown; import javax.enterprise.inject.spi.Extension; import javax.inject.Inject; import org.eclipse.microprofile.config.Config; import org.eclipse.microprofile.config.ConfigProvider; import org.eclipse.microprofile.config.inject.ConfigProperty; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.arquillian.testng.Arquillian; import org.jboss.shrinkwrap.api.Archive; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.asset.EmptyAsset; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.testng.Assert; import org.testng.annotations.Test; /** * It is important to ensure the config is reloaded before going live in case some * system properties are set during the starting extension lifecycle. 
*/ public class SystemPropertyConfigSourceTest extends Arquillian { @Deployment public static Archive<?> archive() { return ShrinkWrap.create(WebArchive.class, SystemPropertyConfigSourceTest.class.getSimpleName() + ".war") .addAsWebInfResource(EmptyAsset.INSTANCE, "classes/META-INF/beans.xml") .addAsServiceProvider(Extension.class, InitInExtension.class) .addClasses(SystemPropertyConfigSourceTest.class, Injected.class); } @Inject private Injected injected; @Test public void testSystemPropsLoadedExtensionValue() { assertTrue(injected.getSet()); } @ApplicationScoped public static class Injected { @Inject @ConfigProperty(name = "org.apache.geronimo.config.test.internal.SystemPropertyConfigSourceTest$InitInExtension") private Boolean set; public Boolean getSet() { return set; } } public static class InitInExtension implements Extension { private String originalCopy; void eagerInit(@Observes final BeforeBeanDiscovery beforeBeanDiscovery) { originalCopy = System.getProperty("org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy"); // enfore the default, it is overriden for surefire System.setProperty("org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy", "true"); // eager load -> loads system props and copy ConfigProvider.getConfig(); } // before validation to ensure config validation passes void afterBeanDiscovery(@Observes final AfterBeanDiscovery afterBeanDiscovery) { // with copy this should get ignored but we will reload it before the validation System.setProperty(InitInExtension.class.getName(), "true"); } void beforeShutdown(@Observes final BeforeShutdown beforeShutdown) { System.clearProperty(InitInExtension.class.getName()); if (originalCopy != null) { System.setProperty("org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy", originalCopy); } else { System.clearProperty("org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy"); } } } }
2,261
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/PropertyFileConfigSourceTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.test.internal; import org.apache.geronimo.config.configsource.PropertyFileConfigSource; import org.testng.annotations.Test; import java.net.URL; import java.nio.file.Paths; import static org.testng.AssertJUnit.assertTrue; public class PropertyFileConfigSourceTest { @Test public void testLoadMissingFile() throws Exception{ URL url = Paths.get("some/missing/File.txt").toUri().toURL(); PropertyFileConfigSource propertyFileConfigSource = new PropertyFileConfigSource(url); assertTrue(propertyFileConfigSource.getProperties().isEmpty()); } }
2,262
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/ProviderTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.test.internal;

import java.util.Optional;

import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import javax.inject.Provider;

import org.apache.geronimo.config.test.testng.SystemPropertiesLeakProtector;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.testng.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.testng.Assert;
import org.testng.annotations.Test;

/**
 * Verifies {@code Provider<T>} injection points re-resolve the config value on
 * every {@code get()}, including the {@code Optional}-wrapped combinations.
 */
public class ProviderTest extends Arquillian {
    private static final String SOME_KEY = "org.apache.geronimo.config.test.internal.somekey";

    @Deployment
    public static WebArchive deploy() {
        JavaArchive testJar = ShrinkWrap
                .create(JavaArchive.class, "configProviderTest.jar")
                .addClasses(ProviderTest.class, SomeBean.class)
                .addAsManifestResource(
                        new StringAsset(SOME_KEY + "=someval\n"),
                        "microprofile-config.properties")
                .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml");

        return ShrinkWrap
                .create(WebArchive.class, "providerTest.war")
                .addAsLibrary(testJar);
    }

    private @Inject SomeBean someBean;

    @Test
    public void testConfigProvider() {
        final SystemPropertiesLeakProtector fixer = new SystemPropertiesLeakProtector();
        // lazy way to reset all the system props manipulated by this test
        fixer.onStart(null);
        try {
            System.setProperty(SOME_KEY, "someval");
            String myconfig = someBean.getMyconfig();
            Assert.assertEquals(myconfig, "someval");
            Assert.assertEquals(someBean.getOptionalProvider().get().get(), "someval");
            Assert.assertEquals(someBean.getProviderOptional().get().get(), "someval");

            // Changing the system property must be visible on the next get().
            System.setProperty(SOME_KEY, "otherval");
            myconfig = someBean.getMyconfig();
            Assert.assertEquals(myconfig, "otherval");
        } finally {
            // BUGFIX: previously not in a finally block, so a failing assertion
            // leaked the mutated system properties into subsequent tests.
            fixer.onFinish(null);
        }
    }

    @RequestScoped
    public static class SomeBean {
        @Inject
        @ConfigProperty(name = SOME_KEY)
        private Provider<String> myconfig;

        @Inject
        @ConfigProperty(name = SOME_KEY)
        private Optional<Provider<String>> optionalProvider;

        @Inject
        @ConfigProperty(name = SOME_KEY)
        private Provider<Optional<String>> providerOptional;

        public Optional<Provider<String>> getOptionalProvider() {
            return optionalProvider;
        }

        public Provider<Optional<String>> getProviderOptional() {
            return providerOptional;
        }

        public String getMyconfig() {
            return myconfig.get();
        }
    }
}
2,263
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/ConfigInjectionTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.test.internal; import javax.inject.Inject; import org.eclipse.microprofile.config.Config; import org.jboss.arquillian.container.test.api.Deployment; import org.jboss.arquillian.testng.Arquillian; import org.jboss.shrinkwrap.api.ShrinkWrap; import org.jboss.shrinkwrap.api.asset.EmptyAsset; import org.jboss.shrinkwrap.api.spec.JavaArchive; import org.jboss.shrinkwrap.api.spec.WebArchive; import org.testng.Assert; import org.testng.annotations.Test; public class ConfigInjectionTest extends Arquillian { @Deployment public static WebArchive deploy() { JavaArchive testJar = ShrinkWrap .create(JavaArchive.class, "configProviderTest.jar") .addClasses(ConfigInjectionTest.class) .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml"); return ShrinkWrap .create(WebArchive.class, "providerTest.war") .addAsLibrary(testJar); } @Inject private Config config; @Test public void testConfigProvider() { Assert.assertNotNull(config); // is injected Assert.assertNotNull(config.getValue("java.version", String.class)); // is usable } }
2,264
0
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test
Create_ds/geronimo-config/impl/src/test/java/org/apache/geronimo/config/test/internal/ArrayTypeTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.geronimo.config.test.internal;

import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.jboss.arquillian.container.test.api.Deployment;
import org.jboss.arquillian.testng.Arquillian;
import org.jboss.shrinkwrap.api.ShrinkWrap;
import org.jboss.shrinkwrap.api.asset.EmptyAsset;
import org.jboss.shrinkwrap.api.asset.StringAsset;
import org.jboss.shrinkwrap.api.spec.JavaArchive;
import org.jboss.shrinkwrap.api.spec.WebArchive;
import org.testng.Assert;
import org.testng.annotations.Test;

import javax.enterprise.context.RequestScoped;
import javax.inject.Inject;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

import static java.util.Arrays.asList;

/**
 * Exercises comma-separated config values injected as {@code int[]},
 * {@code List}, and {@code Set}, including defaults, placeholder defaults,
 * and escaped commas.
 */
public class ArrayTypeTest extends Arquillian {
    private static final String SOME_KEY = "org.apache.geronimo.config.test.internal.somekey";
    private static final String SOME_OTHER_KEY = "org.apache.geronimo.config.test.internal.someotherkey";

    @Deployment
    public static WebArchive deploy() {
        final StringAsset properties = new StringAsset(
                SOME_KEY + "=1,2,3\n"
                        + SOME_OTHER_KEY + "=1,2\\\\,3\n"
                        + "placeholder=4,5,6\n");

        final JavaArchive jar = ShrinkWrap
                .create(JavaArchive.class, "arrayTest.jar")
                .addClasses(ArrayTypeTest.class, SomeBean.class)
                .addAsManifestResource(properties, "microprofile-config.properties")
                .addAsManifestResource(EmptyAsset.INSTANCE, "beans.xml");

        return ShrinkWrap
                .create(WebArchive.class, "arrayTest.war")
                .addAsLibrary(jar);
    }

    @Inject
    private SomeBean someBean;

    @Test
    public void testArraySetListInjection() {
        // Raw string form is untouched by the splitter.
        Assert.assertEquals(someBean.getStringValue(), "1,2,3");
        // Same value converted into each supported container type.
        Assert.assertEquals(someBean.getMyconfig(), new int[]{1, 2, 3});
        Assert.assertEquals(someBean.getIntValues(), asList(1, 2, 3));
        Assert.assertEquals(someBean.getIntSet(), new LinkedHashSet<>(asList(1, 2, 3)));
        // Missing key falls back to defaultValue, literal or placeholder-resolved.
        Assert.assertEquals(someBean.getIntSetDefault(), new LinkedHashSet<>(asList(1, 2, 3)));
        Assert.assertEquals(someBean.getIntSetPlaceholderDefault(), new LinkedHashSet<>(asList(4, 5, 6)));
    }

    @Test
    public void testListWithEscaping() {
        // "1,2\,3" — the escaped comma is kept inside the second element.
        Assert.assertEquals(someBean.getValues(), asList("1", "2,3"));
    }

    @RequestScoped
    public static class SomeBean {
        @Inject
        @ConfigProperty(name = SOME_KEY)
        private int[] myconfig;

        @Inject
        @ConfigProperty(name = SOME_KEY)
        private List<Integer> intValues;

        @Inject
        @ConfigProperty(name = SOME_KEY)
        private Set<Integer> intSet;

        @Inject
        @ConfigProperty(name = SOME_KEY + ".missing", defaultValue = "1,2,3")
        private Set<Integer> intSetDefault;

        @Inject
        @ConfigProperty(name = SOME_KEY + ".missing", defaultValue = "${placeholder}")
        private Set<Integer> intSetPlaceholderDefault;

        @Inject
        @ConfigProperty(name = SOME_KEY)
        private String stringValue;

        @Inject
        @ConfigProperty(name = SOME_OTHER_KEY)
        private List<String> values;

        public Set<Integer> getIntSetPlaceholderDefault() {
            return intSetPlaceholderDefault;
        }

        public Set<Integer> getIntSetDefault() {
            return intSetDefault;
        }

        public String getStringValue() {
            return stringValue;
        }

        public int[] getMyconfig() {
            return myconfig;
        }

        public List<Integer> getIntValues() {
            return intValues;
        }

        public Set<Integer> getIntSet() {
            return intSet;
        }

        public List<String> getValues() {
            return values;
        }
    }
}
2,265
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/ConfigValueImpl.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config; import org.eclipse.microprofile.config.spi.Converter; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.NoSuchElementException; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import javax.enterprise.inject.Typed; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Typed public class ConfigValueImpl<T> { private static final Logger logger = Logger.getLogger(ConfigValueImpl.class.getName()); private final ConfigImpl config; private String keyOriginal; private String keyResolved; private Class<?> configEntryType = String.class; private String[] lookupChain; private boolean evaluateVariables = false; private long cacheTimeNs = -1; private volatile long reloadAfter = -1; private long lastReloadedAt = -1; private T lastValue = null; //X will later get added again private ConfigChanged valueChangeListener; private boolean isList; private boolean isSet; private T defaultValue; private boolean withDefault; /** * Alternative Converter to be used instead of the default converter */ private Converter<T> 
converter; public ConfigValueImpl(ConfigImpl config, String key) { this.config = config; this.keyOriginal = key; } //X @Override public <N> ConfigValueImpl<N> as(Class<N> clazz) { configEntryType = clazz; return (ConfigValueImpl<N>) this; } //X @Override public ConfigValueImpl<List<T>> asList() { isList = true; ConfigValueImpl<List<T>> listTypedResolver = (ConfigValueImpl<List<T>>) this; if (defaultValue == null) { // the default for lists is an empty list instead of null return listTypedResolver.withDefault(Collections.<T>emptyList()); } return listTypedResolver; } //X @Override public ConfigValueImpl<Set<T>> asSet() { isSet = true; ConfigValueImpl<Set<T>> listTypedResolver = (ConfigValueImpl<Set<T>>) this; if (defaultValue == null) { // the default for lists is an empty list instead of null return listTypedResolver.withDefault(Collections.<T>emptySet()); } return listTypedResolver; } //X @Override public ConfigValueImpl<T> withDefault(T value) { defaultValue = value; withDefault = true; return this; } //X @Override public ConfigValueImpl<T> withStringDefault(String value) { if (value == null || value.isEmpty()) { throw new RuntimeException("Empty String or null supplied as string-default value for property " + keyOriginal); } value = replaceVariables(value); if (isList) { defaultValue = splitAndConvertListValue(value); } else { defaultValue = convert(value); } withDefault = true; return this; } //X @Override public T getDefaultValue() { return defaultValue; } //X @Override public ConfigValueImpl<T> useConverter(Converter<T> converter) { this.converter = converter; return this; } //X @Override public ConfigValueImpl<T> cacheFor(long value, TimeUnit timeUnit) { this.cacheTimeNs = timeUnit.toNanos(value); return this; } //X @Override public ConfigValueImpl<T> evaluateVariables(boolean evaluateVariables) { this.evaluateVariables = evaluateVariables; return this; } public ConfigValueImpl<T> withLookupChain(String... 
postfixNames) { this.lookupChain = postfixNames; return this; } //X @Override public Optional<T> getOptionalValue() { return Optional.ofNullable(get()); } //X will later get added again @Override /*X public ConfigValueImpl<T> onChange(ConfigChanged valueChangeListener) { this.valueChangeListener = valueChangeListener; return this; } */ //X @Override public List<T> getValueList() { String rawList = (String) get(false); List<T> values = new ArrayList<T>(); StringBuilder sb = new StringBuilder(64); for (int i= 0; i < rawList.length(); i++) { char c = rawList.charAt(i); if ('\\' == c) { if (i == rawList.length()) { throw new IllegalStateException("incorrect escaping of key " + keyOriginal + " value: " + rawList); } char nextChar = rawList.charAt(i+1); if (nextChar == '\\') { sb.append('\\'); } else if (nextChar == ',') { sb.append(','); } i++; } else if (',' == c) { addListValue(values, sb); } else { sb.append(c); } } addListValue(values, sb); return values; } private void addListValue(List<T> values, StringBuilder sb) { String val = sb.toString().trim(); if (!val.isEmpty()) { values.add(convert(val)); } sb.setLength(0); } public T get() { return get(true); } //X @Override public T getValue() { T val = get(); if (val == null) { throw new NoSuchElementException("No config value present for key " + keyOriginal); } return val; } //X @Override /*X will come with the next version public T getValue(ConfigSnapshot configSnapshot) { ConfigSnapshotImpl snapshotImpl = (ConfigSnapshotImpl) configSnapshot; if (!snapshotImpl.getConfigValues().containsKey(this)) { throw new IllegalArgumentException("The TypedResolver for key " + getPropertyName() + " does not belong the given ConfigSnapshot!"); } return (T) snapshotImpl.getConfigValues().get(this); } */ private T get(boolean convert) { long now = -1; if (cacheTimeNs > 0) { now = System.nanoTime(); if (now <= reloadAfter) { // now check if anything in the underlying Config got changed long lastCfgChange = config.getLastChanged(); if 
(lastCfgChange < lastReloadedAt) { return lastValue; } } } String valueStr = resolveStringValue(); if ((valueStr == null || valueStr.isEmpty()) && withDefault) { return defaultValue; } T value; if (isList || isSet) { value = splitAndConvertListValue(valueStr); if (isSet) { value = (T) new HashSet((List) value); } } else { value = convert ? convert(valueStr) : (T) valueStr; } //X will later get added again /*X if (valueChangeListener != null && (value != null && !value.equals(lastValue) || (value == null && lastValue != null)) ) { valueChangeListener.onValueChange(keyOriginal, lastValue, value); } */ lastValue = value; if (cacheTimeNs > 0) { reloadAfter = now + cacheTimeNs; lastReloadedAt = now; } return value; } private String resolveStringValue() { String value = null; if (lookupChain != null) { // first we resolve the value List<String> postfixVals = new ArrayList<>(); for (String postfix : lookupChain) { if (postfix.startsWith("${") && postfix.length() > 3) { String varName = postfix.substring(2, postfix.length()-1); String varValue = config.getValue(varName); if (varValue != null && varValue.length() > 0) { postfixVals.add(varValue); } } else { postfixVals.add(postfix); } } // binary count down for (int mask = (1 << postfixVals.size()) - 1; mask > 0; mask--) { StringBuilder sb = new StringBuilder(keyOriginal); for (int loc = 0; loc < postfixVals.size(); loc++) { int bitPos = 1 << (postfixVals.size() - loc - 1); if ((mask & bitPos) > 0) { sb.append('.').append(postfixVals.get(loc)); } } value = config.getValue(sb.toString()); if (value != null && value.length() > 0) { keyResolved = sb.toString(); break; } } } if (value == null) { value = config.getValue(keyOriginal); this.keyResolved = keyOriginal; } if (evaluateVariables && value != null) { value = replaceVariables(value); } return value; } private String replaceVariables(String value) { // recursively resolve any ${varName} in the value int startVar = 0; while ((startVar = value.indexOf("${", startVar)) >= 0) 
{ int endVar = value.indexOf("}", startVar); if (endVar <= 0) { break; } String varName = value.substring(startVar + 2, endVar); if (varName.isEmpty()) { break; } String variableValue = config.access(varName).evaluateVariables(true).get(); if (variableValue != null) { value = value.replace("${" + varName + "}", variableValue); } startVar++; } return value; } //X @Override public String getPropertyName() { return keyOriginal; } //X @Override public String getResolvedPropertyName() { return keyResolved; } private T convert(String value) { if (converter != null) { return converter.convert(value); } if (String.class == configEntryType) { return (T) value; } return (T) config.convert(value, configEntryType); } private T splitAndConvertListValue(String valueStr) { if (valueStr == null) { return null; } List list = new ArrayList(); StringBuilder currentValue = new StringBuilder(); int length = valueStr.length(); for (int i = 0; i < length; i++) { char c = valueStr.charAt(i); if (c == '\\') { if (i < length - 1) { char nextC = valueStr.charAt(i + 1); currentValue.append(nextC); i++; } } else if (c == ',') { String trimedVal = currentValue.toString().trim(); if (trimedVal.length() > 0) { list.add(convert(trimedVal)); } currentValue.setLength(0); } else { currentValue.append(c); } } String trimedVal = currentValue.toString().trim(); if (trimedVal.length() > 0) { list.add(convert(trimedVal)); } return (T) list; } }
2,266
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/DefaultConfigProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config;

import java.lang.ref.WeakReference;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.WeakHashMap;

import javax.enterprise.inject.Typed;
import javax.enterprise.inject.Vetoed;

import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.spi.ConfigBuilder;
import org.eclipse.microprofile.config.spi.ConfigProviderResolver;

/**
 * Default {@link ConfigProviderResolver} which caches one {@link Config}
 * per ClassLoader. Both the map keys (ClassLoaders) and the values
 * (WeakReferences to Configs) are weak, so neither keeps a ClassLoader or a
 * Config alive on its own.
 *
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 */
@Typed
@Vetoed
public class DefaultConfigProvider extends ConfigProviderResolver {

    private static Map<ClassLoader, WeakReference<Config>> configs
            = Collections.synchronizedMap(new WeakHashMap<ClassLoader, WeakReference<Config>>());

    @Override
    public Config getConfig() {
        return getConfig(Thread.currentThread().getContextClassLoader());
    }

    /**
     * Return the Config for the given ClassLoader, lazily creating and
     * registering it on first access (double-checked under the class lock).
     */
    @Override
    public Config getConfig(ClassLoader forClassLoader) {
        Config config = existingConfig(forClassLoader);
        if (config == null) {
            synchronized (DefaultConfigProvider.class) {
                config = existingConfig(forClassLoader);
                if (config == null) {
                    config = getBuilder().forClassLoader(forClassLoader)
                            .addDefaultSources()
                            .addDiscoveredSources()
                            .addDiscoveredConverters()
                            .build();
                    registerConfig(config, forClassLoader);
                }
            }
        }
        return config;
    }

    /**
     * @return the already registered Config for the ClassLoader, or null if
     *         none got registered yet or its WeakReference got cleared
     */
    Config existingConfig(ClassLoader forClassLoader) {
        WeakReference<Config> configRef = configs.get(forClassLoader);
        return configRef != null ? configRef.get() : null;
    }

    @Override
    public void registerConfig(Config config, ClassLoader forClassLoader) {
        synchronized (DefaultConfigProvider.class) {
            configs.put(forClassLoader, new WeakReference<>(config));
        }
    }

    @Override
    public ConfigBuilder getBuilder() {
        return new DefaultConfigBuilder();
    }

    /**
     * Remove the given Config from the cache and close it if it is
     * AutoCloseable. A null argument releases the Config of the current TCCL.
     */
    @Override
    public void releaseConfig(Config config) {
        if (config == null) {
            // get the config from the current TCCL
            ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
            if (classLoader == null) {
                classLoader = DefaultConfigProvider.class.getClassLoader();
            }
            config = existingConfig(classLoader);
        }
        if (config != null) {
            synchronized (DefaultConfigProvider.class) {
                Iterator<Map.Entry<ClassLoader, WeakReference<Config>>> it = configs.entrySet().iterator();
                while (it.hasNext()) {
                    // FIX: dereference the WeakReference exactly once.
                    // The original called get() twice ('!= null && == config');
                    // the reference can get cleared by the GC between the two
                    // calls. A single get() also makes the null check redundant
                    // since config is known to be non-null here.
                    Config registered = it.next().getValue().get();
                    if (registered == config) {
                        it.remove();
                        break;
                    }
                }

                if (config instanceof AutoCloseable) {
                    try {
                        ((AutoCloseable) config).close();
                    } catch (Exception e) {
                        throw new RuntimeException("Error while closing Config", e);
                    }
                }
            }
        }
    }
}
2,267
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/ConfigImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config;

import org.apache.geronimo.config.converters.ByteConverter;
import org.apache.geronimo.config.converters.CharacterConverter;
import org.apache.geronimo.config.converters.ClassConverter;
import org.apache.geronimo.config.converters.DurationConverter;
import org.apache.geronimo.config.converters.ShortConverter;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.spi.ConfigSource;

import javax.enterprise.inject.Typed;
import javax.enterprise.inject.Vetoed;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.net.URL;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;

import org.apache.geronimo.config.converters.BooleanConverter;
import org.apache.geronimo.config.converters.DoubleConverter;
import org.apache.geronimo.config.converters.FloatConverter;
import org.apache.geronimo.config.converters.ImplicitConverter;
import org.apache.geronimo.config.converters.IntegerConverter;
import org.apache.geronimo.config.converters.LongConverter;
import org.apache.geronimo.config.converters.StringConverter;
import org.apache.geronimo.config.converters.URLConverter;
import org.eclipse.microprofile.config.spi.Converter;

import javax.annotation.Priority;

/**
 * Default {@link Config} implementation: an ordered list of ConfigSources
 * plus a registry of explicit and lazily-created implicit Converters.
 *
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 * @author <a href="mailto:johndament@apache.org">John D. Ament</a>
 */
@Typed
@Vetoed
public class ConfigImpl implements Config, AutoCloseable {
    protected Logger logger = Logger.getLogger(ConfigImpl.class.getName());

    // sorted descending by ordinal, see sortDescending()
    protected List<ConfigSource> configSources = new ArrayList<>();
    protected Map<Type, Converter> converters = new HashMap<>();
    protected Map<Type, Converter> implicitConverters = new ConcurrentHashMap<>();

    // volatile to a.) make the read/write behave atomic and b.) guarantee multi-thread safety
    private volatile long lastChanged = 0;

    public ConfigImpl() {
        registerDefaultConverter();
    }

    /** Register the built-in Converters for the common JDK types. */
    private void registerDefaultConverter() {
        converters.put(String.class, StringConverter.INSTANCE);
        converters.put(Boolean.class, BooleanConverter.INSTANCE);
        converters.put(boolean.class, BooleanConverter.INSTANCE);
        converters.put(Double.class, DoubleConverter.INSTANCE);
        converters.put(double.class, DoubleConverter.INSTANCE);
        converters.put(Float.class, FloatConverter.INSTANCE);
        converters.put(float.class, FloatConverter.INSTANCE);
        converters.put(Integer.class, IntegerConverter.INSTANCE);
        converters.put(int.class, IntegerConverter.INSTANCE);
        converters.put(Long.class, LongConverter.INSTANCE);
        converters.put(long.class, LongConverter.INSTANCE);

        converters.put(Byte.class, ByteConverter.INSTANCE);
        converters.put(byte.class, ByteConverter.INSTANCE);
        converters.put(Short.class, ShortConverter.INSTANCE);
        converters.put(short.class, ShortConverter.INSTANCE);
        converters.put(Character.class, CharacterConverter.INSTANCE);
        converters.put(char.class, CharacterConverter.INSTANCE);

        converters.put(Class.class, ClassConverter.INSTANCE);
        converters.put(Duration.class, DurationConverter.INSTANCE);
        converters.put(URL.class, URLConverter.INSTANCE);
    }

    @Override
    public <T> Optional<T> getOptionalValue(String propertyName, Class<T> asType) {
        String value = getValue(propertyName);
        if (value != null && value.length() == 0) {
            // treat an empty string as not existing
            value = null;
        }
        return Optional.ofNullable(convert(value, asType));
    }

    @Override
    public <T> T getValue(String propertyName, Class<T> propertyType) {
        String value = getValue(propertyName);
        if (value == null) {
            throw new NoSuchElementException("No configured value found for config key " + propertyName);
        }
        return convert(value, propertyType);
    }

    /**
     * @return the raw String value from the first (= highest ordinal)
     *         ConfigSource which has the key, or null
     */
    public String getValue(String key) {
        for (ConfigSource configSource : configSources) {
            String value = configSource.getValue(key);

            if (value != null) {
                if (logger.isLoggable(Level.FINE)) {
                    logger.log(Level.FINE, "found value {0} for key {1} in ConfigSource {2}.",
                            new Object[]{value, key, configSource.getName()});
                }
                return value;
            }
        }
        return null;
    }

    /**
     * Convert the given raw value via the registered Converter for asType.
     *
     * @throws IllegalArgumentException if no Converter is registered for asType
     */
    public <T> T convert(String value, Class<T> asType) {
        if (value != null) {
            Converter<T> converter = getConverter(asType);
            return converter.convert(value);
        }

        return null;
    }

    private <T> Converter getConverter(Class<T> asType) {
        Converter converter = converters.get(asType);
        if (converter == null) {
            converter = getImplicitConverter(asType);
        }
        if (converter == null) {
            throw new IllegalArgumentException("No Converter registered for class " + asType);
        }
        return converter;
    }

    /**
     * Lazily build and cache an implicit Converter (array component
     * conversion, or a type providing a suitable ctor/factory method).
     * Double-checked under the map's own lock.
     */
    private <T> Converter getImplicitConverter(Class<T> asType) {
        Converter converter = implicitConverters.get(asType);
        if (converter == null) {
            synchronized (implicitConverters) {
                converter = implicitConverters.get(asType);
                if (converter == null) {
                    if (asType.isArray()) {
                        Converter singleItemConverter = getConverter(asType.getComponentType());
                        if (singleItemConverter == null) {
                            return null;
                        }
                        else {
                            converter = new ImplicitConverter.ImplicitArrayConverter(singleItemConverter, asType.getComponentType());
                            implicitConverters.putIfAbsent(asType, converter);
                        }
                    }
                    else {
                        // try to check whether the class is an 'implicit converter'
                        converter = ImplicitConverter.getImplicitConverter(asType);
                        if (converter != null) {
                            implicitConverters.putIfAbsent(asType, converter);
                        }
                    }
                }
            }
        }
        return converter;
    }

    /** Entry point of the fluent typed-value access API. */
    public ConfigValueImpl<String> access(String key) {
        return new ConfigValueImpl<>(this, key);
    }

    @Override
    public Iterable<String> getPropertyNames() {
        return configSources.stream().flatMap(c -> c.getPropertyNames().stream()).collect(Collectors.toSet());
    }

    @Override
    public Iterable<ConfigSource> getConfigSources() {
        return Collections.unmodifiableList(configSources);
    }

    public synchronized void addConfigSources(List<ConfigSource> configSourcesToAdd) {
        List<ConfigSource> allConfigSources = new ArrayList<>(configSources);
        // TODO(To Be Fixed): configSourcesToAdd.forEach(cs -> cs.setOnAttributeChange(this::reportConfigChange));
        allConfigSources.addAll(configSourcesToAdd);

        // finally put all the configSources back into the map
        configSources = sortDescending(allConfigSources);
    }

    /**
     * Register a Converter; on a type clash the one with the higher
     * {@code @Priority} (default 100) wins.
     */
    public synchronized void addConverter(Converter<?> converter) {
        if (converter == null) {
            return;
        }

        Type targetType = getTypeOfConverter(converter.getClass());
        if (targetType == null ) {
            throw new IllegalStateException("Converter " + converter.getClass() + " must be a ParameterisedType");
        }

        Converter oldConverter = converters.get(targetType);
        if (oldConverter == null || getPriority(converter) > getPriority(oldConverter)) {
            converters.put(targetType, converter);
        }
    }

    public void addPrioritisedConverter(DefaultConfigBuilder.PrioritisedConverter prioritisedConverter) {
        Converter oldConverter = converters.get(prioritisedConverter.getType());
        if (oldConverter == null || prioritisedConverter.getPriority() >= getPriority(oldConverter)) {
            converters.put(prioritisedConverter.getType(), prioritisedConverter.getConverter());
        }
    }

    private int getPriority(Converter<?> converter) {
        int priority = 100;
        Priority priorityAnnotation = converter.getClass().getAnnotation(Priority.class);
        if (priorityAnnotation != null) {
            priority = priorityAnnotation.value();
        }
        return priority;
    }

    public Map<Type, Converter> getConverters() {
        return converters;
    }

    /**
     * Close every AutoCloseable Converter and ConfigSource, collecting all
     * failures and rethrowing them as one RuntimeException at the end so a
     * single failing close cannot prevent the others from running.
     */
    @Override
    public void close() throws Exception {
        List<Exception> exceptions = new ArrayList<>();

        converters.values().stream()
                .filter(c -> c instanceof AutoCloseable)
                .map(AutoCloseable.class::cast)
                .forEach(c -> {
                    try {
                        c.close();
                    } catch (Exception e) {
                        exceptions.add(e);
                    }
                });

        configSources.stream()
                .filter(c -> c instanceof AutoCloseable)
                .map(AutoCloseable.class::cast)
                .forEach(c -> {
                    try {
                        c.close();
                    } catch (Exception e) {
                        exceptions.add(e);
                    }
                });

        if (!exceptions.isEmpty()) {
            StringBuilder sb = new StringBuilder(1024);
            sb.append("The following Exceptions got detected while shutting down the Config:\n");
            for (Exception exception : exceptions) {
                sb.append(exception.getClass().getName())
                        .append(" ")
                        .append(exception.getMessage())
                        .append('\n');
            }

            throw new RuntimeException(sb.toString(), exceptions.get(0));
        }
    }

    /**
     * ConfigSources are sorted with descending ordinal.
     * If 2 ConfigSources have the same ordinal, then they get sorted according to their name, alphabetically.
     */
    private List<ConfigSource> sortDescending(List<ConfigSource> configSources) {
        configSources.sort(
                (configSource1, configSource2) -> {
                    int compare = Integer.compare(configSource2.getOrdinal(), configSource1.getOrdinal());
                    if (compare == 0) {
                        return configSource1.getName().compareTo(configSource2.getName());
                    }
                    return compare;
                });
        return configSources;
    }

    /** Walk interfaces (then superclasses) to find the Converter's type argument. */
    private Type getTypeOfConverter(Class clazz) {
        if (clazz.equals(Object.class)) {
            return null;
        }

        Type[] genericInterfaces = clazz.getGenericInterfaces();
        for (Type genericInterface : genericInterfaces) {
            if (genericInterface instanceof ParameterizedType) {
                ParameterizedType pt = (ParameterizedType) genericInterface;
                if (pt.getRawType().equals(Converter.class)) {
                    Type[] typeArguments = pt.getActualTypeArguments();
                    if (typeArguments.length != 1) {
                        throw new IllegalStateException("Converter " + clazz + " must be a ParameterisedType");
                    }
                    return typeArguments[0];
                }
            }
        }

        return getTypeOfConverter(clazz.getSuperclass());
    }

    public void onAttributeChange(Set<String> attributesChanged) {
        // this is to force an incremented lastChanged even on time glitches and fast updates
        long newLastChanged = System.nanoTime();
        // FIX: the original used 'lastChanged++' in the ternary, i.e. the
        // classic 'x = x++' self-assignment: the post-increment result got
        // overwritten by the assignment, so lastChanged never advanced when
        // nanoTime() had not moved forward - exactly the case this branch
        // exists to handle.
        lastChanged = lastChanged >= newLastChanged ? lastChanged + 1 : newLastChanged;
    }

    /**
     * @return the nanoTime when the last change got reported by a ConfigSource
     */
    public long getLastChanged() {
        return lastChanged;
    }
}
2,268
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/DefaultConfigBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config;

import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.spi.ConfigBuilder;
import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.ConfigSourceProvider;
import org.eclipse.microprofile.config.spi.Converter;

import org.apache.geronimo.config.configsource.PropertyFileConfigSourceProvider;
import org.apache.geronimo.config.configsource.SystemEnvConfigSource;
import org.apache.geronimo.config.configsource.SystemPropertyConfigSource;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.ServiceLoader;
import java.util.logging.Logger;
import java.util.stream.Collectors;

import javax.enterprise.inject.Typed;
import javax.enterprise.inject.Vetoed;

import static java.util.Arrays.asList;

/**
 * Default {@link ConfigBuilder}: collects ConfigSources and Converters
 * (explicit, default, and ServiceLoader-discovered) and assembles a
 * {@link ConfigImpl} from them.
 *
 * NOTE(review): build() mutates builder state (forClassLoader gets resolved
 * in place and discovered Converters get added to the converters list), so
 * calling build() twice on the same builder instance duplicates discovered
 * converters - presumably builders are single-use; confirm before reusing.
 *
 * @author <a href="mailto:rmannibucau@apache.org">Romain Manni-Bucau</a>
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 */
@Typed
@Vetoed
public class DefaultConfigBuilder implements ConfigBuilder {
    // ClassLoader used for ServiceLoader discovery and resource lookup;
    // lazily resolved to the TCCL in build() when not set explicitly
    private ClassLoader forClassLoader;
    // ConfigSources added explicitly via withSources()
    private final List<ConfigSource> sources = new ArrayList<>();
    // Converters added explicitly via withConverters() (priority from @Priority)
    private final List<Converter<?>> converters = new ArrayList<>();
    // Converters added via withConverter() with an explicit priority; one per type
    private final Map<Class<?>, PrioritisedConverter> prioritisedConverters = new HashMap<>();
    // the following flags default to 'ignore'; the add* methods flip them off
    private boolean ignoreDefaultSources = true;
    private boolean ignoreDiscoveredSources = true;
    private boolean ignoreDiscoveredConverters = true;

    /** Enable the built-in sources (system env, system properties, microprofile-config.properties). */
    @Override
    public ConfigBuilder addDefaultSources() {
        this.ignoreDefaultSources = false;
        return this;
    }

    /** Enable ServiceLoader discovery of ConfigSources and ConfigSourceProviders. */
    @Override
    public ConfigBuilder addDiscoveredSources() {
        this.ignoreDiscoveredSources = false;
        return this;
    }

    @Override
    public ConfigBuilder forClassLoader(final ClassLoader loader) {
        this.forClassLoader = loader;
        return this;
    }

    @Override
    public ConfigBuilder withSources(final ConfigSource... sources) {
        this.sources.addAll(asList(sources));
        return this;
    }

    @Override
    public ConfigBuilder withConverters(Converter<?>... converters) {
        this.converters.addAll(asList(converters));
        return this;
    }

    /**
     * Register a Converter for the given type with an explicit priority.
     * The highest priority for a type wins; two registrations with equal
     * priority for the same type are rejected as ambiguous.
     *
     * @throws IllegalStateException if a converter with the same priority
     *         already got registered for the type
     */
    @Override
    public <T> ConfigBuilder withConverter(Class<T> type, int priority, Converter<T> converter) {
        PrioritisedConverter oldPrioritisedConverter = prioritisedConverters.get(type);
        if (oldPrioritisedConverter != null) {
            if (oldPrioritisedConverter.priority == priority) {
                throw new IllegalStateException("Found 2 converters with the same priority for type " + type
                        + ". This will result in random behaviour -> aborting! Previous Converter: "
                        + oldPrioritisedConverter.converter.getClass()
                        + " 2nd Converter: " + converter.getClass());
            }
            if (oldPrioritisedConverter.priority > priority) {
                // existing registration outranks the new one -> keep it
                return this;
            }
        }
        prioritisedConverters.put(type, new PrioritisedConverter(type, priority, converter));
        return this;
    }

    /** Enable ServiceLoader discovery of Converters. */
    @Override
    public ConfigBuilder addDiscoveredConverters() {
        ignoreDiscoveredConverters = false;
        return this;
    }

    /**
     * Assemble the Config: resolve the ClassLoader, gather default, explicit
     * and discovered sources/converters, and hand everything to a ConfigImpl.
     */
    @Override
    public Config build() {
        List<ConfigSource> configSources = new ArrayList<>();
        if (forClassLoader == null) {
            forClassLoader = Thread.currentThread().getContextClassLoader();
            if (forClassLoader == null) {
                forClassLoader = DefaultConfigProvider.class.getClassLoader();
            }
        }

        if (!ignoreDefaultSources) {
            configSources.addAll(getBuiltInConfigSources(forClassLoader));
        }
        configSources.addAll(sources);

        if (!ignoreDiscoveredSources) {
            // load all ConfigSource services
            ServiceLoader<ConfigSource> configSourceLoader = ServiceLoader.load(ConfigSource.class, forClassLoader);
            configSourceLoader.forEach(configSources::add);

            // load all ConfigSources from ConfigSourceProviders
            ServiceLoader<ConfigSourceProvider> configSourceProviderLoader = ServiceLoader.load(ConfigSourceProvider.class, forClassLoader);
            configSourceProviderLoader.forEach(configSourceProvider ->
                    configSourceProvider.getConfigSources(forClassLoader)
                            .forEach(configSources::add));
        }

        if (!ignoreDiscoveredConverters) {
            ServiceLoader<Converter> converterLoader = ServiceLoader.load(Converter.class, forClassLoader);
            converterLoader.forEach(converters::add);
        }

        ConfigImpl config = new ConfigImpl();
        config.addConfigSources(configSources);

        for (Converter<?> converter : converters) {
            config.addConverter(converter);
        }

        for (PrioritisedConverter prioritisedConverter : prioritisedConverters.values()) {
            config.addPrioritisedConverter(prioritisedConverter);
        }

        return config;
    }

    /** The 3 built-in source groups: env vars, system props, microprofile-config.properties files. */
    protected Collection<? extends ConfigSource> getBuiltInConfigSources(ClassLoader forClassLoader) {
        List<ConfigSource> configSources = new ArrayList<>();

        configSources.add(new SystemEnvConfigSource());
        configSources.add(new SystemPropertyConfigSource());
        configSources.addAll(new PropertyFileConfigSourceProvider("META-INF/microprofile-config.properties", true, forClassLoader).getConfigSources(forClassLoader));

        return configSources;
    }

    /** Immutable (type, priority, converter) triple used by withConverter(). */
    static class PrioritisedConverter {
        private final Class<?> clazz;
        private final int priority;
        private final Converter converter;

        public PrioritisedConverter(Class<?> clazz, int priority, Converter converter) {
            this.clazz = clazz;
            this.priority = priority;
            this.converter = converter;
        }

        public Class<?> getType() {
            return clazz;
        }

        public int getPriority() {
            return priority;
        }

        public Converter getConverter() {
            return converter;
        }
    }
}
2,269
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/configsource/BaseConfigSource.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.geronimo.config.configsource;

import java.util.logging.Level;
import java.util.logging.Logger;

import org.eclipse.microprofile.config.spi.ConfigSource;

/**
 * Base class for all our ConfigSources
 *
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 * @author <a href="mailto:gpetracek@apache.org">Gerhard Petracek</a>
 */
public abstract class BaseConfigSource implements ConfigSource {
    /** Key under which a source can override its own ordinal. */
    public final static String CONFIG_ORDINAL = "config_ordinal";

    protected Logger log = Logger.getLogger(getClass().getName());

    private int ordinal = 1000; // default

    @Override
    public int getOrdinal() {
        return ordinal;
    }

    /**
     * Init method e.g. for initializing the ordinal.
     * This method can be used from a subclass to determine
     * the ordinal value
     *
     * @param defaultOrdinal the default value for the ordinal if not set via configuration
     */
    protected void initOrdinal(int defaultOrdinal) {
        ordinal = defaultOrdinal;

        String configuredOrdinalString = getValue(CONFIG_ORDINAL);

        try {
            if (configuredOrdinalString != null) {
                ordinal = Integer.parseInt(configuredOrdinalString.trim());
            }
        } catch (NumberFormatException e) {
            // FIX: the original dropped the NumberFormatException entirely;
            // pass it along so the log record keeps the cause.
            log.log(Level.WARNING,
                    "The configured config-ordinal isn't a valid integer. Invalid value: " + configuredOrdinalString,
                    e);
        }
    }
}
2,270
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/configsource/PropertyFileConfigSource.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.configsource;

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.Map;
import java.util.Properties;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * {@link org.eclipse.microprofile.config.spi.ConfigSource} backed by a
 * single properties file URL, loaded once at construction time.
 *
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 */
public class PropertyFileConfigSource extends BaseConfigSource {
    private static final Logger LOG = Logger.getLogger(PropertyFileConfigSource.class.getName());

    private Map<String, String> properties;
    private String fileName;

    public PropertyFileConfigSource(URL propertyFileUrl) {
        fileName = propertyFileUrl.toExternalForm();
        properties = loadProperties(propertyFileUrl);
        initOrdinal(100);
    }

    /**
     * The given key gets used for a lookup via a properties file
     *
     * @param key for the property
     * @return value for the given key or null if there is no configured value
     */
    @Override
    public String getValue(String key) {
        return properties.get(key);
    }

    @Override
    public String getName() {
        return fileName;
    }

    @Override
    public Map<String, String> getProperties() {
        return properties;
    }

    /**
     * Load the properties file behind the URL. An unreadable URL yields an
     * empty map (logged as a warning), never null.
     */
    private Map<String, String> loadProperties(URL url) {
        Properties props = new Properties();
        // FIX(idiom): try-with-resources instead of the original manual
        // null-check/finally/close dance; the stream is closed on every path
        // and a failing close no longer gets silently swallowed by a nested
        // try/catch.
        try (InputStream inputStream = url.openStream()) {
            props.load(inputStream);
        } catch (IOException e) {
            // don't return null on IOException
            LOG.log(Level.WARNING, "Unable to read URL " + url, e);
        }
        return (Map) props;
    }
}
2,271
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/configsource/PropertyFileConfigSourceProvider.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.geronimo.config.configsource;

import java.io.IOException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;

import javax.enterprise.inject.Typed;
import javax.enterprise.inject.Vetoed;

import org.eclipse.microprofile.config.spi.ConfigSource;
import org.eclipse.microprofile.config.spi.ConfigSourceProvider;

/**
 * Register all property files with the given propertyFileName
 * as {@link ConfigSource}.
 *
 * @author <a href="mailto:struberg@apache.org">Mark Struberg</a>
 */
@Typed
@Vetoed
public class PropertyFileConfigSourceProvider implements ConfigSourceProvider {
    private static final Logger LOG = Logger.getLogger(PropertyFileConfigSourceProvider.class.getName());

    // FIX(encapsulation): made final; exposed read-only via getConfigSources()
    private final List<ConfigSource> configSources = new ArrayList<ConfigSource>();

    /**
     * @param propertyFileName resource name looked up on the ClassLoader
     * @param optional         if false, finding no matching resource is an error
     * @param forClassLoader   ClassLoader used for the resource lookup
     * @throws IllegalStateException if not optional and no file was found,
     *         or if reading the classpath resources failed
     */
    public PropertyFileConfigSourceProvider(String propertyFileName, boolean optional, ClassLoader forClassLoader) {
        try {
            Collection<URL> propertyFileUrls = resolvePropertyFiles(forClassLoader, propertyFileName);
            if (!optional && propertyFileUrls.isEmpty()) {
                throw new IllegalStateException(propertyFileName + " wasn't found.");
            }

            for (URL propertyFileUrl : propertyFileUrls) {
                LOG.log(Level.INFO,
                        "Custom config found by GeronimoConfig. Name: ''{0}'', URL: ''{1}''",
                        new Object[]{propertyFileName, propertyFileUrl});
                configSources.add(new PropertyFileConfigSource(propertyFileUrl));
            }
        }
        catch (IOException ioe) {
            throw new IllegalStateException("problem while loading GeronimoConfig property files", ioe);
        }
    }

    /**
     * Collect all URLs for the given resource name, de-duplicated by their
     * external form, trying the name both with and without a leading "/".
     */
    public Collection<URL> resolvePropertyFiles(ClassLoader forClassLoader, String propertyFileName) throws IOException {
        // de-duplicate
        Map<String, URL> propertyFileUrls = resolveUrls(propertyFileName, forClassLoader);

        // and once again with preceding a "/"
        propertyFileUrls.putAll(resolveUrls("/" + propertyFileName, forClassLoader));
        return propertyFileUrls.values();
    }

    private Map<String, URL> resolveUrls(String propertyFileName, ClassLoader forClassLoader) throws IOException {
        Map<String, URL> propertyFileUrls = new HashMap<>();
        Enumeration<URL> urls = forClassLoader.getResources(propertyFileName);
        while (urls.hasMoreElements()) {
            URL url = urls.nextElement();
            propertyFileUrls.put(url.toExternalForm(), url);
        }
        return propertyFileUrls;
    }

    @Override
    public List<ConfigSource> getConfigSources(ClassLoader forClassLoader) {
        // FIX(encapsulation): the original returned the internal mutable list;
        // callers only iterate it, so an unmodifiable view is safe and
        // prevents accidental external mutation of provider state.
        return Collections.unmodifiableList(configSources);
    }
}
2,272
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/configsource/SystemEnvConfigSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.configsource; import java.util.HashMap; import java.util.Map; import javax.enterprise.inject.Typed; import javax.enterprise.inject.Vetoed; import org.eclipse.microprofile.config.spi.ConfigSource; /** * {@link ConfigSource} which uses {@link System#getenv()} * <p> * We also allow to write underlines _ instead of dots _ in the * environment via export (unix) or SET (windows) * * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Typed @Vetoed public class SystemEnvConfigSource extends BaseConfigSource { private Map<String, String> configValues; private Map<String, String> uppercasePosixValues; public SystemEnvConfigSource() { uppercasePosixValues = new HashMap<>(); configValues = System.getenv(); initOrdinal(300); for (Map.Entry<String, String> e : configValues.entrySet()) { String originalKey = e.getKey(); String posixKey = replaceNonPosixEnvChars(originalKey).toUpperCase(); if (!originalKey.equals(posixKey)) { uppercasePosixValues.put(posixKey, e.getValue()); } } } @Override public String getName() { return "system_env"; } @Override public Map<String, String> getProperties() { return configValues; } @Override public String getValue(String key) { 
String val = configValues.get(key); if (val == null) { key = replaceNonPosixEnvChars(key); val = configValues.get(key); } if (val == null) { key = key.toUpperCase(); val = configValues.get(key); } if (val == null) { val = uppercasePosixValues.get(key); } return val; } private String replaceNonPosixEnvChars(String key) { return key.replaceAll("[^A-Za-z0-9]", "_"); } }
2,273
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/configsource/SystemPropertyConfigSource.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.configsource; import org.apache.geronimo.config.cdi.configsource.Reloadable; import org.eclipse.microprofile.config.spi.ConfigSource; import javax.enterprise.inject.Typed; import javax.enterprise.inject.Vetoed; import java.util.Map; import static java.lang.Boolean.valueOf; import static java.util.function.Function.identity; import static java.util.stream.Collectors.toMap; /** * {@link ConfigSource} which uses {@link System#getProperties()} * * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Typed @Vetoed public class SystemPropertyConfigSource extends BaseConfigSource implements Reloadable { private static final String COPY_PROPERTY = "org.apache.geronimo.config.configsource.SystemPropertyConfigSource.copy"; private final Map<String, String> instance; private final boolean shouldReload; public SystemPropertyConfigSource() { this(valueOf(System.getProperty(COPY_PROPERTY, "true"))); } public SystemPropertyConfigSource(boolean copy) { instance = load(copy); shouldReload = copy; initOrdinal(400); } @Override public Map<String, String> getProperties() { return instance; } @Override public String getValue(String key) { return instance.get(key); } 
@Override public String getName() { return "system-properties"; } @Override public void reload() { if (!shouldReload) { return; } instance.clear(); instance.putAll(load(true)); } private Map<String, String> load(final boolean copy) { return copy ? System.getProperties().stringPropertyNames().stream().collect(toMap(identity(), System::getProperty)) : Map.class.cast(System.getProperties()); } }
2,274
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/ImplicitConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import java.lang.reflect.Array; import java.lang.reflect.Constructor; import java.lang.reflect.Method; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.List; /** * A Converter factory + impl for 'common sense converters' * */ public abstract class ImplicitConverter { public static Converter getImplicitConverter(Class<?> clazz) { // handle ct with String param Converter converter = null; if (converter == null) { converter = hasConverterMethod(clazz, "of", String.class); } if (converter == null) { converter = hasConverterMethod(clazz, "of", CharSequence.class); } if (converter == null) { converter = hasConverterMethod(clazz, "valueOf", String.class); } if (converter == null) { converter = hasConverterMethod(clazz, "valueOf", CharSequence.class); } if (converter == null) { converter = hasConverterMethod(clazz, "parse", String.class); } if (converter == null) { converter = hasConverterMethod(clazz, "parse", CharSequence.class); } if (converter == null) { converter = hasConverterCt(clazz, String.class); } if (converter == null) { converter = hasConverterCt(clazz, 
CharSequence.class); } return converter; } private static Converter hasConverterCt(Class<?> clazz, Class<?> paramType) { try { final Constructor<?> declaredConstructor = clazz.getDeclaredConstructor(paramType); if (!declaredConstructor.isAccessible()) { declaredConstructor.setAccessible(true); } return new Converter() { @Override public Object convert(String value) { try { return declaredConstructor.newInstance(value); } catch (Exception e) { throw new IllegalArgumentException(e); } } }; } catch (NoSuchMethodException e) { // all fine } return null; } private static Converter hasConverterMethod(Class<?> clazz, String methodName, Class<?> paramType) { // handle valueOf with CharSequence param try { final Method method = clazz.getDeclaredMethod(methodName, paramType); if (!method.isAccessible()) { method.setAccessible(true); } if (Modifier.isStatic(method.getModifiers()) && method.getReturnType().equals(clazz)) { return new Converter() { @Override public Object convert(String value) { try { return method.invoke(null, value); } catch (Exception e) { throw new IllegalArgumentException("Error while converting the value " + value + " to type " + method.getReturnType()); } } }; } } catch (NoSuchMethodException e) { // all fine } return null; } public static class ImplicitArrayConverter<T> implements Converter<T> { private final Converter converter; private final Class<?> type; public ImplicitArrayConverter(Converter converter, Class<?> type) { this.converter = converter; this.type = type; } @Override public T convert(String valueStr) { if (valueStr == null) { return null; } List list = new ArrayList(); StringBuilder currentValue = new StringBuilder(); int length = valueStr.length(); for (int i = 0; i < length; i++) { char c = valueStr.charAt(i); if (c == '\\') { if (i < length - 1) { char nextC = valueStr.charAt(i + 1); currentValue.append(nextC); i++; } } else if (c == ',') { String trimedVal = currentValue.toString().trim(); if (trimedVal.length() > 0) { 
list.add(converter.convert(trimedVal)); } currentValue.setLength(0); } else { currentValue.append(c); } } String trimedVal = currentValue.toString().trim(); if (trimedVal.length() > 0) { list.add(converter.convert(trimedVal)); } // everything else is an Object array Object array = Array.newInstance(type, list.size()); for (int i=0; i < list.size(); i++) { Array.set(array, i, list.get(i)); } return (T) array; } } }
2,275
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/DurationConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import java.time.Duration; import java.time.format.DateTimeParseException; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; import org.eclipse.microprofile.config.spi.Converter; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class DurationConverter implements Converter<Duration> { public static final DurationConverter INSTANCE = new DurationConverter(); @Override public Duration convert(String value) { if (value != null) { try { return Duration.parse(value); } catch (DateTimeParseException dtpe) { throw new IllegalArgumentException(dtpe); } } return null; } }
2,276
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/LongConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class LongConverter implements Converter<Long> { public static final LongConverter INSTANCE = new LongConverter(); @Override public Long convert(String value) { return value != null ? Long.valueOf(value) : null; } }
2,277
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/ShortConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:danielsoro@apache.org">Daniel 'soro' Cunha</a> */ @Priority(1) @Vetoed public class ShortConverter implements Converter<Short> { public static final ShortConverter INSTANCE = new ShortConverter(); @Override public Short convert(String value) { return value != null ? Short.valueOf(value) : null; } }
2,278
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/StringConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; import org.eclipse.microprofile.config.spi.Converter; /** * 1:1 string output. Just to make converter logic happy. * * @author <a href="mailto:struberg@yahoo.de">Mark Struberg</a> */ @Priority(1) @Vetoed public class StringConverter implements Converter<String> { public static final StringConverter INSTANCE = new StringConverter(); @Override public String convert(String value) { return value; } }
2,279
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/FloatConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; import org.eclipse.microprofile.config.spi.Converter; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class FloatConverter implements Converter<Float> { public static final FloatConverter INSTANCE = new FloatConverter(); @Override public Float convert(String value) { return value != null ? Float.valueOf(value) : null; } }
2,280
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/URLConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; import java.net.MalformedURLException; import java.net.URL; @Vetoed @Priority(1) public class URLConverter implements Converter<URL> { public static final URLConverter INSTANCE = new URLConverter(); @Override public URL convert(String value) { try { return new URL(value); } catch (MalformedURLException e) { throw new IllegalArgumentException("Invalid url "+value,e); } } }
2,281
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/BooleanConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class BooleanConverter implements Converter<Boolean> { public static final BooleanConverter INSTANCE = new BooleanConverter(); @Override public Boolean convert(String value) { if (value != null) { return "TRUE".equalsIgnoreCase(value) || "1".equalsIgnoreCase(value) || "YES".equalsIgnoreCase(value) || "Y".equalsIgnoreCase(value) || "ON".equalsIgnoreCase(value) || "JA".equalsIgnoreCase(value) || "J".equalsIgnoreCase(value) || "OUI".equalsIgnoreCase(value); } return null; } }
2,282
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/IntegerConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; import org.eclipse.microprofile.config.spi.Converter; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class IntegerConverter implements Converter<Integer> { public static final IntegerConverter INSTANCE = new IntegerConverter(); @Override public Integer convert(String value) { return value != null ? Integer.valueOf(value) : null; } }
2,283
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/DoubleConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:struberg@apache.org">Mark Struberg</a> */ @Priority(1) @Vetoed public class DoubleConverter implements Converter<Double> { public static final DoubleConverter INSTANCE = new DoubleConverter(); @Override public Double convert(String value) { return value != null ? Double.valueOf(value) : null; } }
2,284
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/ClassConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; @Priority(1) @Vetoed public class ClassConverter implements Converter<Class>{ public static final Converter<Class> INSTANCE = new ClassConverter(); @Override public Class convert(String value) { if(value == null) { return null; } try { ClassLoader loader = Thread.currentThread().getContextClassLoader(); if (loader == null) { return Class.forName(value); } return Class.forName(value, true, loader); } catch (ClassNotFoundException e) { throw new IllegalArgumentException(e); } } }
2,285
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/CharacterConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:danielsoro@apache.org">Daniel 'soro' Cunha</a> */ @Priority(1) @Vetoed public class CharacterConverter implements Converter<Character> { public static final CharacterConverter INSTANCE = new CharacterConverter(); @Override public Character convert(String value) { if (value == null || value.length() > 1) { throw new IllegalArgumentException("Error while converting the value " + value + " to type " + char.class); } return value.charAt(0); } }
2,286
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/converters/ByteConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.geronimo.config.converters; import org.eclipse.microprofile.config.spi.Converter; import javax.annotation.Priority; import javax.enterprise.inject.Vetoed; /** * @author <a href="mailto:danielsoro@apache.org">Daniel 'soro' Cunha</a> */ @Priority(1) @Vetoed public class ByteConverter implements Converter<Byte> { public static final ByteConverter INSTANCE = new ByteConverter(); @Override public Byte convert(String value) { return value != null ? Byte.valueOf(value) : null; } }
2,287
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ConfigInjectionPoint.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.cdi; import javax.enterprise.inject.spi.Annotated; import javax.enterprise.inject.spi.Bean; import javax.enterprise.inject.spi.InjectionPoint; import java.lang.annotation.Annotation; import java.lang.reflect.Member; import java.lang.reflect.Type; import java.util.Collections; import java.util.Set; class ConfigInjectionPoint implements InjectionPoint{ private final Bean bean; ConfigInjectionPoint(Bean bean) { this.bean = bean; } @Override public boolean isTransient() { return false; } @Override public boolean isDelegate() { return false; } @Override public Type getType() { return InjectionPoint.class; } @Override public Set<Annotation> getQualifiers() { return Collections.singleton(DefaultLiteral.INSTANCE); } @Override public Member getMember() { return null; } @Override public Bean<?> getBean() { return bean; } @Override public Annotated getAnnotated() { return null; } }
2,288
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/AnyLiteral.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.cdi; import javax.enterprise.inject.Any; import javax.enterprise.util.AnnotationLiteral; class AnyLiteral extends AnnotationLiteral<Any> implements Any { public static Any INSTANCE = new AnyLiteral(); @Override public boolean equals(Object other) { return other instanceof Any; } @Override public int hashCode() { return 0; } @Override public String toString() { return "@Any"; } }
2,289
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ConfigurationHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.cdi;

import static java.util.Optional.ofNullable;
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collector;
import java.util.stream.Stream;

import org.apache.geronimo.config.ConfigImpl;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;

/**
 * Dynamic-proxy handler backing configuration interfaces: every interface
 * method annotated with {@link ConfigProperty} is resolved against the
 * {@link ConfigImpl} at invocation time. A {@code @ConfigProperty} on the
 * interface itself contributes a key prefix.
 */
public class ConfigurationHandler implements InvocationHandler {
    private final ConfigImpl config;
    // per-method metadata, computed once at proxy creation time
    private final Map<Method, MethodMeta> methodMetas;

    ConfigurationHandler(final ConfigImpl config, final Class<?> api) {
        this.config = config;

        // an interface-level @ConfigProperty name becomes a prefix of every key
        final String prefix = ofNullable(api.getAnnotation(ConfigProperty.class))
                .map(ConfigProperty::name)
                .orElse("");
        // only annotated methods are handled; others return null from invoke()
        this.methodMetas = Stream.of(api.getMethods())
                .filter(m -> m.isAnnotationPresent(ConfigProperty.class))
                .collect(toMap(identity(), e -> new MethodMeta(e, prefix)));
    }

    @Override
    public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
        // Object methods (equals/hashCode/toString) are served by this handler itself
        if (Object.class == method.getDeclaringClass()) {
            try {
                return method.invoke(this, args);
            } catch (final InvocationTargetException ite) {
                // unwrap so callers see the real exception, not the reflection wrapper
                throw ite.getTargetException();
            }
        }
        final MethodMeta methodMeta = methodMetas.get(method);
        if (methodMeta != null) {
            return methodMeta.read(config);
        }
        // non-annotated interface methods have no value to provide
        return null;
    }

    // todo: list, set etc handling but config API is not that friendly for now (Class vs Type)
    /**
     * Pre-computed lookup metadata for one annotated method: the config key,
     * the type to convert to, optional/collection handling and the default
     * value parsed from the annotation.
     */
    private static class MethodMeta {
        private final String key;
        private final Object defaultValue;
        private final Class lookupType;
        // element type for Collection returns; null for scalar returns
        private final Class collectionConversionType;
        // toSet()/toList() depending on the declared collection; null for scalars
        private final Collector<Object, ?, ? extends Collection<Object>> collectionCollector;
        private final boolean optional;

        private MethodMeta(final Method m, final String prefix) {
            final ConfigProperty annotation = m.getAnnotation(ConfigProperty.class);
            optional = Optional.class == m.getReturnType();
            // for Optional<X> the relevant type is X, otherwise the declared return type
            final Type type = optional ? ParameterizedType.class.cast(m.getGenericReturnType()).getActualTypeArguments()[0] : m.getGenericReturnType();
            if (Class.class.isInstance(type)) {
                // plain scalar return type
                lookupType = Class.class.cast(type);
                collectionCollector = null;
                collectionConversionType = null;
            } else if (ParameterizedType.class.isInstance(type)) {
                final ParameterizedType pt = ParameterizedType.class.cast(type);
                final Type rawType = pt.getRawType();
                if (!Class.class.isInstance(rawType)) {
                    throw new IllegalArgumentException("Unsupported parameterized type: " + type);
                }
                final Class<?> clazz = Class.class.cast(pt.getRawType());
                if (Collection.class.isAssignableFrom(clazz)) {
                    // collections are read as a raw String and split/converted later
                    final Type arg0 = pt.getActualTypeArguments()[0];
                    collectionConversionType = Class.class.cast(ParameterizedType.class.isInstance(arg0) ?
                            // mainly to tolerate Class<?> as an arg
                            ParameterizedType.class.cast(arg0).getRawType() :
                            Class.class.cast(arg0));
                    lookupType = String.class;
                    if (Set.class.isAssignableFrom(clazz)) {
                        collectionCollector = toSet();
                    } else {
                        collectionCollector = toList();
                    }
                } else {
                    throw new IllegalArgumentException("Unsupported parameterized type: " + type + ", did you want a Collection?");
                }
            } else {
                throw new IllegalArgumentException("Unsupported type: " + type);
            }
            // fall back to "<declaring class>.<method>" when no explicit name is given
            key = prefix + (annotation.name().isEmpty() ? m.getDeclaringClass().getName() + "." + m.getName() : annotation.name());
            final String defaultValue = annotation.defaultValue();
            final boolean canBeNull = ConfigProperty.UNCONFIGURED_VALUE.equals(defaultValue);
            // NOTE(review): the "!... && !canBeNull" below is redundant - both
            // operands test the same condition; kept as-is, behavior unchanged
            final boolean hasDefault = !ConfigProperty.UNCONFIGURED_VALUE.equals(defaultValue) && !canBeNull;
            if (hasDefault) {
                // parse the annotation's default eagerly into the target type
                final Config config = ConfigProvider.getConfig();
                if (lookupType == long.class || lookupType == Long.class) {
                    this.defaultValue = Long.parseLong(defaultValue);
                } else if (lookupType == boolean.class || lookupType == Boolean.class) {
                    this.defaultValue = Boolean.parseBoolean(defaultValue);
                } else if (lookupType == int.class || lookupType == Integer.class) {
                    this.defaultValue = Integer.parseInt(defaultValue);
                } else if (lookupType == double.class || lookupType == Double.class) {
                    this.defaultValue = Double.parseDouble(defaultValue);
                } else if (lookupType == float.class || lookupType == Float.class) {
                    this.defaultValue = Float.parseFloat(defaultValue);
                } else if (lookupType == short.class || lookupType == Short.class) {
                    this.defaultValue = Short.parseShort(defaultValue);
                } else if (lookupType == char.class || lookupType == Character.class) {
                    this.defaultValue = defaultValue.charAt(0);
                } else if (lookupType == byte.class || lookupType == Byte.class) {
                    this.defaultValue = Byte.parseByte(defaultValue);
                } else if (collectionCollector != null) {
                    this.defaultValue = convert(defaultValue, config);
                } else if (lookupType == String.class) {
                    this.defaultValue = defaultValue;
                } else {
                    throw new IllegalArgumentException("Unsupported default for " + m);
                }
            } else {
                // no annotation default: primitives get their JVM zero value,
                // reference types get null
                if (lookupType.isPrimitive()) {
                    if (lookupType == long.class) {
                        this.defaultValue = 0L;
                    } else if (lookupType == boolean.class) {
                        this.defaultValue = false;
                    } else if (lookupType == int.class ) {
                        this.defaultValue = 0;
                    } else if (lookupType == double.class) {
                        this.defaultValue = 0.0D;
                    } else if (lookupType == float.class) {
                        this.defaultValue = 0.0F;
                    } else if (lookupType == short.class) {
                        this.defaultValue = (short) 0;
                    } else if (lookupType == char.class) {
                        this.defaultValue = '\u0000';
                    } else if (lookupType == byte.class) {
                        this.defaultValue = (byte) 0;
                    } else {
                        this.defaultValue = null;
                    }
                } else {
                    this.defaultValue = null;
                }
            }
        }

        /**
         * Resolves the current value for this method: reads the key from the
         * config, applies variable evaluation, then either returns the Optional
         * itself (for Optional returns) or the value/default.
         */
        Object read(final ConfigImpl config) {
            final Optional optionalValue = ofNullable(config
                    .access(key)
                    .as(lookupType)
                    .evaluateVariables(true)
                    .get());
            if (optional) {
                return processOptional(optionalValue, config);
            }
            return processOptional(optionalValue, config).orElse(defaultValue);
        }

        // for collection returns the raw String value is split and converted;
        // scalars pass through unchanged
        private Optional processOptional(final Optional<?> optionalValue, final Config config) {
            if (collectionCollector != null) {
                return optionalValue.map(String.class::cast).map(v -> convert(v, config));
            }
            return optionalValue;
        }

        // splits on "," (no escaping here, unlike ImplicitArrayConverter) and
        // converts each element to the collection's element type
        private Collection<?> convert(final String o, final Config config) {
            final String[] values = o.split(",");
            return Stream.of(values)
                    .map(v -> mapValue(v, config))
                    .collect(collectionCollector);
        }

        private Object mapValue(final String raw, final Config config) {
            if (String.class == collectionConversionType) {
                return raw;
            }
            // element conversion needs ConfigImpl's converter registry
            if (ConfigImpl.class.isInstance(config)) {
                return ConfigImpl.class.cast(config).convert(raw, collectionConversionType);
            }
            throw new IllegalArgumentException("Unsupported conversion if config instance is not a ConfigImpl: " + collectionConversionType);
        }
    }
}
2,290
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ProxyBean.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.cdi;

import static java.util.Arrays.asList;
import static java.util.Collections.emptySet;

import java.io.Serializable;
import java.lang.annotation.Annotation;
import java.lang.reflect.Proxy;
import java.lang.reflect.Type;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.context.spi.CreationalContext;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.enterprise.inject.spi.PassivationCapable;

import org.apache.geronimo.config.ConfigImpl;
import org.eclipse.microprofile.config.Config;

/**
 * CDI {@link Bean} that serves a {@code @ConfigProperty}-annotated interface
 * by handing out a JDK dynamic proxy backed by {@link ConfigurationHandler}.
 * The backing {@link ConfigImpl} is injected later via {@link #init(ConfigImpl)}
 * (during AfterDeploymentValidation, see {@code ConfigExtension}).
 */
public class ProxyBean<T> implements Bean<T>, PassivationCapable {
    // fix: was a bare mutable HashSet exposed through getQualifiers();
    // wrap unmodifiable so callers cannot corrupt the shared qualifier set
    private static final Set<Annotation> QUALIFIERS = Collections.unmodifiableSet(
            new HashSet<>(asList(DefaultLiteral.INSTANCE, AnyLiteral.INSTANCE)));

    private final Class<T> beanClass;
    private final Set<Type> types;
    private final Set<Annotation> qualifiers;
    private final String id;
    private ConfigImpl config;

    ProxyBean(final Class<T> api) {
        this.beanClass = api;
        this.qualifiers = QUALIFIERS;
        // unmodifiable for the same reason as QUALIFIERS: getTypes() exposes it
        this.types = Collections.unmodifiableSet(new HashSet<>(asList(api, Serializable.class)));
        this.id = ProxyBean.class.getName() + "[" + api.getName() + "]";
    }

    /** Late-binds the config instance used by the proxies this bean creates. */
    void init(final ConfigImpl config) {
        this.config = config;
    }

    @Override
    public Set<InjectionPoint> getInjectionPoints() {
        return emptySet();
    }

    @Override
    public Class<?> getBeanClass() {
        return beanClass;
    }

    @Override
    public boolean isNullable() {
        return false;
    }

    @Override
    @SuppressWarnings("unchecked") // the proxy implements beanClass == Class<T> by construction
    public T create(final CreationalContext<T> context) {
        return (T) Proxy.newProxyInstance(
                Thread.currentThread().getContextClassLoader(),
                new Class<?>[]{beanClass},
                new ConfigurationHandler(config, beanClass));
    }

    @Override
    public void destroy(final T instance, final CreationalContext<T> context) {
        // no-op: proxies hold no state beyond the shared config reference
    }

    @Override
    public Set<Type> getTypes() {
        return types;
    }

    @Override
    public Set<Annotation> getQualifiers() {
        return qualifiers;
    }

    @Override
    public Class<? extends Annotation> getScope() {
        return ApplicationScoped.class;
    }

    @Override
    public String getName() {
        return null;
    }

    @Override
    public Set<Class<? extends Annotation>> getStereotypes() {
        return emptySet();
    }

    @Override
    public boolean isAlternative() {
        return false;
    }

    @Override
    public String getId() {
        return id;
    }
}
2,291
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ConfigExtension.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.cdi;

import static java.util.stream.Collectors.toList;

import org.apache.geronimo.config.ConfigImpl;
import org.apache.geronimo.config.DefaultConfigBuilder;
import org.apache.geronimo.config.cdi.configsource.Reloadable;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import org.eclipse.microprofile.config.spi.ConfigProviderResolver;

import javax.enterprise.event.Observes;
import javax.enterprise.inject.spi.AfterBeanDiscovery;
import javax.enterprise.inject.spi.AfterDeploymentValidation;
import javax.enterprise.inject.spi.BeanManager;
import javax.enterprise.inject.spi.BeforeShutdown;
import javax.enterprise.inject.spi.DeploymentException;
import javax.enterprise.inject.spi.Extension;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.enterprise.inject.spi.ProcessAnnotatedType;
import javax.enterprise.inject.spi.ProcessInjectionPoint;
import javax.inject.Provider;

import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

/**
 * CDI extension wiring MicroProfile Config into the container:
 * discovers {@code @ConfigProperty} injection points and proxy interfaces,
 * registers the producing beans, and validates configured values at deployment.
 *
 * @author <a href="mailto:struberg@yahoo.de">Mark Struberg</a>
 */
public class ConfigExtension implements Extension {
    private ConfigImpl config;

    // matches injection points that are NOT Provider<X> (direct values)
    private static final Predicate<InjectionPoint> NOT_PROVIDERS = ip ->
            (ip.getType() instanceof Class)
                    || (ip.getType() instanceof ParameterizedType
                        && ((ParameterizedType) ip.getType()).getRawType() != Provider.class);

    // primitive -> wrapper mapping; config lookups always use wrapper types
    private static final Map<Type, Type> REPLACED_TYPES = new HashMap<>();

    static {
        REPLACED_TYPES.put(double.class, Double.class);
        REPLACED_TYPES.put(int.class, Integer.class);
        REPLACED_TYPES.put(float.class, Float.class);
        REPLACED_TYPES.put(long.class, Long.class);
        REPLACED_TYPES.put(boolean.class, Boolean.class);
        REPLACED_TYPES.put(byte.class, Byte.class);
        REPLACED_TYPES.put(short.class, Short.class);
        REPLACED_TYPES.put(char.class, Character.class);
    }

    private Set<InjectionPoint> injectionPoints = new HashSet<>();
    private Set<Class<?>> proxies = new HashSet<>();
    private List<Class<?>> validProxies;
    private List<ProxyBean<?>> proxyBeans;
    private boolean hasConfigProxy;
    private ConfigBean configBean;

    public ConfigExtension() {
        final Config raw = ConfigProvider.getConfig();
        // ensure to store the ref the whole lifecycle, java gc is aggressive now
        this.config = ConfigImpl.class.cast(ConfigImpl.class.isInstance(raw)
                ? raw
                // custom overrided config, unlikely but possible in wrong setups
                : new DefaultConfigBuilder()
                        .forClassLoader(Thread.currentThread().getContextClassLoader())
                        .addDefaultSources()
                        .addDiscoveredSources()
                        .addDiscoveredConverters()
                        .build());
    }

    /** Collects interfaces with at least one @ConfigProperty method as proxy candidates. */
    public void findProxies(@Observes ProcessAnnotatedType<?> pat) {
        final Class<?> javaClass = pat.getAnnotatedType().getJavaClass();
        if (javaClass.isInterface()
                && Stream.of(javaClass.getMethods()).anyMatch(m -> m.isAnnotationPresent(ConfigProperty.class))) {
            proxies.add(javaClass);
        }
    }

    /** Records every @ConfigProperty injection point for producer registration + validation. */
    public void collectConfigProducer(@Observes ProcessInjectionPoint<?, ?> pip) {
        ConfigProperty configProperty = pip.getInjectionPoint().getAnnotated().getAnnotation(ConfigProperty.class);
        if (configProperty != null) {
            injectionPoints.add(pip.getInjectionPoint());
        }
    }

    /** Registers one ConfigInjectionBean per distinct injected type plus the proxy beans. */
    public void registerConfigProducer(@Observes AfterBeanDiscovery abd, BeanManager bm) {
        Set<Type> types = injectionPoints.stream()
                .filter(NOT_PROVIDERS)
                .map(ip -> REPLACED_TYPES.getOrDefault(ip.getType(), ip.getType()))
                .collect(Collectors.toSet());

        // for Provider<X> injection points, register a producer for X
        Set<Type> providerTypes = injectionPoints.stream()
                .filter(NOT_PROVIDERS.negate())
                .map(ip -> ((ParameterizedType) ip.getType()).getActualTypeArguments()[0])
                .collect(Collectors.toSet());

        types.addAll(providerTypes);

        // fix: the original set hasConfigProxy via a side-effecting peek() in the
        // pipeline below; a plain contains() check is equivalent and honest
        hasConfigProxy = types.contains(Config.class);

        types.stream()
                .map(type -> new ConfigInjectionBean<>(bm, type))
                .forEach(abd::addBean);

        validProxies = proxies.stream()
                .filter(this::isValidProxy)
                .collect(toList());
        if (validProxies.size() == proxies.size()) {
            proxyBeans = validProxies.stream()
                    .map((Function<Class<?>, ? extends ProxyBean<?>>) ProxyBean::new)
                    .collect(toList());
            proxyBeans.forEach(abd::addBean);
        } // else there are errors, reported in validate()

        if (!hasConfigProxy) {
            configBean = new ConfigBean();
            abd.addBean(configBean);
        }
    }

    /**
     * Deployment-time validation: reloads reloadable sources, late-binds the
     * config into the registered beans and reports missing mandatory values
     * and invalid proxy interfaces as deployment problems.
     */
    public void validate(@Observes AfterDeploymentValidation add) {
        List<String> deploymentProblems = new ArrayList<>();

        StreamSupport.stream(config.getConfigSources().spliterator(), false)
                .filter(Reloadable.class::isInstance)
                .map(Reloadable.class::cast)
                .forEach(Reloadable::reload);

        if (!hasConfigProxy) {
            configBean.init(config);
        }
        // fix: proxyBeans is only assigned when ALL proxies were valid; the
        // original dereferenced it unconditionally and NPE'd here instead of
        // reporting the invalid-proxy deployment problem below
        if (proxyBeans != null) {
            proxyBeans.forEach(b -> b.init(config));
            proxyBeans.clear();
        }

        for (InjectionPoint injectionPoint : injectionPoints) {
            Type type = injectionPoint.getType();

            // replace native types with their Wrapper types
            type = REPLACED_TYPES.getOrDefault(type, type);

            ConfigProperty configProperty = injectionPoint.getAnnotated().getAnnotation(ConfigProperty.class);
            if (type instanceof Class) {
                // a direct injection of a ConfigProperty
                // that means a Converter must exist.
                String key = ConfigInjectionBean.getConfigKey(injectionPoint, configProperty);
                if ((isDefaultUnset(configProperty.defaultValue()))
                        && !config.getOptionalValue(key, (Class) type).isPresent()) {
                    deploymentProblems.add("No Config Value exists for " + key);
                }
            }
        }

        if (!deploymentProblems.isEmpty()) {
            add.addDeploymentProblem(new DeploymentException("Error while validating Configuration\n"
                    + String.join("\n", deploymentProblems)));
        }

        if (validProxies.size() != proxies.size()) {
            proxies.stream()
                    .filter(p -> !validProxies.contains(p))
                    .forEach(p -> add.addDeploymentProblem(
                            new DeploymentException("Invalid proxy: " + p + ". All method should have @ConfigProperty.")));
        }
        proxies.clear();
    }

    public void shutdown(@Observes BeforeShutdown bsd) {
        ConfigProviderResolver.instance().releaseConfig(config);
    }

    // a proxy is valid when every method (except Object's) carries @ConfigProperty
    private boolean isValidProxy(final Class<?> api) {
        return Stream.of(api.getMethods())
                .allMatch(m -> m.isAnnotationPresent(ConfigProperty.class) || Object.class == m.getDeclaringClass());
    }

    static boolean isDefaultUnset(String defaultValue) {
        return ConfigProperty.UNCONFIGURED_VALUE.equals(defaultValue);
    }
}
2,292
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/DefaultLiteral.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.cdi; import javax.enterprise.inject.Default; import javax.enterprise.util.AnnotationLiteral; class DefaultLiteral extends AnnotationLiteral<Default> implements Default { public static Default INSTANCE = new DefaultLiteral(); @Override public boolean equals(Object other) { return other instanceof Default; } @Override public int hashCode() { return 0; } @Override public String toString() { return "@Default"; } }
2,293
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ConfigBean.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.cdi;

import static java.util.Arrays.asList;
import static java.util.Collections.emptySet;

import java.io.Serializable;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.util.HashSet;
import java.util.Set;

import javax.enterprise.context.ApplicationScoped;
import javax.enterprise.context.spi.CreationalContext;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.enterprise.inject.spi.PassivationCapable;

import org.eclipse.microprofile.config.Config;

/**
 * Application-scoped CDI bean exposing the {@link Config} instance itself.
 * Registered by {@code ConfigExtension} when no other bean already serves
 * {@code Config}; the actual instance is late-bound through {@link #init(Config)}.
 */
public class ConfigBean implements Bean<Config>, PassivationCapable {
    private final Set<Type> types;
    private final Set<Annotation> qualifiers;
    private final String id;
    private Config config;

    ConfigBean() {
        final Set<Annotation> qualifierSet = new HashSet<>();
        qualifierSet.add(DefaultLiteral.INSTANCE);
        qualifierSet.add(AnyLiteral.INSTANCE);
        this.qualifiers = qualifierSet;

        final Set<Type> typeSet = new HashSet<>();
        typeSet.add(Config.class);
        typeSet.add(Serializable.class);
        this.types = typeSet;

        this.id = ConfigBean.class.getName() + "[" + Config.class.getName() + "]";
    }

    /** Late-binds the config instance this bean will hand out. */
    void init(final Config config) {
        this.config = config;
    }

    @Override
    public Set<InjectionPoint> getInjectionPoints() {
        return emptySet();
    }

    @Override
    public Class<?> getBeanClass() {
        return Config.class;
    }

    @Override
    public boolean isNullable() {
        return false;
    }

    @Override
    public Config create(final CreationalContext<Config> context) {
        // always the single late-bound instance; nothing is created per injection
        return config;
    }

    @Override
    public void destroy(final Config instance, final CreationalContext<Config> context) {
        // no-op: lifecycle of the config is managed by the extension
    }

    @Override
    public Set<Type> getTypes() {
        return types;
    }

    @Override
    public Set<Annotation> getQualifiers() {
        return qualifiers;
    }

    @Override
    public Class<? extends Annotation> getScope() {
        return ApplicationScoped.class;
    }

    @Override
    public String getName() {
        return null;
    }

    @Override
    public Set<Class<? extends Annotation>> getStereotypes() {
        return emptySet();
    }

    @Override
    public boolean isAlternative() {
        return false;
    }

    @Override
    public String getId() {
        return id;
    }
}
2,294
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/ConfigInjectionBean.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.geronimo.config.cdi;

import static java.util.Optional.of;
import static java.util.Optional.ofNullable;

import java.lang.annotation.Annotation;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;

import javax.enterprise.context.Dependent;
import javax.enterprise.context.spi.CreationalContext;
import javax.enterprise.inject.spi.Annotated;
import javax.enterprise.inject.spi.AnnotatedMember;
import javax.enterprise.inject.spi.AnnotatedType;
import javax.enterprise.inject.spi.Bean;
import javax.enterprise.inject.spi.BeanManager;
import javax.enterprise.inject.spi.InjectionPoint;
import javax.enterprise.inject.spi.PassivationCapable;
import javax.enterprise.util.AnnotationLiteral;
import javax.inject.Provider;

import org.apache.geronimo.config.ConfigImpl;
import org.apache.geronimo.config.ConfigValueImpl;
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;
import org.eclipse.microprofile.config.inject.ConfigProperty;

/**
 * Dependent-scoped producer bean for a single {@code @ConfigProperty} injected type.
 * {@link #create} inspects the concrete injection point and unwraps nested generic
 * containers (Optional / Provider / Supplier / List / Set) before performing the lookup.
 *
 * @author <a href="mailto:struberg@yahoo.de">Mark Struberg</a>
 */
public class ConfigInjectionBean<T> implements Bean<T>, PassivationCapable {
    // single shared @ConfigProperty qualifier used by all instances
    private final static Set<Annotation> QUALIFIERS = new HashSet<>();
    static {
        QUALIFIERS.add(new ConfigPropertyLiteral());
    }

    private final BeanManager bm;
    // raw class of the produced type (Foo for Foo, List for List<Foo>, ...)
    private final Class rawType;
    private final Set<Type> types;
    private final String id;

    /**
     * only access via {@link #getConfig()}
     */
    private ConfigImpl _config;

    public ConfigInjectionBean(BeanManager bm, Type type) {
        this.bm = bm;
        types = new HashSet<>();
        types.add(type);
        rawType = getRawType(type);
        this.id = "ConfigInjectionBean_" + types;
    }

    // resolves the erased class for plain classes and parameterized types
    private Class getRawType(Type type) {
        if (type instanceof Class) {
            return (Class) type;
        }
        else if (type instanceof ParameterizedType) {
            ParameterizedType paramType = (ParameterizedType) type;
            return (Class) paramType.getRawType();
        }
        throw new UnsupportedOperationException("No idea how to handle " + type);
    }

    @Override
    public Set<InjectionPoint> getInjectionPoints() {
        return Collections.emptySet();
    }

    @Override
    public Class<?> getBeanClass() {
        return rawType;
    }

    @Override
    public boolean isNullable() {
        return false;
    }

    @Override
    public T create(CreationalContext<T> context) {
        // resolve the CURRENT injection point; the lookup key depends on where we inject
        final InjectionPoint ip = (InjectionPoint) bm.getInjectableReference(new ConfigInjectionPoint(this), context);
        if (ip == null) {
            throw new IllegalStateException("Could not retrieve InjectionPoint");
        }
        final Annotated annotated = ip.getAnnotated();
        final ConfigProperty configProperty = annotated.getAnnotation(ConfigProperty.class);
        final String key = getConfigKey(ip, configProperty);
        final String defaultValue = configProperty.defaultValue();
        // UNCONFIGURED_VALUE default means "no default, value may be absent"
        final boolean canBeNull = ConfigProperty.UNCONFIGURED_VALUE.equals(defaultValue);
        return toInstance(
                annotated.getBaseType(), key,
                canBeNull || ConfigExtension.isDefaultUnset(defaultValue) ? null : defaultValue,
                true, canBeNull);
    }

    /**
     * Recursively unwraps the injected generic type and performs the config lookup.
     * skipProviderLevel: the outermost Provider level is already handled by CDI itself,
     * so the first Provider encountered is skipped rather than wrapped in a lambda.
     * acceptNull: whether an absent value is tolerated (lazy/Optional levels always are).
     */
    private T toInstance(final Type baseType, final String key, final String defaultValue,
                         final boolean skipProviderLevel, final boolean acceptNull) {
        if (baseType instanceof ParameterizedType) {
            ParameterizedType paramType = (ParameterizedType) baseType;
            Type rawType = paramType.getRawType();

            if (paramType.getActualTypeArguments().length == 0) {
                throw new IllegalArgumentException("No argument to " + paramType);
            }
            Type arg = paramType.getActualTypeArguments()[0];
            if (!Class.class.isInstance(arg)) {
                // the type argument is itself parameterized, e.g. Optional<List<X>>
                if (ParameterizedType.class.isInstance(arg)) {
                    ParameterizedType nested = ParameterizedType.class.cast(arg);
                    if (rawType == Optional.class) {
                        // nested lookups are always nullable -> wrap in Optional
                        return (T) ofNullable(toInstance(nested, key, defaultValue, false, true));
                    }
                    if (rawType == Provider.class) {
                        if (nested.getActualTypeArguments().length != 1) {
                            throw new IllegalArgumentException("Invalid arguments for " + paramType);
                        }
                        return skipProviderLevel
                                ? toInstance(nested, key, defaultValue, false, acceptNull)
                                : (T) (Provider<?>) () -> toInstance(nested, key, defaultValue, false, true);
                    }
                    if (rawType == Supplier.class) {
                        if (nested.getActualTypeArguments().length != 1) {
                            throw new IllegalArgumentException("Invalid arguments for " + paramType);
                        }
                        // lazy: re-evaluated on each get()
                        return (T) (Supplier<?>) () -> toInstance(nested, key, defaultValue, false, true);
                    }
                }
                throw new IllegalArgumentException("Unsupported multiple generics level: " + paramType);
            }
            Class clazzParam = (Class) arg;
            // handle Provider<T>
            if (rawType instanceof Class && rawType == Provider.class && paramType.getActualTypeArguments().length == 1) {
                return skipProviderLevel
                        ? toInstance(clazzParam, key, defaultValue, false, acceptNull)
                        : (T) (Provider<?>) () -> toInstance(clazzParam, key, defaultValue, false, true);
            }
            // handle Optional<T>
            if (rawType instanceof Class && rawType == Optional.class && paramType.getActualTypeArguments().length == 1) {
                return (T) getConfig().getOptionalValue(key, clazzParam);
            }
            // handle Supplier<T>: lazy single-value lookup
            if (rawType instanceof Class && rawType == Supplier.class && paramType.getActualTypeArguments().length == 1) {
                return (T) (Supplier<?>) () -> toInstance(clazzParam, key, defaultValue, false, true);
            }
            if (Set.class.equals(rawType)) {
                final List list = getList(key, clazzParam, defaultValue, acceptNull);
                return list == null ? null : (T) new HashSet(list);
            }
            if (List.class.equals(rawType)) {
                return (T) getList(key, clazzParam, defaultValue, acceptNull);
            }
            throw new IllegalStateException("unhandled ConfigProperty");
        }
        // non-parameterized leaf: direct converted lookup
        Class clazz = (Class) baseType;
        return getConfigValue(key, defaultValue, clazz, acceptNull);
    }

    // looks up a comma-separated list value converted to clazzParam elements
    private List getList(final String key, final Class clazzParam, final String defaultValue, final boolean nullable) {
        final ConfigImpl config = getConfig();
        ConfigValueImpl configValue = config
                .access(key)
                .as(clazzParam)
                .asList()
                .evaluateVariables(true);
        if (defaultValue != null) {
            configValue.withStringDefault(defaultValue);
        }
        else if (nullable) // list default is emptyList, reset it for nullable values
        {
            configValue.withDefault(null);
        }
        return (List) configValue.get();
    }

    // scalar lookup; absent values throw unless canBeNull
    private T getConfigValue(final String key, final String defaultValue, final Class clazz, final boolean canBeNull) {
        final ConfigImpl config = getConfig();
        final T value = (T) config
                .access(key)
                .as(clazz)
                .evaluateVariables(true)
                .withDefault(defaultValue == null ? null : config.convert(defaultValue, clazz))
                .get();
        if (value != null || canBeNull) {
            return value;
        }
        throw new NoSuchElementException("No configured value found for config key '" + key + "'");
    }

    /**
     * Get the property key to use.
     * In case the {@link ConfigProperty#name()} is empty we will try to determine the key name from the InjectionPoint.
     */
    static String getConfigKey(InjectionPoint ip, ConfigProperty configProperty) {
        String key = configProperty.name();
        if (key.length() > 0) {
            return key;
        }
        if (ip.getAnnotated() instanceof AnnotatedMember) {
            AnnotatedMember member = (AnnotatedMember) ip.getAnnotated();
            AnnotatedType declaringType = member.getDeclaringType();
            if (declaringType != null) {
                // default key: <fully.qualified.DeclaringClass>.<memberName>
                return declaringType.getJavaClass().getCanonicalName() + "." + member.getJavaMember().getName();
            }
        }
        throw new IllegalStateException("Could not find default name for @ConfigProperty InjectionPoint " + ip);
    }

    // lazy single lookup of the config; cached in _config afterwards
    public ConfigImpl getConfig() {
        if (_config == null) {
            _config = (ConfigImpl) ConfigProvider.getConfig();
        }
        return _config;
    }

    @Override
    public void destroy(T instance, CreationalContext<T> context) {
        // no-op: produced values hold no resources to release
    }

    @Override
    public Set<Type> getTypes() {
        return types;
    }

    @Override
    public Set<Annotation> getQualifiers() {
        return QUALIFIERS;
    }

    @Override
    public Class<? extends Annotation> getScope() {
        return Dependent.class;
    }

    @Override
    public String getName() {
        return null;
    }

    @Override
    public Set<Class<? extends Annotation>> getStereotypes() {
        return Collections.emptySet();
    }

    @Override
    public boolean isAlternative() {
        return false;
    }

    @Override
    public String getId() {
        return id;
    }

    // qualifier literal; member values are irrelevant for qualifier matching here
    private static class ConfigPropertyLiteral extends AnnotationLiteral<ConfigProperty> implements ConfigProperty {
        @Override
        public String name() {
            return "";
        }

        @Override
        public String defaultValue() {
            return "";
        }
    }
}
2,295
0
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi
Create_ds/geronimo-config/impl/src/main/java/org/apache/geronimo/config/cdi/configsource/Reloadable.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.geronimo.config.cdi.configsource; /** * Enable config source to be reloaded in AfterDeploymentValidation * phase. Particularly useful for sources relying on a state which can * be mutated in previous phases like system properties but which * desire to stay immutable at runtime for perf/lock reasons. */ public interface Reloadable { void reload(); }
2,296
0
Create_ds/datasketches-java/src/test/java/org/apache/datasketches
Create_ds/datasketches-java/src/test/java/org/apache/datasketches/hll/UnionTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.datasketches.hll;

import static java.lang.Math.min;
import static org.apache.datasketches.hll.TgtHllType.HLL_4;
import static org.apache.datasketches.hll.TgtHllType.HLL_6;
import static org.apache.datasketches.hll.TgtHllType.HLL_8;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;

import org.apache.datasketches.common.SketchesArgumentException;
import org.apache.datasketches.memory.Memory;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;

/**
 * Tests the HLL {@link Union} operator: mode-transition boundaries, serialization
 * round-trips (heapify/wrap), result-type conversions, and the internal
 * Rebuild-CurMin-NumKxQ flag handling.
 *
 * @author Lee Rhodes
 */
public class UnionTest {
  static final String LS = System.getProperty("line.separator");
  // Update counts used by the serialization round-trip tests; roughly log-spaced.
  static final int[] nArr = new int[] {1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000};

  //                                    n1,...   lgK,...         tgtHll,  Mode         Ooo     Est
  // 17 column specifiers; must stay in sync with the 17 arguments of hdr and of the
  // data row built in basicUnion().
  static final String hdrFmt =
      "%6s%6s%6s" + "%8s%5s%5s%5s" + "%7s%6s" + "%7s%6s%6s" +"%3s%2s%2s"+ "%13s%12s";
  static final String hdr = String.format(hdrFmt,
      "n1", "n2", "tot",
      "lgMaxK", "lgK1", "lgK2", "lgKR",
      "tgt1", "tgt2",
      "Mode1", "Mode2", "ModeR",
      "1", "2", "R",
      "Est", "Err%");

  /**
   * The task here is to check the transition boundaries as the sketch morphs between LIST to
   * SET to HLL modes. The transition points vary as a function of lgConfigK. In addition,
   * this checks that the union operation is operating properly based on the order the
   * sketches are presented to the union.
   */
  @Test
  public void checkUnions() {
    //HLL_4: t=0, HLL_6: t=1, HLL_8: t=2
    int t1 = 2; //type = HLL_8
    int t2 = 2;
    int rt = 2; //result type
    println("TgtR: " + TgtHllType.values()[rt].toString());

    // Base group: small fixed counts straddling the LIST capacity at lgK = 7.
    int lgK1 = 7;
    int lgK2 = 7;
    int lgMaxK = 7;
    int n1 = 7;
    int n2 = 7;
    basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
    n1 = 8;
    n2 = 7;
    basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
    n1 = 7;
    n2 = 8;
    basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
    n1 = 8;
    n2 = 8;
    basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
    n1 = 7;
    n2 = 14;
    basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
    println("++END BASE GROUP++");

    // Sweep lgK; within each major group run four minor groups covering the
    // (lgK1, lgK2) combinations {i,i}, {i,i+1}, {i+1,i}, {i+1,i+1} around the
    // SET->HLL transition point of each sketch.
    int i = 0;
    for (i = 7; i <= 13; i++)
    {
      lgK1 = i;
      lgK2 = i;
      lgMaxK = i;
      {
        n1 = ((1 << (i - 3)) * 3)/4; //compute the transition point
        n2 = n1;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 -= 2;
        n2 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
      }
      println("--END MINOR GROUP--");
      lgK1 = i;
      lgK2 = i + 1;
      lgMaxK = i;
      {
        n1 = ((1 << (i - 3)) * 3)/4; //compute the transition point
        n2 = n1;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 -= 2;
        n2 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
      }
      println("--END MINOR GROUP--");
      lgK1 = i + 1;
      lgK2 = i;
      lgMaxK = i;
      {
        n1 = ((1 << (i - 3)) * 3)/4; //compute the transition point
        n2 = n1;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 -= 2;
        n2 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
      }
      println("--END MINOR GROUP--");
      lgK1 = i + 1;
      lgK2 = i + 1;
      lgMaxK = i;
      {
        n1 = ((1 << (i - 3)) * 3)/4; //compute the transition point
        n2 = n1;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 -= 2;
        n2 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
        n1 += 2;
        basicUnion(n1, n2,  lgK1, lgK2, lgMaxK, t1, t2, rt);
      }
      println("++END MAJOR GROUP++");
    } //End for
  }

  /** Single spot-check of one basicUnion case, handy for debugging. */
  @Test
  public void check() { //n1=8, n2=7, lgK1=lgK2=lgMaxK=7, all HLL_8
    basicUnion(8, 7,   7, 7, 7,   2, 2, 2);
  }

  /**
   * Feeds two disjoint value streams into h1 and h2, unions them, and checks the
   * union's bounds against a single "control" sketch that saw the full stream.
   * Only bound-consistency is asserted (UB >= Est >= LB); the printed table is
   * for manual inspection and is disabled by default.
   *
   * @param n1 count of distinct values fed to h1
   * @param n2 count of distinct values fed to h2 (disjoint from h1's stream)
   * @param lgK1 lgConfigK of h1
   * @param lgK2 lgConfigK of h2
   * @param lgMaxK lgMaxK of the union
   * @param t1 ordinal of h1's TgtHllType
   * @param t2 ordinal of h2's TgtHllType
   * @param rt ordinal of the requested result TgtHllType
   */
  private static void basicUnion(int n1, int n2, int lgK1, int lgK2,
      int lgMaxK, int t1, int t2, int rt) {
    long v = 0; // running base of the distinct-value stream
    int tot = n1 + n2;
    TgtHllType type1 = TgtHllType.values()[t1];
    String t1str = type1.toString();
    TgtHllType type2 = TgtHllType.values()[t2];
    String t2str = type2.toString();
    TgtHllType resultType = TgtHllType.values()[rt];
    //String rtStr = resultType.toString();

    HllSketch h1 = new HllSketch(lgK1, type1);
    HllSketch h2 = new HllSketch(lgK2, type2);
    int lgControlK = min(min(lgK1, lgK2), lgMaxK); //min of all 3
    HllSketch control = new HllSketch(lgControlK, resultType);
    // 17 column specifiers; must stay in sync with hdrFmt above.
    String dataFmt = "%6d%6d%6d," + "%7d%5d%5d%5d," + "%6s%6s," + "%6s%6s%6s," +"%2s%2s%2s," + "%12f%12f%%";

    // Stream 1 -> h1 and control; stream 2 (disjoint) -> h2 and control.
    for (long i = 0; i < n1; i++) {
      h1.update(v + i);
      control.update(v + i);
    }
    v += n1;
    for (long i = 0; i < n2; i++) {
      h2.update(v + i);
      control.update(v + i);
    }
    v += n2;

    String h1SketchStr = ("H1 SKETCH: \n" + h1.toString());
    String h2SketchStr = ("H2 SKETCH: \n" + h2.toString());

    Union union = newUnion(lgMaxK);
    union.update(h1);
    String uH1SketchStr = ("Union after H1: \n" + union.getResult(resultType).toString());
    //println(uH1SketchStr);

    union.update(h2);
    HllSketch result = union.getResult(resultType);
    int lgKR = result.getLgConfigK();
    String uSketchStr =("Union after H2: \n" + result.toString());

    double uEst = result.getEstimate();
    double uUb = result.getUpperBound(2);
    double uLb = result.getLowerBound(2);
    double rerr = ((uEst/tot) - 1.0) * 100; // relative error in percent
    String mode1 = h1.getCurMode().toString();
    String mode2 = h2.getCurMode().toString();
    String modeR = result.getCurMode().toString();

    //Control
    String cSketchStr = ("CONTROL SKETCH: \n" + control.toString());
    double controlEst = control.getEstimate();
    double controlUb = control.getUpperBound(2);
    double controlLb = control.getLowerBound(2);
    String h1ooo = h1.isOutOfOrder() ? "T" : "F";
    String h2ooo = h2.isOutOfOrder() ? "T" : "F";
    String resultooo = result.isOutOfOrder() ? "T" : "F";
    String row = String.format(dataFmt,
        n1, n2, tot,
        lgMaxK, lgK1, lgK2, lgKR,
        t1str, t2str,
        mode1, mode2, modeR,
        h1ooo, h2ooo, resultooo,
        uEst, rerr);
    println(h1SketchStr);
    println(h2SketchStr);
    println(uH1SketchStr);
    println(uSketchStr);
    println(cSketchStr);
    println(hdr);
    println(row);
    // Bounds must bracket the estimate for both the union result and the control.
    assertTrue((controlUb - controlEst) >= 0);
    assertTrue((uUb - uEst) >= 0);
    assertTrue((controlEst - controlLb) >= 0);
    assertTrue((uEst -uLb) >= 0);
  }

  /** Round-trips a union through a compact byte array wrapped in Memory. */
  @Test
  public void checkToFromUnion1() {
    for (int i = 0; i < 10; i++) {
      int n = nArr[i];
      for (int lgK = 4; lgK <= 13; lgK++) {
        toFrom1(lgK, HLL_4, n);
        toFrom1(lgK, HLL_6, n);
        toFrom1(lgK, HLL_8, n);
      }
      println("=======");
    }
  }

  // Serialize to compact bytes, heapify from Memory; the heapified union must not
  // share the Memory resource and must preserve the estimate exactly.
  private static void toFrom1(int lgK, TgtHllType tgtHllType, int n) {
    Union srcU = newUnion(lgK);
    HllSketch srcSk = new HllSketch(lgK, tgtHllType);
    for (int i = 0; i < n; i++) {
      srcSk.update(i);
    }
    println("n: " + n + ", lgK: " + lgK + ", type: " + tgtHllType);
    //printSketch(src, "SRC");
    srcU.update(srcSk);

    byte[] byteArr = srcU.toCompactByteArray();
    Memory mem = Memory.wrap(byteArr);
    Union dstU = Union.heapify(mem);
    assertFalse(dstU.isSameResource(mem));

    assertEquals(dstU.getEstimate(), srcU.getEstimate(), 0.0);
  }

  /** Round-trips a union through a compact byte array, heapified directly. */
  @Test
  public void checkToFromUnion2() {
    for (int i = 0; i < 10; i++) {
      int n = nArr[i];
      for (int lgK = 4; lgK <= 13; lgK++) {
        toFrom2(lgK, HLL_4, n);
        toFrom2(lgK, HLL_6, n);
        toFrom2(lgK, HLL_8, n);
      }
      println("=======");
    }
  }

  // Same as toFrom1 but heapifies from the byte array overload.
  private static void toFrom2(int lgK, TgtHllType tgtHllType, int n) {
    Union srcU = newUnion(lgK);
    HllSketch srcSk = new HllSketch(lgK, tgtHllType);
    for (int i = 0; i < n; i++) {
      srcSk.update(i);
    }
    println("n: " + n + ", lgK: " + lgK + ", type: " + tgtHllType);
    //printSketch(src, "SRC");
    srcU.update(srcSk);

    byte[] byteArr = srcU.toCompactByteArray();
    Union dstU = Union.heapify(byteArr);

    assertEquals(dstU.getEstimate(), srcU.getEstimate(), 0.0);
  }

  /** Composite estimate stays within ~3% through empty, LIST-sized, and larger streams. */
  @Test
  public void checkCompositeEst() {
    Union u = new Union();
    assertEquals(u.getCompositeEstimate(), 0, .03);
    for (int i = 1; i <= 15; i++) { u.update(i); }
    assertEquals(u.getCompositeEstimate(), 15, 15 *.03);
    for (int i = 15; i <= 1000; i++) { u.update(i); }
    assertEquals(u.getCompositeEstimate(), 1000, 1000 * .03);
  }

  /** Constructor rejects lgK outside [MIN_LOG_K, MAX_LOG_K]; empty union yields empty result. */
  @SuppressWarnings("unused")
  @Test
  public void checkMisc() {
    try {
      Union u = newUnion(HllUtil.MIN_LOG_K - 1);
      fail();
    } catch (SketchesArgumentException e) {
      //expected
    }
    try {
      Union u = newUnion(HllUtil.MAX_LOG_K + 1);
      fail();
    } catch (SketchesArgumentException e) {
      //expected
    }
    Union u = newUnion(7);
    HllSketch sk = u.getResult();
    assertTrue(sk.isEmpty());
  }

  /** Heapify of an updatable union image preserves the estimate exactly. */
  @Test
  public void checkHeapify() {
    Union u = newUnion(16);
    for (int i = 0; i < (1 << 20); i++) {
      u.update(i);
    }
    double est1 = u.getEstimate();
    byte[] byteArray = u.toUpdatableByteArray();
    Union u2 = Union.heapify(byteArray);
    assertEquals(u2.getEstimate(), est1, 0.0);
  }

  /** Prints the relative-error bound table for manual inspection; no assertions. */
  @Test //for lgK <= 12
  public void checkUbLb() {
    int lgK = 4;
    int n = 1 << 20;
    boolean oooFlag = false;
    println("LgK="+lgK+", UB3, " + ((getBound(lgK, true, oooFlag, 3, n) / n) - 1));
    println("LgK="+lgK+", UB2, " + ((getBound(lgK, true, oooFlag, 2, n) / n) - 1));
    println("LgK="+lgK+", UB1, " + ((getBound(lgK, true, oooFlag, 1, n) / n) - 1));
    println("LgK="+lgK+", LB1, " + ((getBound(lgK, false, oooFlag, 1, n) / n) - 1));
    println("LgK="+lgK+", LB2, " + ((getBound(lgK, false, oooFlag, 2, n) / n) - 1));
    println("LgK="+lgK+", LB3, " + ((getBound(lgK, false, oooFlag, 3, n) / n) - 1));
  }

  /** An empty coupon (0) must be ignored; checks accessor misc on a SET-mode union. */
  @Test
  public void checkEmptyCouponMisc() {
    int lgK = 8;
    Union union = newUnion(lgK);
    for (int i = 0; i < 20; i++) { union.update(i); } //SET mode
    union.couponUpdate(0); //empty coupon, should be a no-op
    assertEquals(union.getEstimate(), 20.0, 0.001);
    assertEquals(union.getTgtHllType(), TgtHllType.HLL_8);
    assertFalse(union.isMemory());
    assertFalse(union.isOffHeap());
    int bytes = union.getUpdatableSerializationBytes();
    assertTrue(bytes  <= Union.getMaxSerializationBytes(lgK));
    assertFalse(union.isCompact());
  }

  /** Updating a union with a read-only wrapped sketch preserves the estimate. */
  @Test
  public void checkUnionWithWrap() {
    int lgConfigK = 4;
    TgtHllType type = TgtHllType.HLL_4;
    int n = 2;
    HllSketch sk = new HllSketch(lgConfigK, type);
    for (int i = 0; i < n; i++) { sk.update(i); }
    double est = sk.getEstimate();
    byte[] skByteArr = sk.toCompactByteArray();

    HllSketch sk2 = HllSketch.wrap(Memory.wrap(skByteArr));
    assertEquals(sk2.getEstimate(), est, 0.0);

    Union union = newUnion(lgConfigK);
    union.update(HllSketch.wrap(Memory.wrap(skByteArr)));
    assertEquals(union.getEstimate(), est, 0.0);
  }

  /** Same as above but with a larger stream that reaches SET/HLL territory. */
  @Test
  public void checkUnionWithWrap2() {
    int lgConfigK = 10;
    int n = 128;
    HllSketch sk1 = new HllSketch(lgConfigK);
    for (int i = 0; i < n; i++) { sk1.update(i); }
    double est1 = sk1.getEstimate();
    byte[] byteArr1 = sk1.toCompactByteArray();

    Union union = newUnion(lgConfigK);
    union.update(HllSketch.wrap(Memory.wrap(byteArr1)));
    double est2 = union.getEstimate();
    assertEquals(est2, est1);
  }

  /** The estimate must be identical regardless of the requested result TgtHllType. */
  @Test
  public void checkConversions() {
    int lgK = 4;
    HllSketch sk1 = new HllSketch(lgK, TgtHllType.HLL_8);
    HllSketch sk2 = new HllSketch(lgK, TgtHllType.HLL_8);
    int u = 1 << 20;
    for (int i = 0; i < u; i++) {
      sk1.update(i);
      sk2.update(i + u); //disjoint stream
    }
    Union union = new Union(lgK);
    union.update(sk1);
    union.update(sk2);
    HllSketch rsk1 = union.getResult(TgtHllType.HLL_8);
    HllSketch rsk2 = union.getResult(TgtHllType.HLL_6);
    HllSketch rsk3 = union.getResult(TgtHllType.HLL_4);
    double est1 = rsk1.getEstimate();
    double est2 = rsk2.getEstimate();
    double est3 = rsk3.getEstimate();
    //println("Est1: " + est1);
    //println("Est2: " + est2);
    //println("Est3: " + est3);
    //println("Result HLL8: " + rsk1.toString(true, true, true, false));
    //println("Result HLL4: " + rsk3.toString(true, true, true, false));
    assertEquals(est2, est1, 0.0);
    assertEquals(est3, est1, 0.0);
  }

  /**
   * After merging two dense sketches the union's image carries the rebuild flag;
   * heapifying that image as a sketch must perform the rebuild and clear the flag.
   */
  @Test
  public void checkUnionHeapifyRebuildAfterMerge() {
    int lgK = 12;
    //Build 2 sketches in HLL (dense) mode.
    int u = 1 << (lgK - 3); //(lgK < 8) ? 16 : 1 << (lgK - 3) //allows changing lgK above
    HllSketch sk1 = new HllSketch(lgK);
    HllSketch sk2 = new HllSketch(lgK);
    for (int i = 0; i < u; i++) {
      sk1.update(i);
      sk2.update(i + u);
    }
    final int bytes = Union.getMaxSerializationBytes(lgK);
    WritableMemory wmem = WritableMemory.allocate(bytes);
    Union union1 = new Union(lgK, wmem); //Create original union off-heap
    union1.update(sk1);
    union1.update(sk2); //oooFlag = Rebuild_KxQ = TRUE
    boolean rebuild = PreambleUtil.extractRebuildCurMinNumKxQFlag(wmem);
    double hipAccum = PreambleUtil.extractHipAccum(wmem);
    assertTrue(rebuild);
    assertTrue(hipAccum == 0.0);
    //Heapify byteArr as if it were a sketch, but it is actually a union!
    HllSketch sk3 = HllSketch.heapify(wmem); //rebuilds sk3
    rebuild = sk3.hllSketchImpl.isRebuildCurMinNumKxQFlag();
    assertFalse(rebuild);
  }

  /** Similar to above except uses wrap instead of heapify; mimics the Druid usage pattern. */
  @Test
  public void druidUseCase() {
    final int lgK = 12;
    final int bytes = Union.getMaxSerializationBytes(lgK);
    WritableMemory wmem = WritableMemory.allocate(bytes);
    new Union(lgK, wmem); // result is unused, relying on side effect
    int trueCount = 0;
    int delta = 1 << (lgK - 3); //(lgK < 8) ? 16 : 1 << (lgK - 3) //allows changing lgK above
    for (int i = 0; i < 3; i++) {
      // Re-wrap the same memory each round, as an external system would.
      Union.writableWrap(wmem).update(buildSketch(trueCount, delta));
      trueCount += delta;
    }
    boolean rebuild = PreambleUtil.extractRebuildCurMinNumKxQFlag(wmem);
    double hipAccum = PreambleUtil.extractHipAccum(wmem);
    assertTrue(rebuild);
    assertTrue(hipAccum == 0.0);
    HllSketch result = Union.writableWrap(wmem).getResult(); //rebuilds result
    rebuild = result.hllSketchImpl.isRebuildCurMinNumKxQFlag();
    assertFalse(rebuild);
    double est = result.getEstimate();
    double err = (est / trueCount) - 1.0;
    double rse3 = (3 * 1.04)/Math.sqrt(1 << lgK); //3-sigma relative standard error
    println(err + " < " + rse3);
    assertTrue(err < rse3);
  }

  // Builds a heap sketch of lgK = 10 updated with [start, start + count).
  private static HllSketch buildSketch(final int start, final int count) {
    HllSketch sketch = new HllSketch(10);
    for (int i = start; i < (start + count); i++) {
      sketch.update(i);
    }
    return sketch;
  }

  private static Union newUnion(int lgK) {
    return new Union(lgK);
  }

  // Inverts the relative-error tables: returns the raw estimate corresponding
  // to the requested bound for a true value of est.
  private static double getBound(int lgK, boolean ub, boolean oooFlag, int numStdDev, double est) {
    double re = RelativeErrorTables.getRelErr(ub, oooFlag, lgK, numStdDev);
    return est / (1.0 + re);
  }

  @Test
  public void printlnTest() {
    println("PRINTING: "+this.getClass().getName());
  }

  /**
   * @param s value to print
   */
  static void println(Object s) {
    print(s.toString() + LS);
  }

  /**
   * @param s value to print
   */
  static void print(Object s) {
    //System.out.print(s.toString()); //disable here
  }
}
2,297
0
Create_ds/datasketches-java/src/test/java/org/apache/datasketches
Create_ds/datasketches-java/src/test/java/org/apache/datasketches/hll/UnionCaseTest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.datasketches.hll;

import static org.apache.datasketches.hll.CurMode.LIST;
import static org.apache.datasketches.hll.CurMode.SET;
import static org.apache.datasketches.hll.HllUtil.HLL_HIP_RSE_FACTOR;
import static org.apache.datasketches.hll.HllUtil.HLL_NON_HIP_RSE_FACTOR;
import static org.apache.datasketches.hll.TgtHllType.HLL_4;
import static org.apache.datasketches.hll.TgtHllType.HLL_6;
import static org.apache.datasketches.hll.TgtHllType.HLL_8;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertTrue;

import org.apache.datasketches.common.SketchesStateException;
import org.apache.datasketches.memory.WritableMemory;
import org.testng.annotations.Test;

/**
 * Exhaustively exercises Union.update(source) over the cross-product of
 * source/gadget modes (LIST, SET, HLL, empty), source lgK relative to maxLgK,
 * heap vs memory backing, and source TgtHllType. Each case is encoded in a
 * small bit-packed case number (see getSource/getUnion and the helpers below).
 *
 * @author Lee Rhodes
 */
public class UnionCaseTest {
  private static final String LS = System.getProperty("line.separator");
  // Running base of the distinct-value stream shared by ALL builders below;
  // each builder consumes n values and advances v, so build order is significant.
  long v = 0;
  final static int maxLgK = 12;
  HllSketch source;
  //Union union;
  String hfmt = "%10s%10s%10s%10s%10s%10s%10s%10s%10s%10s%10s" + LS;
  String hdr = String.format(hfmt, "caseNum","srcLgKStr","gdtLgKStr","srcType","gdtType",
      "srcMem","gdtMem","srcMode","gdtMode","srcOoof","gdtOoof");

  /** Runs all 24 bit-encoded cases for each source type and both source backings. */
  @Test
  public void checkAllCases() {
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_4, false); }
    println("");
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_6, false); }
    println("");
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_8, false); }
    println("");
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_4, true); }
    println("");
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_6, true); }
    println("");
    print(hdr);
    for (int i = 0; i < 24; i++) { checkCase(i, HLL_8, true); }
    println("");
  }

  // Decodes one case: bit 0 selects the gadget backing; the source and union
  // are fed disjoint streams (via the shared v), so the expected total is the sum.
  private void checkCase(int caseNum, TgtHllType srcType, boolean srcMem) {
    source = getSource(caseNum, srcType, srcMem);
    boolean gdtMem = (caseNum & 1) > 0;
    Union union = getUnion(caseNum, gdtMem);
    union.update(source);
    int totalU = getSrcCount(caseNum, maxLgK) + getUnionCount(caseNum);
    output(caseNum, source, union, totalU);
  }

  // Prints one table row (disabled by default) and asserts the union's relative
  // error is within 3 standard errors of the appropriate RSE factor.
  private void output(int caseNum, HllSketch source, Union union, int totalU) {
    double estU = union.getEstimate();
    double err = Math.abs((estU / totalU) - 1.0);
    int gdtLgK = union.getLgConfigK();
    boolean uooof = union.isOutOfOrder();
    // Out-of-order results cannot use the HIP estimator's tighter error bound.
    double rseFactor = (uooof) ? HLL_NON_HIP_RSE_FACTOR : HLL_HIP_RSE_FACTOR;
    double rse = (rseFactor * 3) / Math.sqrt(1 << gdtLgK); //99.7% conf
    //output other parameters
    String caseNumStr = Integer.toString(caseNum);
    String srcLgKStr = Integer.toString(source.getLgConfigK());
    String gdtLgKStr = Integer.toString(union.getLgConfigK());
    String srcType = source.getTgtHllType().toString();
    String gdtType = union.getTgtHllType().toString();
    String srcMem = Boolean.toString(source.isMemory());
    String gdtMem = Boolean.toString(union.isMemory());
    String srcMode = source.getCurMode().toString();
    String gdtMode = union.getCurMode().toString();
    String srcOoof = Boolean.toString(source.isOutOfOrder());
    String gdtOoof = Boolean.toString(union.isOutOfOrder());
    printf(hfmt, caseNumStr, srcLgKStr, gdtLgKStr, srcType, gdtType,
        srcMem, gdtMem, srcMode, gdtMode, srcOoof, gdtOoof);
    assertTrue(err < rse, "Err: " + err + ", RSE: " + rse);
  }

  // Builds the source sketch for a case; always populated into HLL mode.
  private HllSketch getSource(int caseNum, TgtHllType tgtHllType, boolean memory) {
    int srcLgK = getSrcLgK(caseNum, maxLgK);
    int srcU = getSrcCount(caseNum, maxLgK);
    if (memory) {
      return buildMemorySketch(srcLgK, tgtHllType, srcU);
    } else {
      return buildHeapSketch(srcLgK, tgtHllType, srcU);
    }
  }

  // Builds the gadget (union) for a case, pre-populated per getUnionCount.
  private Union getUnion(int caseNum, boolean memory) {
    int unionU = getUnionCount(caseNum);
    return (memory) ? buildMemoryUnion(maxLgK, unionU) : buildHeapUnion(maxLgK, unionU);
  }

  // Bits 1-2 of caseNum select the gadget's starting mode via its update count.
  private static int getUnionCount(int caseNum) {
    int gdtMode = (caseNum >> 1) & 3; //list, set, hll, empty
    return (gdtMode == 0) ? 4 : (gdtMode == 1) ? 380 : (gdtMode == 2) ? 400 : 0;
  }

  // Source count: 3/4 of k plus 100, which drives the source into HLL mode.
  private static int getSrcCount(int caseNum, int maxLgK) {
    int srcLgK = getSrcLgK(caseNum, maxLgK);
    return (((1 << srcLgK) * 3) / 4) + 100; //always HLL
  }

  // Bits 3-4 of caseNum select the source lgK relative to maxLgK: equal, -1, or +1.
  private static int getSrcLgK(int caseNum, int maxLgK) {
    int srcLgK = maxLgK;
    int bits34 = (caseNum >> 3) & 3;
    if (bits34 == 1) { srcLgK = maxLgK - 1;}
    if (bits34 == 2) { srcLgK = maxLgK + 1;}
    return srcLgK;
  }

  /** Accessor spot-checks on an empty union: sizes, bounds, reset, serialization. */
  @Test
  public void checkMisc() {
    Union u = buildHeapUnion(12, 0);
    int bytes = u.getCompactSerializationBytes();
    assertEquals(bytes, 8); //empty image is header-only
    bytes = Union.getMaxSerializationBytes(7);
    assertEquals(bytes, 40 + 128);
    double v = u.getEstimate();
    assertEquals(v, 0.0, 0.0);
    v = u.getLowerBound(1);
    assertEquals(v, 0.0, 0.0);
    v = u.getUpperBound(1);
    assertEquals(v, 0.0, 0.0);
    assertTrue(u.isEmpty());
    u.reset();
    assertTrue(u.isEmpty());
    println(u.toString(true, false, false, false));
    byte[] bArr = u.toCompactByteArray();
    assertEquals(bArr.length, 8);
  }

  /** src: LIST, gadget: LIST — union stays in LIST mode. */
  @Test
  public void checkSrcListList() { //src: LIST, gadget: LIST
    int n1 = 2;
    int n2 = 3;
    int n3 = 2;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);          //gdt = list
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //src = list
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); //src = list
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /** src: SET, gadget: LIST — a SET-mode source promotes the union to SET. */
  @Test
  public void checkSrcListSet() { //src: SET, gadget: LIST
    int n1 = 5;
    int n2 = 2;
    int n3 = 16;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);          //LIST, 5
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //LIST, 2
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); //SET, 16
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /** src: LIST, gadget: SET — union already in SET stays in SET. */
  @Test
  public void checkSrcSetList() { //src: LIST, gadget: SET
    int n1 = 6;
    int n2 = 10;
    int n3 = 6;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //SET
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); //LIST
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /** src: SET, gadget: SET. */
  @Test
  public void checkSrcSetSet() { //src: SET, gadget: SET
    int n1 = 6;
    int n2 = 10;
    int n3 = 16;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //src: SET
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); //src: SET
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /** src: LIST, gadget: empty — an empty source update is ignored. */
  @Test
  public void checkSrcEmptyList() { //src: LIST, gadget: empty
    int n1 = 0;
    int n2 = 0;
    int n3 = 7;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);          //LIST empty
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //src: LIST empty, ignored
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); //src: LIST
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /** src: SET, gadget: empty — SET source promotes the empty union straight to SET. */
  @Test
  public void checkSrcEmptySet() {
    int n1 = 0;
    int n2 = 0;
    int n3 = 16;
    int sum = n1 + n2 + n3;
    Union u = buildHeapUnion(12, n1);          //LIST empty
    HllSketch h2 = buildHeapSketch(11, HLL_6, n2); //LIST empty, ignored
    HllSketch h3 = buildHeapSketch(10, HLL_8, n3); // Src Set
    u.update(h2);
    println(u.toString());
    assertEquals(u.getCurMode(), LIST);
    u.update(h3);
    println(u.toString());
    assertEquals(u.getCurMode(), SET);
    assertEquals(u.getLgConfigK(), 12);
    assertFalse(u.isOutOfOrder());
    double err = sum * errorFactor(u.getLgConfigK(), u.isOutOfOrder(), 3.0);
    println("ErrToll: " + err);
    assertEquals(u.getEstimate(), sum, err);
  }

  /**
   * After each merge the rebuild flag is set; every read-path operation
   * (estimates, bounds, result, serialization) must clear it.
   */
  @SuppressWarnings("unused")
  @Test
  public void checkSpecialMergeCase4() {
    Union u = buildHeapUnion(12, 1 << 9);
    HllSketch sk = buildHeapSketch(12, HLL_8, 1 << 9);
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    u.getCompositeEstimate();
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    u.getLowerBound(2);
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    u.getUpperBound(2);
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    u.getResult();
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    byte[] ba = u.toCompactByteArray();
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.update(sk);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    ba = u.toUpdatableByteArray();
    assertFalse(u.isRebuildCurMinNumKxQFlag());
    u.putRebuildCurMinNumKxQFlag(true);
    assertTrue(u.isRebuildCurMinNumKxQFlag());
    u.putRebuildCurMinNumKxQFlag(false);
    assertFalse(u.isRebuildCurMinNumKxQFlag());
  }

  /** Union.update must tolerate a heap source with a corrupted rebuild flag. */
  @Test
  public void checkRebuildCurMinNumKxQFlag1() {
    HllSketch sk = buildHeapSketch(4, HLL_8, 16);
    HllArray hllArr = (HllArray)(sk.hllSketchImpl); //white-box access
    hllArr.putRebuildCurMinNumKxQFlag(true); //corrupt the flag
    Union union = buildHeapUnion(4, 0);
    union.update(sk);
  }

  /** writableWrap must tolerate a memory image with a corrupted rebuild flag. */
  @Test
  public void checkRebuildCurMinNumKxQFlag2() {
    HllSketch sk = buildMemorySketch(4, HLL_8, 16);
    DirectHllArray hllArr = (DirectHllArray)(sk.hllSketchImpl); //white-box access
    hllArr.putRebuildCurMinNumKxQFlag(true); //corrupt the flag
    WritableMemory wmem = sk.getWritableMemory();
    Union.writableWrap(wmem);
  }

  /** mergeTo is not supported on a plain sketch target and must throw. */
  @Test(expectedExceptions = SketchesStateException.class)
  public void checkHllMergeToException() {
    HllSketch src = buildHeapSketch(4, HLL_8, 16);
    HllSketch tgt = buildHeapSketch(4, HLL_8, 16);
    AbstractHllArray absHllArr = (AbstractHllArray)(src.hllSketchImpl);
    absHllArr.mergeTo(tgt);
  }

  // Relative error tolerance: 1.04 (non-HIP, out-of-order) or 0.9 (HIP) times
  // numStdDev over sqrt(k).
  private static double errorFactor(int lgK, boolean oooFlag, double numStdDev) {
    double f;
    if (oooFlag) {
      f = (1.04 * numStdDev) / Math.sqrt(1 << lgK);
    } else {
      f = (0.9 * numStdDev) / Math.sqrt(1 << lgK);
    }
    return f;
  }

  //BUILDERS
  // All builders draw n consecutive values starting at the shared field v and
  // then advance v, so successive builds see disjoint streams.

  private Union buildHeapUnion(int lgMaxK, int n) {
    Union u = new Union(lgMaxK);
    for (int i = 0; i < n; i++) { u.update(i + v); }
    v += n;
    return u;
  }

  private Union buildMemoryUnion(int lgMaxK, int n) {
    final int bytes = HllSketch.getMaxUpdatableSerializationBytes(lgMaxK, TgtHllType.HLL_8);
    WritableMemory wmem = WritableMemory.allocate(bytes);
    Union u = new Union(lgMaxK, wmem);
    for (int i = 0; i < n; i++) { u.update(i + v); }
    v += n;
    return u;
  }

  private HllSketch buildHeapSketch(int lgK, TgtHllType tgtHllType, int n) {
    HllSketch sk = new HllSketch(lgK, tgtHllType);
    for (int i = 0; i < n; i++) { sk.update(i + v); }
    v += n;
    return sk;
  }

  private HllSketch buildMemorySketch(int lgK, TgtHllType tgtHllType, int n) {
    final int bytes = HllSketch.getMaxUpdatableSerializationBytes(lgK,tgtHllType);
    WritableMemory wmem = WritableMemory.allocate(bytes);
    HllSketch sk = new HllSketch(lgK, tgtHllType, wmem);
    for (int i = 0; i < n; i++) { sk.update(i + v); }
    v += n;
    return sk;
  }

  @Test
  public void printlnTest() {
    println("PRINTING: "+this.getClass().getName());
  }

  /**
   * @param o value to print
   */
  static void println(Object o) {
    print(o.toString() + "\n");
  }

  /**
   * @param o value to print
   */
  static void print(Object o) {
    //System.out.print(o.toString()); //disable here
  }

  /**
   * @param fmt format
   * @param args arguments
   */
  static void printf(String fmt, Object...args) {
    //System.out.printf(fmt, args); //disable here
  }
}
2,298
0
Create_ds/datasketches-java/src/test/java/org/apache/datasketches
Create_ds/datasketches-java/src/test/java/org/apache/datasketches/hll/CrossCountingTest.java
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.datasketches.hll; import static org.apache.datasketches.hll.TgtHllType.HLL_4; import static org.apache.datasketches.hll.TgtHllType.HLL_6; import static org.apache.datasketches.hll.TgtHllType.HLL_8; import static org.testng.Assert.assertEquals; import org.testng.annotations.Test; /** * @author Lee Rhodes */ @SuppressWarnings("unused") public class CrossCountingTest { static final String LS = System.getProperty("line.separator"); @Test public void crossCountingChecks() { crossCountingCheck(4, 100); crossCountingCheck(4, 10000); crossCountingCheck(12, 7); crossCountingCheck(12, 384); crossCountingCheck(12, 10000); } void crossCountingCheck(int lgK, int n) { HllSketch sk4 = buildSketch(n, lgK, HLL_4); int s4csum = computeChecksum(sk4); //println(sk4.toString(true, true, true, true)); int csum; HllSketch sk6 = buildSketch(n, lgK, HLL_6); csum = computeChecksum(sk6); assertEquals(csum, s4csum); //println(sk6.toString(true, true, true, true)); HllSketch sk8 = buildSketch(n, lgK, HLL_8); csum = computeChecksum(sk8); assertEquals(csum, s4csum); //println(sk8.toString(true, true, true, true)); //Conversions // println("\nConverted HLL_6 to HLL_4:"); HllSketch sk6to4 = sk6.copyAs(HLL_4); 
csum = computeChecksum(sk6to4); assertEquals(csum, s4csum); // println(sk6to4.toString(true, true, true, true)); // println("\nConverted HLL_8 to HLL_4:"); HllSketch sk8to4 = sk8.copyAs(HLL_4); csum = computeChecksum(sk8to4); assertEquals(csum, s4csum); // println(sk8to4.toString(true, true, true, true)); // println("\nConverted HLL_4 to HLL_6:"); HllSketch sk4to6 = sk4.copyAs(HLL_6); csum = computeChecksum(sk4to6); //println(sk4to6.toString(true, true, true, true)); assertEquals(csum, s4csum); // println("\nConverted HLL_8 to HLL_6:"); HllSketch sk8to6 = sk8.copyAs(HLL_6); csum = computeChecksum(sk8to6); assertEquals(csum, s4csum); // println(sk8to6.toString(true, true, true, true)); // println("\nConverted HLL_4 to HLL_8:"); HllSketch sk4to8 = sk4.copyAs(HLL_8); csum = computeChecksum(sk4to8); assertEquals(csum, s4csum); // println(sk4to8.toString(true, true, true, true)); // println("\nConverted HLL_6 to HLL_8:"); HllSketch sk6to8 = sk6.copyAs(HLL_8); csum = computeChecksum(sk6to8); assertEquals(csum, s4csum); // println(sk6to8.toString(true, true, true, true)); } private static HllSketch buildSketch(int n, int lgK, TgtHllType tgtHllType) { HllSketch sketch = new HllSketch(lgK, tgtHllType); for (int i = 0; i < n; i++) { sketch.update(i); } return sketch; } private static int computeChecksum(HllSketch sketch) { PairIterator itr = sketch.iterator(); int checksum = 0; int key = 0; while (itr.nextAll()) { checksum += itr.getPair(); key = itr.getKey(); //dummy } return checksum; } @Test public void printlnTest() { println("PRINTING: "+this.getClass().getName()); } /** * @param s value to print */ static void println(String s) { print(s + LS); } /** * @param s value to print */ static void print(String s) { //System.out.print(s); //disable here } }
2,299