index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SchemaParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ExecutionException;
/**
 * Parses {@link Schema} instances from JSON and writes them back out as JSON.
 * <p>
 * The JSON format represents a schema as a struct type: primitive types are plain strings
 * (e.g. "int", "string"), while struct, list, and map types are JSON objects tagged with a
 * "type" field. Field and element IDs are carried alongside the types.
 */
public class SchemaParser {
  // JSON field names used by the schema serialization format
  private static final String TYPE = "type";
  private static final String STRUCT = "struct";
  private static final String LIST = "list";
  private static final String MAP = "map";
  private static final String FIELDS = "fields";
  private static final String ELEMENT = "element";
  private static final String KEY = "key";
  private static final String VALUE = "value";
  private static final String NAME = "name";
  private static final String ID = "id";
  private static final String ELEMENT_ID = "element-id";
  private static final String KEY_ID = "key-id";
  private static final String VALUE_ID = "value-id";
  private static final String REQUIRED = "required";
  private static final String ELEMENT_REQUIRED = "element-required";
  private static final String VALUE_REQUIRED = "value-required";

  // Caches parsed schemas keyed by their JSON text. Weak values allow entries to be
  // collected once the Schema is no longer referenced elsewhere.
  // Declared final: a mutable static field could be reassigned and lose the shared cache.
  private static final Cache<String, Schema> SCHEMA_CACHE = CacheBuilder.newBuilder()
      .weakValues()
      .build();

  /** Writes a struct type as a JSON object with a "fields" array of id/name/required/type. */
  static void toJson(Types.StructType struct, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeStringField(TYPE, STRUCT);
    generator.writeArrayFieldStart(FIELDS);
    for (Types.NestedField field : struct.fields()) {
      generator.writeStartObject();
      generator.writeNumberField(ID, field.fieldId());
      generator.writeStringField(NAME, field.name());
      generator.writeBooleanField(REQUIRED, field.isRequired());
      generator.writeFieldName(TYPE);
      toJson(field.type(), generator);
      generator.writeEndObject();
    }
    generator.writeEndArray();
    generator.writeEndObject();
  }

  /** Writes a list type as a JSON object with element id, element type, and requiredness. */
  static void toJson(Types.ListType list, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeStringField(TYPE, LIST);
    generator.writeNumberField(ELEMENT_ID, list.elementId());
    generator.writeFieldName(ELEMENT);
    toJson(list.elementType(), generator);
    // the JSON field is "required", which is the inverse of the model's "optional"
    generator.writeBooleanField(ELEMENT_REQUIRED, !list.isElementOptional());
    generator.writeEndObject();
  }

  /** Writes a map type as a JSON object with key/value ids, types, and value requiredness. */
  static void toJson(Types.MapType map, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeStringField(TYPE, MAP);
    generator.writeNumberField(KEY_ID, map.keyId());
    generator.writeFieldName(KEY);
    toJson(map.keyType(), generator);
    generator.writeNumberField(VALUE_ID, map.valueId());
    generator.writeFieldName(VALUE);
    toJson(map.valueType(), generator);
    // map keys are always required; only the value's requiredness is recorded
    generator.writeBooleanField(VALUE_REQUIRED, !map.isValueOptional());
    generator.writeEndObject();
  }

  /** Writes a primitive type as its string representation, e.g. "long" or "decimal(9,2)". */
  static void toJson(Type.PrimitiveType primitive, JsonGenerator generator) throws IOException {
    generator.writeString(primitive.toString());
  }

  /** Dispatches to the specific writer for primitive, struct, list, or map types. */
  static void toJson(Type type, JsonGenerator generator) throws IOException {
    if (type.isPrimitiveType()) {
      toJson(type.asPrimitiveType(), generator);
    } else {
      Type.NestedType nested = type.asNestedType();
      switch (type.typeId()) {
        case STRUCT:
          toJson(nested.asStructType(), generator);
          break;
        case LIST:
          toJson(nested.asListType(), generator);
          break;
        case MAP:
          toJson(nested.asMapType(), generator);
          break;
        default:
          throw new IllegalArgumentException("Cannot write unknown type: " + type);
      }
    }
  }

  /**
   * Writes a schema to the given generator as its underlying struct type.
   *
   * @param schema the schema to serialize
   * @param generator destination for JSON output
   * @throws IOException if the generator fails to write
   */
  public static void toJson(Schema schema, JsonGenerator generator) throws IOException {
    toJson(schema.asStruct(), generator);
  }

  /**
   * Serializes a schema to a compact JSON string.
   *
   * @param schema the schema to serialize
   * @return the JSON representation
   */
  public static String toJson(Schema schema) {
    return toJson(schema, false);
  }

  /**
   * Serializes a schema to a JSON string.
   *
   * @param schema the schema to serialize
   * @param pretty whether to pretty-print the output
   * @return the JSON representation
   * @throws RuntimeIOException if writing fails
   */
  public static String toJson(Schema schema, boolean pretty) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      if (pretty) {
        generator.useDefaultPrettyPrinter();
      }
      toJson(schema.asStruct(), generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /** Parses a type node: a string is a primitive; an object is a struct, list, or map. */
  private static Type typeFromJson(JsonNode json) {
    if (json.isTextual()) {
      return Types.fromPrimitiveString(json.asText());
    } else if (json.isObject()) {
      String type = json.get(TYPE).asText();
      if (STRUCT.equals(type)) {
        return structFromJson(json);
      } else if (LIST.equals(type)) {
        return listFromJson(json);
      } else if (MAP.equals(type)) {
        return mapFromJson(json);
      }
    }

    throw new IllegalArgumentException("Cannot parse type from json: " + json);
  }

  /** Parses a struct type from its JSON object, validating the "fields" array. */
  private static Types.StructType structFromJson(JsonNode json) {
    JsonNode fieldArray = json.get(FIELDS);
    Preconditions.checkArgument(fieldArray.isArray(),
        "Cannot parse struct fields from non-array: %s", fieldArray);

    List<Types.NestedField> fields = Lists.newArrayListWithExpectedSize(fieldArray.size());
    Iterator<JsonNode> iterator = fieldArray.elements();
    while (iterator.hasNext()) {
      JsonNode field = iterator.next();
      Preconditions.checkArgument(field.isObject(),
          "Cannot parse struct field from non-object: %s", field);

      int id = JsonUtil.getInt(ID, field);
      String name = JsonUtil.getString(NAME, field);
      Type type = typeFromJson(field.get(TYPE));

      boolean isRequired = JsonUtil.getBool(REQUIRED, field);
      if (isRequired) {
        fields.add(Types.NestedField.required(id, name, type));
      } else {
        fields.add(Types.NestedField.optional(id, name, type));
      }
    }

    return Types.StructType.of(fields);
  }

  /** Parses a list type from its JSON object. */
  private static Types.ListType listFromJson(JsonNode json) {
    int elementId = JsonUtil.getInt(ELEMENT_ID, json);
    Type elementType = typeFromJson(json.get(ELEMENT));
    boolean isRequired = JsonUtil.getBool(ELEMENT_REQUIRED, json);

    if (isRequired) {
      return Types.ListType.ofRequired(elementId, elementType);
    } else {
      return Types.ListType.ofOptional(elementId, elementType);
    }
  }

  /** Parses a map type from its JSON object. */
  private static Types.MapType mapFromJson(JsonNode json) {
    int keyId = JsonUtil.getInt(KEY_ID, json);
    Type keyType = typeFromJson(json.get(KEY));

    int valueId = JsonUtil.getInt(VALUE_ID, json);
    Type valueType = typeFromJson(json.get(VALUE));

    boolean isRequired = JsonUtil.getBool(VALUE_REQUIRED, json);
    if (isRequired) {
      return Types.MapType.ofRequired(keyId, valueId, keyType, valueType);
    } else {
      return Types.MapType.ofOptional(keyId, valueId, keyType, valueType);
    }
  }

  /**
   * Builds a {@link Schema} from a parsed JSON tree.
   *
   * @param json the root JSON node, which must describe a struct type
   * @return the parsed schema
   * @throws IllegalArgumentException if the root node is not a struct
   */
  public static Schema fromJson(JsonNode json) {
    Type type = typeFromJson(json);
    Preconditions.checkArgument(type.isNestedType() && type.asNestedType().isStructType(),
        "Cannot create schema, not a struct type: %s", type);
    return new Schema(type.asNestedType().asStructType().fields());
  }

  /**
   * Parses a schema from a JSON string, using a cache of previously parsed schemas.
   *
   * @param json the JSON text of a struct schema
   * @return the parsed schema
   * @throws RuntimeIOException if the JSON cannot be read
   */
  public static Schema fromJson(String json) {
    try {
      return SCHEMA_CACHE.get(json,
          () -> fromJson(JsonUtil.mapper().readValue(json, JsonNode.class)));
    } catch (ExecutionException e) {
      // unwrap the cache-loader failure to preserve the original cause for callers
      if (e.getCause() instanceof IOException) {
        throw new RuntimeIOException(
            (IOException) e.getCause(), "Failed to parse schema: %s", json);
      } else {
        throw new RuntimeException("Failed to parse schema: " + json, e.getCause());
      }
    }
  }
}
| 6,300 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ScanSummary.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.And;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expression.Operation;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.expressions.NamedReference;
import com.netflix.iceberg.expressions.UnboundPredicate;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.Pair;
import java.io.IOException;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
/**
 * Summarizes a {@link TableScan} as per-partition metrics: file count, record count,
 * total size, and the most recent data timestamp.
 * <p>
 * Timestamp predicates on "dateCreated"/"lastUpdated" in the scan filter are removed from
 * the row filter and instead used to restrict which snapshots' files are counted.
 */
public class ScanSummary {
  private ScanSummary() {
  }

  // manifest-entry columns needed to compute the summary; other columns are not projected
  private static final List<String> SCAN_SUMMARY_COLUMNS = ImmutableList.of(
      "partition", "record_count", "file_size_in_bytes");

  /**
   * Create a scan summary builder for a table scan.
   *
   * @param scan a TableScan
   * @return a scan summary builder
   */
  public static ScanSummary.Builder of(TableScan scan) {
    return new Builder(scan);
  }

  public static class Builder {
    // filter-column names that are treated as timestamp predicates rather than row filters
    private static final Set<String> TIMESTAMP_NAMES = Sets.newHashSet(
        "dateCreated", "lastUpdated");
    private final TableScan scan;
    private final Table table;
    private final TableOperations ops;
    // snapshot id -> commit timestamp (millis), for every snapshot known to the table
    private final Map<Long, Long> snapshotTimestamps;
    private int limit = Integer.MAX_VALUE;
    private boolean throwIfLimited = false;
    // accumulated predicates against the synthetic "timestamp_ms" column
    private List<UnboundPredicate<Long>> timeFilters = Lists.newArrayList();

    public Builder(TableScan scan) {
      this.scan = scan;
      this.table = scan.table();
      this.ops = ((HasTableOperations) table).operations();
      ImmutableMap.Builder<Long, Long> builder = ImmutableMap.builder();
      for (Snapshot snap : table.snapshots()) {
        builder.put(snap.snapshotId(), snap.timestampMillis());
      }
      this.snapshotTimestamps = builder.build();
    }

    private void addTimestampFilter(UnboundPredicate<Long> filter) {
      throwIfLimited(); // ensure all partitions can be returned
      timeFilters.add(filter);
    }

    // accepts a timestamp string; converted to millis before filtering
    public Builder after(String timestamp) {
      Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      // timestamp literals are in microseconds; divide to get milliseconds
      return after(tsLiteral.value() / 1000);
    }

    public Builder after(long timestampMillis) {
      addTimestampFilter(Expressions.greaterThanOrEqual("timestamp_ms", timestampMillis));
      return this;
    }

    public Builder before(String timestamp) {
      Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      // timestamp literals are in microseconds; divide to get milliseconds
      return before(tsLiteral.value() / 1000);
    }

    public Builder before(long timestampMillis) {
      addTimestampFilter(Expressions.lessThanOrEqual("timestamp_ms", timestampMillis));
      return this;
    }

    // fail instead of silently truncating when the partition limit is exceeded
    public Builder throwIfLimited() {
      this.throwIfLimited = true;
      return this;
    }

    public Builder limit(int numPartitions) {
      this.limit = numPartitions;
      return this;
    }

    // walks the filter tree: timestamp predicates become time filters; everything else is
    // collected into expressions to be re-joined as the row filter
    private void removeTimeFilters(List<Expression> expressions, Expression expression) {
      if (expression.op() == Operation.AND) {
        And and = (And) expression;
        removeTimeFilters(expressions, and.left());
        removeTimeFilters(expressions, and.right());
        return;
      } else if (expression instanceof UnboundPredicate) {
        UnboundPredicate pred = (UnboundPredicate) expression;
        NamedReference ref = (NamedReference) pred.ref();
        Literal<?> lit = pred.literal();
        if (TIMESTAMP_NAMES.contains(ref.name())) {
          Literal<Long> tsLiteral = lit.to(Types.TimestampType.withoutZone());
          long millis = toMillis(tsLiteral.value());
          addTimestampFilter(Expressions.predicate(pred.op(), "timestamp_ms", millis));
          return;
        }
      }
      expressions.add(expression);
    }

    /**
     * Summarizes a table scan as a map of partition key to metrics for that partition.
     *
     * @return a map from partition key to metrics for that partition.
     */
    public Map<String, PartitionMetrics> build() {
      if (table.currentSnapshot() == null) {
        return ImmutableMap.of(); // no snapshots, so there are no partitions
      }
      TopN<String, PartitionMetrics> topN = new TopN<>(
          limit, throwIfLimited, Comparators.charSequences());
      // split the scan filter into timestamp filters and the remaining row filter
      List<Expression> filters = Lists.newArrayList();
      removeTimeFilters(filters, Expressions.rewriteNot(scan.filter()));
      Expression rowFilter = joinFilters(filters);
      Iterable<ManifestFile> manifests = table.currentSnapshot().manifests();
      boolean filterByTimestamp = !timeFilters.isEmpty();
      Set<Long> snapshotsInTimeRange = Sets.newHashSet();
      if (filterByTimestamp) {
        Pair<Long, Long> range = timestampRange(timeFilters);
        long minTimestamp = range.first();
        long maxTimestamp = range.second();
        // track the oldest known snapshot to detect when expired snapshots may have matched
        Snapshot oldestSnapshot = table.currentSnapshot();
        for (Map.Entry<Long, Long> entry : snapshotTimestamps.entrySet()) {
          long snapshotId = entry.getKey();
          long timestamp = entry.getValue();
          if (timestamp < oldestSnapshot.timestampMillis()) {
            oldestSnapshot = ops.current().snapshot(snapshotId);
          }
          if (timestamp >= minTimestamp && timestamp <= maxTimestamp) {
            snapshotsInTimeRange.add(snapshotId);
          }
        }
        // if oldest known snapshot is in the range, then there may be an expired snapshot that has
        // been removed that matched the range. because the timestamp of that snapshot is unknown,
        // it can't be included in the results and the results are not reliable.
        if (snapshotsInTimeRange.contains(oldestSnapshot.snapshotId()) &&
            minTimestamp < oldestSnapshot.timestampMillis()) {
          throw new IllegalArgumentException(
              "Cannot satisfy time filters: time range may include expired snapshots");
        }
        // filter down to the set of manifest files that were added after the start of the
        // time range. manifests after the end of the time range must be included because
        // compaction may create a manifest after the time range that includes files added in the
        // range.
        manifests = Iterables.filter(manifests, manifest -> {
          if (manifest.snapshotId() == null) {
            return true; // can't tell when the manifest was written, so it may contain matches
          }
          Long timestamp = snapshotTimestamps.get(manifest.snapshotId());
          // if the timestamp is null, then its snapshot has expired. the check for the oldest
          // snapshot ensures that all expired snapshots are not in the time range.
          return timestamp != null && timestamp >= minTimestamp;
        });
      }
      // scan the surviving manifests and accumulate per-partition metrics
      try (CloseableIterable<ManifestEntry> entries = new ManifestGroup(ops, manifests)
          .filterData(rowFilter)
          .ignoreDeleted()
          .select(SCAN_SUMMARY_COLUMNS)
          .entries()) {
        PartitionSpec spec = table.spec();
        for (ManifestEntry entry : entries) {
          Long timestamp = snapshotTimestamps.get(entry.snapshotId());
          // if filtering, skip timestamps that are outside the range
          if (filterByTimestamp && !snapshotsInTimeRange.contains(entry.snapshotId())) {
            continue;
          }
          String partition = spec.partitionToPath(entry.file().partition());
          topN.update(partition, metrics -> (metrics == null ? new PartitionMetrics() : metrics)
              .updateFromFile(entry.file(), timestamp));
        }
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }
      return topN.get();
    }
  }

  /** Accumulated metrics for one partition: file/record counts, size, and latest timestamp. */
  public static class PartitionMetrics {
    private int fileCount = 0;
    private long recordCount = 0L;
    private long totalSize = 0L;
    // commit timestamp of the newest file seen; null when no file had a known timestamp
    private Long dataTimestampMillis = null;

    public int fileCount() {
      return fileCount;
    }

    public long recordCount() {
      return recordCount;
    }

    public long totalSize() {
      return totalSize;
    }

    public Long dataTimestampMillis() {
      return dataTimestampMillis;
    }

    // folds one data file's stats into this partition's totals; timestampMillis may be null
    private PartitionMetrics updateFromFile(DataFile file, Long timestampMillis) {
      this.fileCount += 1;
      this.recordCount += file.recordCount();
      this.totalSize += file.fileSizeInBytes();
      // keep the maximum (most recent) known timestamp
      if (timestampMillis != null &&
          (dataTimestampMillis == null || dataTimestampMillis < timestampMillis)) {
        this.dataTimestampMillis = timestampMillis;
      }
      return this;
    }

    @Override
    public String toString() {
      String dataTimestamp = dataTimestampMillis != null ?
          new Date(dataTimestampMillis).toString() : null;
      return "PartitionMetrics(fileCount=" + fileCount +
          ", recordCount=" + recordCount +
          ", totalSize=" + totalSize +
          ", dataTimestamp=" + dataTimestamp + ")";
    }
  }

  /**
   * Keeps at most N entries with the smallest keys, per the key comparator.
   * Once the limit is exceeded, the largest key is evicted and remembered as the "cut";
   * later keys at or beyond the cut are dropped without being stored.
   */
  private static class TopN<K, V> {
    private final int maxSize;
    private final boolean throwIfLimited;
    private final TreeMap<K, V> map;
    private final Comparator<? super K> keyComparator;
    // smallest key known to have been evicted; keys >= cut can never re-enter
    private K cut = null;

    TopN(int N, boolean throwIfLimited, Comparator<? super K> keyComparator) {
      this.maxSize = N;
      this.throwIfLimited = throwIfLimited;
      this.map = Maps.newTreeMap(keyComparator);
      this.keyComparator = keyComparator;
    }

    public void update(K key, Function<V, V> updateFunc) {
      // if there is a cut and it comes before the given key, do nothing
      if (cut != null && keyComparator.compare(cut, key) <= 0) {
        return;
      }
      // call the update function and add the result to the map
      map.put(key, updateFunc.apply(map.get(key)));
      // enforce the size constraint and update the cut if some keys are excluded
      while (map.size() > maxSize) {
        if (throwIfLimited) {
          throw new IllegalStateException(
              String.format("Too many matching keys: more than %d", maxSize));
        }
        this.cut = map.lastKey();
        map.remove(cut);
      }
    }

    public Map<K, V> get() {
      return ImmutableMap.copyOf(map);
    }
  }

  // ANDs together a list of expressions; an empty list yields alwaysTrue
  static Expression joinFilters(List<Expression> expressions) {
    Expression result = Expressions.alwaysTrue();
    for (Expression expression : expressions) {
      result = Expressions.and(result, expression);
    }
    return result;
  }

  // normalizes a timestamp to milliseconds by inferring its unit from magnitude:
  // < 10^10 -> seconds, < 10^13 -> millis, otherwise micros
  static long toMillis(long timestamp) {
    if (timestamp < 10000000000L) {
      // in seconds
      return timestamp * 1000;
    } else if (timestamp < 10000000000000L) {
      // in millis
      return timestamp;
    }
    // in micros
    return timestamp / 1000;
  }

  // collapses the timestamp predicates into a single inclusive [min, max] range
  static Pair<Long, Long> timestampRange(List<UnboundPredicate<Long>> timeFilters) {
    // evaluation is inclusive
    long minTimestamp = Long.MIN_VALUE;
    long maxTimestamp = Long.MAX_VALUE;
    for (UnboundPredicate<Long> pred : timeFilters) {
      long value = pred.literal().value();
      switch (pred.op()) {
        case LT:
          // exclusive bound: tighten to value - 1 so the range stays inclusive
          if (value - 1 < maxTimestamp) {
            maxTimestamp = value - 1;
          }
          break;
        case LT_EQ:
          if (value < maxTimestamp) {
            maxTimestamp = value;
          }
          break;
        case GT:
          // exclusive bound: tighten to value + 1 so the range stays inclusive
          if (value + 1 > minTimestamp) {
            minTimestamp = value + 1;
          }
          break;
        case GT_EQ:
          if (value > minTimestamp) {
            minTimestamp = value;
          }
          break;
        case EQ:
          // equality tightens both ends toward the single value
          if (value < maxTimestamp) {
            maxTimestamp = value;
          }
          if (value > minTimestamp) {
            minTimestamp = value;
          }
          break;
        default:
          throw new UnsupportedOperationException(
              "Cannot filter timestamps using predicate: " + pred);
      }
    }
    if (maxTimestamp < minTimestamp) {
      throw new IllegalArgumentException(
          "No timestamps can match filters: " + Joiner.on(", ").join(timeFilters));
    }
    return Pair.of(minTimestamp, maxTimestamp);
  }
}
| 6,301 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseMetastoreTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.hadoop.HadoopFileIO;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.Tasks;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.UUID;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.TableMetadataParser.read;
import static com.netflix.iceberg.hadoop.HadoopInputFile.fromLocation;
/**
 * Base {@link TableOperations} implementation for metastore-backed tables.
 * <p>
 * Tracks the current table metadata file location, lazily refreshing from a new metadata
 * location when requested, and writes new metadata files under the table's metadata folder.
 */
public abstract class BaseMetastoreTableOperations implements TableOperations {
  private static final Logger LOG = LoggerFactory.getLogger(BaseMetastoreTableOperations.class);

  // metastore table property keys
  public static final String TABLE_TYPE_PROP = "table_type";
  public static final String ICEBERG_TABLE_TYPE_VALUE = "iceberg";
  public static final String METADATA_LOCATION_PROP = "metadata_location";
  public static final String PREVIOUS_METADATA_LOCATION_PROP = "previous_metadata_location";

  private static final String METADATA_FOLDER_NAME = "metadata";
  private static final String DATA_FOLDER_NAME = "data";
  // placeholder folder used as the Hive table location so Hive never lists real data
  private static final String HIVE_LOCATION_FOLDER_NAME = "empty";

  private final Configuration conf;
  private final FileIO fileIo;

  private TableMetadata currentMetadata = null;
  private String currentMetadataLocation = null;
  private boolean shouldRefresh = true;  // start dirty so the first current() refreshes
  private String baseLocation = null;
  private int version = -1;

  protected BaseMetastoreTableOperations(Configuration conf) {
    this.conf = conf;
    this.fileIo = new HadoopFileIO(conf);
  }

  @Override
  public TableMetadata current() {
    if (shouldRefresh) {
      return refresh();
    }
    return currentMetadata;
  }

  /** Returns the location of the metadata file backing the current state, or null. */
  public String currentMetadataLocation() {
    return currentMetadataLocation;
  }

  /** Returns the version parsed from the current metadata file name, or -1 if unknown. */
  public int currentVersion() {
    return version;
  }

  /** Marks the cached metadata stale so the next {@link #current()} call refreshes. */
  protected void requestRefresh() {
    this.shouldRefresh = true;
  }

  /** Returns the placeholder location registered with Hive for this table. */
  public String hiveTableLocation() {
    return String.format("%s/%s", baseLocation, HIVE_LOCATION_FOLDER_NAME);
  }

  /**
   * Writes the given metadata as a new versioned metadata file.
   *
   * @param metadata the table metadata to persist
   * @param version the version number to embed in the file name
   * @return the path of the newly written metadata file
   */
  protected String writeNewMetadata(TableMetadata metadata, int version) {
    if (baseLocation == null) {
      baseLocation = metadata.location();
    }

    String newTableMetadataFilePath = newTableMetadataFilePath(baseLocation, version);
    OutputFile newMetadataLocation = fileIo.newOutputFile(newTableMetadataFilePath);

    // write the new metadata
    TableMetadataParser.write(metadata, newMetadataLocation);

    return newTableMetadataFilePath;
  }

  protected void refreshFromMetadataLocation(String newLocation) {
    refreshFromMetadataLocation(newLocation, 20);
  }

  /**
   * Reloads table state from the given metadata location, retrying on read failures.
   *
   * @param newLocation metadata file location to load; may equal the current location
   * @param numRetries number of read attempts before giving up
   */
  protected void refreshFromMetadataLocation(String newLocation, int numRetries) {
    // use null-safe equality check because new tables have a null metadata location
    if (!Objects.equal(currentMetadataLocation, newLocation)) {
      LOG.info("Refreshing table metadata from new version: " + newLocation);

      Tasks.foreach(newLocation)
          .retry(numRetries).exponentialBackoff(100, 5000, 600000, 4.0 /* 100, 400, 1600, ... */ )
          .suppressFailureWhenFinished()
          .run(location -> {
            this.currentMetadata = read(this, fromLocation(location, conf));
            this.currentMetadataLocation = location;
            this.baseLocation = currentMetadata.location();
            this.version = parseVersion(location);
          });
    }
    this.shouldRefresh = false;
  }

  @Override
  public String metadataFileLocation(String fileName) {
    return String.format("%s/%s/%s", baseLocation, METADATA_FOLDER_NAME, fileName);
  }

  @Override
  public FileIO io() {
    return fileIo;
  }

  @Override
  public long newSnapshotId() {
    // snapshot ids are wall-clock millis; assumes commits are farther than 1ms apart
    return System.currentTimeMillis();
  }

  // builds "<base>/metadata/<00000-version>-<uuid><ext>" for a new metadata file
  private String newTableMetadataFilePath(String baseLocation, int newVersion) {
    return String.format("%s/%s/%05d-%s%s",
        baseLocation,
        METADATA_FOLDER_NAME,
        newVersion,
        UUID.randomUUID(),
        getFileExtension(this.conf));
  }

  /**
   * Extracts the version number from a metadata file path, e.g. ".../00003-uuid.json" -> 3.
   * Returns -1 when the path does not follow the expected naming scheme.
   */
  private static int parseVersion(String metadataLocation) {
    int versionStart = metadataLocation.lastIndexOf('/') + 1; // if '/' isn't found, this will be 0
    int versionEnd = metadataLocation.indexOf('-', versionStart);
    if (versionEnd < 0) {
      // no '-' separator after the folder: substring would throw, so fail gracefully instead
      LOG.warn("Unable to parse version from metadata location: " + metadataLocation);
      return -1;
    }
    try {
      // parseInt avoids the needless boxing of Integer.valueOf
      return Integer.parseInt(metadataLocation.substring(versionStart, versionEnd));
    } catch (NumberFormatException e) {
      LOG.warn("Unable to parse version from metadata location: " + metadataLocation);
      return -1;
    }
  }
}
| 6,302 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/StreamingDelete.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.expressions.Expression;
/**
* {@link DeleteFiles Delete} implementation that avoids loading full manifests in memory.
* <p>
* This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
*/
/**
 * {@link DeleteFiles Delete} implementation that avoids loading full manifests in memory.
 * <p>
 * Each call records the delete and returns {@code this} so calls can be chained before a
 * single commit. This implementation will attempt to commit 5 times before throwing
 * {@link CommitFailedException}.
 */
class StreamingDelete extends MergingSnapshotUpdate implements DeleteFiles {
  StreamingDelete(TableOperations ops) {
    super(ops);
  }

  @Override
  public StreamingDelete deleteFromRowFilter(Expression expr) {
    // expression-based deletes are resolved by the merging superclass at commit time
    deleteByRowFilter(expr);
    return this;
  }

  @Override
  public StreamingDelete deleteFile(CharSequence path) {
    // record a single file path for removal; manifests are rewritten lazily
    delete(path);
    return this;
  }
}
| 6,303 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/RollbackToSnapshot.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.exceptions.ValidationException;
/**
 * Rolls a table back to a previously committed snapshot.
 * <p>
 * Operates on the table metadata captured at construction time; it never refreshes
 * or retries, so the commit applies against that fixed base state.
 */
class RollbackToSnapshot implements Rollback {
  private final TableOperations ops;
  private TableMetadata base = null;
  private Long targetSnapshotId = null;

  RollbackToSnapshot(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current(); // do not retry
  }

  @Override
  public Rollback toSnapshotId(long snapshotId) {
    Preconditions.checkArgument(base.snapshot(snapshotId) != null,
        "Cannot roll back to unknown snapshot id: %s", snapshotId);

    this.targetSnapshotId = snapshotId;

    return this;
  }

  @Override
  public Rollback toSnapshotAtTime(long timestampMillis) {
    long latestId = 0;
    long latestTimestamp = 0;

    // pick the newest snapshot whose commit time is still strictly before the cutoff
    for (Snapshot snapshot : base.snapshots()) {
      long committedAt = snapshot.timestampMillis();
      if (committedAt < timestampMillis && committedAt > latestTimestamp) {
        latestTimestamp = committedAt;
        latestId = snapshot.snapshotId();
      }
    }

    // if nothing qualified, latestId is still 0 and the lookup below fails the check
    Preconditions.checkArgument(base.snapshot(latestId) != null,
        "Cannot roll back, no valid snapshot older than: %s", timestampMillis);

    this.targetSnapshotId = latestId;

    return this;
  }

  @Override
  public Snapshot apply() {
    ValidationException.check(targetSnapshotId != null,
        "Cannot roll back to unknown version: call toSnapshotId or toSnapshotAtTime");
    return base.snapshot(targetSnapshotId);
  }

  @Override
  public void commit() {
    // rollback does not refresh or retry. it only operates on the state of the table when
    // rollback was called to create the transaction.
    ops.commit(base, base.rollbackTo(apply()));
  }
}
| 6,304 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseSnapshot.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
class BaseSnapshot implements Snapshot {
private final TableOperations ops;
private final long snapshotId;
private final Long parentId;
private final long timestampMillis;
private final InputFile manifestList;
// lazily initialized
private List<ManifestFile> manifests = null;
private List<DataFile> adds = null;
private List<DataFile> deletes = null;
/**
* For testing only.
*/
BaseSnapshot(TableOperations ops,
long snapshotId,
String... manifestFiles) {
this(ops, snapshotId, null, System.currentTimeMillis(),
Lists.transform(Arrays.asList(manifestFiles),
path -> new GenericManifestFile(ops.io().newInputFile(path), 0)));
}
BaseSnapshot(TableOperations ops,
long snapshotId,
Long parentId,
long timestampMillis,
InputFile manifestList) {
this.ops = ops;
this.snapshotId = snapshotId;
this.parentId = parentId;
this.timestampMillis = timestampMillis;
this.manifestList = manifestList;
}
BaseSnapshot(TableOperations ops,
long snapshotId,
Long parentId,
long timestampMillis,
List<ManifestFile> manifests) {
this(ops, snapshotId, parentId, timestampMillis, (InputFile) null);
this.manifests = manifests;
}
@Override
public long snapshotId() {
return snapshotId;
}
@Override
public Long parentId() {
return parentId;
}
@Override
public long timestampMillis() {
return timestampMillis;
}
@Override
public List<ManifestFile> manifests() {
if (manifests == null) {
// if manifests isn't set, then the snapshotFile is set and should be read to get the list
try (CloseableIterable<ManifestFile> files = Avro.read(manifestList)
.rename("manifest_file", GenericManifestFile.class.getName())
.rename("partitions", GenericPartitionFieldSummary.class.getName())
.rename("r508", GenericPartitionFieldSummary.class.getName())
.project(ManifestFile.schema())
.reuseContainers(false)
.build()) {
this.manifests = Lists.newLinkedList(files);
} catch (IOException e) {
throw new RuntimeIOException(e, "Cannot read snapshot file: %s", manifestList.location());
}
}
return manifests;
}
@Override
public List<DataFile> addedFiles() {
if (adds == null) {
cacheChanges();
}
return adds;
}
@Override
public List<DataFile> deletedFiles() {
if (deletes == null) {
cacheChanges();
}
return deletes;
}
@Override
public String manifestListLocation() {
return manifestList != null ? manifestList.location() : null;
}
/**
 * Scans this snapshot's manifests and caches the data files added and deleted
 * by this snapshot into {@code adds} and {@code deletes}.
 * <p>
 * Manifests may be reused by newer snapshots, so entries are filtered by this
 * snapshot's id to keep only changes made by this snapshot.
 *
 * @throws RuntimeIOException if a manifest reader fails to close
 */
private void cacheChanges() {
  List<DataFile> adds = Lists.newArrayList();
  List<DataFile> deletes = Lists.newArrayList();

  // accumulate adds and deletes from all manifests.
  // because manifests can be reused in newer snapshots, filter the changes by snapshot id.
  for (String manifest : Iterables.transform(manifests(), ManifestFile::path)) {
    try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest))) {
      for (ManifestEntry add : reader.addedFiles()) {
        if (add.snapshotId() == snapshotId) {
          // store an independent copy of the file metadata
          adds.add(add.file().copy());
        }
      }

      for (ManifestEntry delete : reader.deletedFiles()) {
        if (delete.snapshotId() == snapshotId) {
          deletes.add(delete.file().copy());
        }
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to close reader while caching changes");
    }
  }

  this.adds = adds;
  this.deletes = deletes;
}
@Override
public String toString() {
  // note: calling manifests() here may lazily read the manifest list file
  return Objects.toStringHelper(this)
      .add("id", snapshotId)
      .add("timestamp_ms", timestampMillis)
      .add("manifests", manifests())
      .toString();
}
}
| 6,305 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/RemoveSnapshots.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.util.Tasks;
import com.netflix.iceberg.util.ThreadPools;
import io.netty.util.internal.ConcurrentSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * {@link ExpireSnapshots} implementation that removes expired snapshots from
 * table metadata and then deletes manifests and data files that are no longer
 * referenced by any remaining snapshot.
 * <p>
 * The metadata commit is retried on {@link CommitFailedException}; file cleanup
 * runs afterwards on a best-effort basis so a failed delete never fails the
 * commit.
 */
class RemoveSnapshots implements ExpireSnapshots {
  private static final Logger LOG = LoggerFactory.getLogger(RemoveSnapshots.class);

  // default delete implementation: delete directly through the table's FileIO
  private final Consumer<String> defaultDelete = new Consumer<String>() {
    @Override
    public void accept(String file) {
      ops.io().deleteFile(file);
    }
  };

  private final TableOperations ops;
  private final Set<Long> idsToRemove = Sets.newHashSet();
  private TableMetadata base;
  private Long expireOlderThan = null;
  private Consumer<String> deleteFunc = defaultDelete;

  RemoveSnapshots(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  @Override
  public ExpireSnapshots expireSnapshotId(long snapshotId) {
    LOG.info("Expiring snapshot with id: {}", snapshotId);
    idsToRemove.add(snapshotId);
    return this;
  }

  @Override
  public ExpireSnapshots expireOlderThan(long timestampMillis) {
    LOG.info("Expiring snapshots older than: {} ({})", new Date(timestampMillis), timestampMillis);
    this.expireOlderThan = timestampMillis;
    return this;
  }

  @Override
  public ExpireSnapshots deleteWith(Consumer<String> deleteFunc) {
    this.deleteFunc = deleteFunc;
    return this;
  }

  @Override
  public List<Snapshot> apply() {
    TableMetadata updated = internalApply();
    List<Snapshot> removed = Lists.newArrayList(base.snapshots());
    removed.removeAll(updated.snapshots());
    return removed;
  }

  /**
   * Refreshes the table and returns new metadata with matching snapshots
   * removed: a snapshot is removed when its id was explicitly expired or its
   * timestamp is older than the configured cutoff.
   */
  private TableMetadata internalApply() {
    this.base = ops.refresh();

    return base.removeSnapshotsIf(snapshot -> (
        idsToRemove.contains(snapshot.snapshotId()) ||
        (expireOlderThan != null && snapshot.timestampMillis() < expireOlderThan)
    ));
  }

  @Override
  public void commit() {
    // retry the metadata commit using the table's configured retry policy
    Tasks.foreach(ops)
        .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
        .exponentialBackoff(
            base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
            2.0 /* exponential */ )
        .onlyRetryOn(CommitFailedException.class)
        .run(item -> {
          TableMetadata updated = internalApply();
          // only commit the updated metadata if at least one snapshot was removed
          if (updated.snapshots().size() != base.snapshots().size()) {
            ops.commit(base, updated);
          }
        });

    LOG.info("Committed snapshot changes; cleaning up expired manifests and data files.");

    // clean up the expired snapshots:
    // 1. Get a list of the snapshots that were removed
    // 2. Delete any data files that were deleted by those snapshots and are not in the table
    // 3. Delete any manifests that are no longer used by current snapshots
    // Reads and deletes are done using Tasks.foreach(...).suppressFailureWhenFinished to complete
    // as much of the delete work as possible and avoid orphaned data or manifest files.

    TableMetadata current = ops.refresh();

    // index the ids and manifests still referenced after the commit
    Set<Long> currentIds = Sets.newHashSet();
    Set<ManifestFile> currentManifests = Sets.newHashSet();
    for (Snapshot snapshot : current.snapshots()) {
      currentIds.add(snapshot.snapshotId());
      currentManifests.addAll(snapshot.manifests());
    }

    Set<ManifestFile> allManifests = Sets.newHashSet(currentManifests);
    Set<String> manifestsToDelete = Sets.newHashSet();
    for (Snapshot snapshot : base.snapshots()) {
      long snapshotId = snapshot.snapshotId();
      if (!currentIds.contains(snapshotId)) {
        // the snapshot was removed, find any manifests that are no longer needed
        LOG.info("Removing snapshot: {}", snapshot);
        for (ManifestFile manifest : snapshot.manifests()) {
          if (!currentManifests.contains(manifest)) {
            manifestsToDelete.add(manifest.path());
            allManifests.add(manifest);
          }
        }
      }
    }

    // written to concurrently by the worker pool below; use Guava's
    // ConcurrentHashMap-backed set instead of Netty's internal ConcurrentSet,
    // which lives in an internal package not meant for external use
    Set<String> filesToDelete = Sets.newConcurrentHashSet();
    Tasks.foreach(allManifests)
        .noRetry().suppressFailureWhenFinished()
        .executeWith(ThreadPools.getWorkerPool())
        .onFailure((item, exc) ->
            LOG.warn("Failed to get deleted files: this may cause orphaned data files", exc)
        ).run(manifest -> {
          if (manifest.deletedFilesCount() != null && manifest.deletedFilesCount() == 0) {
            // no deletes recorded in this manifest, nothing to scan
            return;
          }

          // the manifest has deletes, scan it to find files to delete
          try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
            for (ManifestEntry entry : reader.entries()) {
              // if the snapshot ID of the DELETE entry is no longer valid, the data can be deleted
              if (entry.status() == ManifestEntry.Status.DELETED &&
                  !currentIds.contains(entry.snapshotId())) {
                // use toString to ensure the path will not change (Utf8 is reused)
                filesToDelete.add(entry.file().path().toString());
              }
            }
          } catch (IOException e) {
            // format argument instead of concatenation, consistent with other
            // RuntimeIOException call sites
            throw new RuntimeIOException(e, "Failed to read manifest file: %s", manifest.path());
          }
        });

    LOG.warn("Manifests to delete: {}", Joiner.on(", ").join(manifestsToDelete));

    Tasks.foreach(filesToDelete)
        .noRetry().suppressFailureWhenFinished()
        .onFailure((file, exc) -> LOG.warn("Delete failed for data file: " + file, exc))
        .run(file -> deleteFunc.accept(file));

    Tasks.foreach(manifestsToDelete)
        .noRetry().suppressFailureWhenFinished()
        .onFailure((manifest, exc) -> LOG.warn("Delete failed for manifest: " + manifest, exc))
        .run(deleteFunc::accept);
  }
}
| 6,306 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PartitionData.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import org.apache.avro.util.Utf8;
import java.io.Serializable;
import java.util.Arrays;
/**
 * In-memory representation of a partition tuple that is both an Avro
 * {@link IndexedRecord} (so it can be stored in manifest files) and a
 * {@link StructLike} (so it can be evaluated by partition expressions).
 * <p>
 * Serializable: the Avro {@link Schema} is not serializable, so its JSON form
 * is kept in {@code stringSchema} and the schema is rebuilt lazily.
 */
class PartitionData
    implements IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {

  /** Converts a partition struct type to the Avro schema used to store it. */
  static Schema getSchema(Types.StructType partitionType) {
    return AvroSchemaUtil.convert(partitionType, PartitionData.class.getName());
  }

  private final Types.StructType partitionType;
  private final int size;
  private final Object[] data;
  // JSON form of the Avro schema, kept because Schema is not Serializable
  private final String stringSchema;
  private transient Schema schema = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public PartitionData(Schema schema) {
    this.partitionType = AvroSchemaUtil.convert(schema).asNestedType().asStructType();
    this.size = partitionType.fields().size();
    this.data = new Object[size];
    this.stringSchema = schema.toString();
    this.schema = schema;
  }

  PartitionData(Types.StructType partitionType) {
    // partition values must be primitives so they can be compared and stored
    for (Types.NestedField field : partitionType.fields()) {
      Preconditions.checkArgument(field.type().isPrimitiveType(),
          "Partitions cannot contain nested types: " + field.type());
    }

    this.partitionType = partitionType;
    this.size = partitionType.fields().size();
    this.data = new Object[size];
    this.schema = getSchema(partitionType);
    this.stringSchema = schema.toString();
  }

  /**
   * Copy constructor
   */
  private PartitionData(PartitionData toCopy) {
    this.partitionType = toCopy.partitionType;
    this.size = toCopy.size;
    this.data = Arrays.copyOf(toCopy.data, toCopy.data.length);
    this.stringSchema = toCopy.stringSchema;
    this.schema = toCopy.schema;
  }

  public Types.StructType getPartitionType() {
    return partitionType;
  }

  public Schema getSchema() {
    if (schema == null) {
      // rebuilt after Java deserialization from the stored JSON form
      this.schema = new Schema.Parser().parse(stringSchema);
    }
    return schema;
  }

  /** Returns the type of the partition field at the given position. */
  public Type getType(int pos) {
    return partitionType.fields().get(pos).type();
  }

  /** Resets all partition values to null. */
  public void clear() {
    Arrays.fill(data, null);
  }

  @Override
  public int size() {
    return size;
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> T get(int pos, Class<T> javaClass) {
    Object v = get(pos);
    if (v == null || javaClass.isInstance(v)) {
      return javaClass.cast(v);
    }

    throw new IllegalArgumentException(String.format(
        "Wrong class, %s, for object: %s",
        javaClass.getName(), String.valueOf(v)));
  }

  @Override
  public <T> void set(int pos, T value) {
    if (value instanceof Utf8) {
      // Utf8 is not Serializable
      data[pos] = value.toString();
    } else {
      data[pos] = value;
    }
  }

  @Override
  public void put(int i, Object v) {
    set(i, v);
  }

  @Override
  public Object get(int i) {
    // positions past the stored data are treated as null (schema evolution)
    if (i >= data.length) {
      return null;
    }
    return data[i];
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("PartitionData{");
    for (int i = 0; i < data.length; i += 1) {
      if (i > 0) {
        sb.append(", ");
      }
      sb.append(partitionType.fields().get(i).name())
          .append("=")
          .append(data[i]);
    }
    sb.append("}");
    return sb.toString();
  }

  public PartitionData copy() {
    return new PartitionData(this);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (o == null || getClass() != o.getClass()) {
      return false;
    }

    PartitionData that = (PartitionData) o;
    return partitionType.equals(that.partitionType) && Arrays.equals(data, that.data);
  }

  @Override
  public int hashCode() {
    // hash the array *contents* to stay consistent with equals(), which uses
    // Arrays.equals; passing the array reference to Objects.hashCode would use
    // identity hashing and break the equals/hashCode contract
    return Objects.hashCode(partitionType, Arrays.hashCode(data));
  }
}
| 6,307 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/OverwriteData.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.expressions.StrictMetricsEvaluator;
import java.util.List;
/**
 * {@link OverwriteFiles} implementation that deletes files matched by a row
 * filter expression and adds replacement files in the same snapshot update.
 */
public class OverwriteData extends MergingSnapshotUpdate implements OverwriteFiles {
  // when true, apply() validates that every added file contains only rows
  // that match the overwrite row filter
  private boolean validateAddedFiles = false;

  protected OverwriteData(TableOperations ops) {
    super(ops);
  }

  @Override
  public OverwriteFiles overwriteByRowFilter(Expression expr) {
    // rows matching the filter are removed; added files replace them
    deleteByRowFilter(expr);
    return this;
  }

  @Override
  public OverwriteFiles addFile(DataFile file) {
    add(file);
    return this;
  }

  @Override
  public OverwriteFiles validateAddedFiles() {
    this.validateAddedFiles = true;
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    if (validateAddedFiles) {
      PartitionSpec spec = writeSpec();
      Expression rowFilter = rowFilter();

      // inclusive projection: may the partition contain matching rows at all
      Expression inclusiveExpr = Projections.inclusive(spec).project(rowFilter);
      Evaluator inclusive = new Evaluator(spec.partitionType(), inclusiveExpr);

      // strict projection: true only if every row in the partition matches
      Expression strictExpr = Projections.strict(spec).project(rowFilter);
      Evaluator strict = new Evaluator(spec.partitionType(), strictExpr);

      StrictMetricsEvaluator metrics = new StrictMetricsEvaluator(
          base.schema(), rowFilter);

      for (DataFile file : addedFiles()) {
        // the real test is that the strict or metrics test matches the file, indicating that all
        // records in the file match the filter. inclusive is used to avoid testing the metrics,
        // which is more complicated
        ValidationException.check(
            inclusive.eval(file.partition()) &&
            (strict.eval(file.partition()) || metrics.eval(file)),
            "Cannot append file with rows that do not match filter: %s: %s",
            rowFilter, file.path());
      }
    }

    return super.apply(base);
  }
}
| 6,308 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/GenericDataFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
 * Generic {@link DataFile} implementation that is also an Avro
 * {@link IndexedRecord}, so it can be written to and read from manifest files.
 * <p>
 * Field access by position follows the full manifest schema. When an instance
 * is constructed from a projected Avro schema, incoming positions are remapped
 * through {@code fromProjectionPos}.
 */
class GenericDataFile
    implements DataFile, IndexedRecord, StructLike, SpecificData.SchemaConstructable, Serializable {
  private static final Types.StructType EMPTY_STRUCT_TYPE = Types.StructType.of();
  // shared immutable placeholder for unpartitioned files; copy() returns itself
  private static final PartitionData EMPTY_PARTITION_DATA = new PartitionData(EMPTY_STRUCT_TYPE) {
    @Override
    public PartitionData copy() {
      return this; // this does not change
    }
  };

  // maps this instance's field positions to positions in the full schema;
  // null when this file was not created from a projected schema
  private int[] fromProjectionPos;
  private Types.StructType partitionType;

  private String filePath = null;
  private FileFormat format = null;
  private PartitionData partitionData = null;
  private Long recordCount = null;
  private long fileSizeInBytes = -1L;
  private long blockSizeInBytes = -1L;

  // optional fields
  private Integer fileOrdinal = null; // boxed for nullability
  private List<Integer> sortColumns = null;
  private Map<Integer, Long> columnSizes = null;
  private Map<Integer, Long> valueCounts = null;
  private Map<Integer, Long> nullValueCounts = null;
  private Map<Integer, ByteBuffer> lowerBounds = null;
  private Map<Integer, ByteBuffer> upperBounds = null;

  // cached schema
  private transient org.apache.avro.Schema avroSchema = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public GenericDataFile(org.apache.avro.Schema avroSchema) {
    this.avroSchema = avroSchema;

    Types.StructType schema = AvroSchemaUtil.convert(avroSchema).asNestedType().asStructType();

    // partition type may be null if the field was not projected
    Type partType = schema.fieldType("partition");
    if (partType != null) {
      this.partitionType = partType.asNestedType().asStructType();
    } else {
      this.partitionType = EMPTY_STRUCT_TYPE;
    }

    // map projected positions to full-schema positions by matching field ids
    List<Types.NestedField> fields = schema.fields();
    List<Types.NestedField> allFields = DataFile.getType(partitionType).fields();
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
          break; // field ids are unique, stop at the first match
        }
      }

      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }

    this.partitionData = new PartitionData(partitionType);
  }

  /** Creates an unpartitioned data file without metrics. */
  GenericDataFile(String filePath, FileFormat format, long recordCount,
                  long fileSizeInBytes, long blockSizeInBytes) {
    this.filePath = filePath;
    this.format = format;
    this.partitionData = EMPTY_PARTITION_DATA;
    this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
    this.recordCount = recordCount;
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = null;
    this.valueCounts = null;
    this.nullValueCounts = null;
    this.lowerBounds = null;
    this.upperBounds = null;
    this.fromProjectionPos = null;
  }

  /** Creates a partitioned data file without metrics. */
  GenericDataFile(String filePath, FileFormat format, PartitionData partition,
                  long recordCount, long fileSizeInBytes, long blockSizeInBytes) {
    this.filePath = filePath;
    this.format = format;
    this.partitionData = partition;
    this.partitionType = partition.getPartitionType();
    this.recordCount = recordCount;
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = null;
    this.valueCounts = null;
    this.nullValueCounts = null;
    this.lowerBounds = null;
    this.upperBounds = null;
    this.fromProjectionPos = null;
  }

  /** Creates a data file with column metrics; record count comes from the metrics. */
  GenericDataFile(String filePath, FileFormat format, PartitionData partition,
                  long fileSizeInBytes, long blockSizeInBytes, Metrics metrics) {
    this.filePath = filePath;
    this.format = format;

    // this constructor is used by DataFiles.Builder, which passes null for unpartitioned data
    if (partition == null) {
      this.partitionData = EMPTY_PARTITION_DATA;
      this.partitionType = EMPTY_PARTITION_DATA.getPartitionType();
    } else {
      this.partitionData = partition;
      this.partitionType = partition.getPartitionType();
    }

    // this will throw NPE if metrics.recordCount is null
    this.recordCount = metrics.recordCount();
    this.fileSizeInBytes = fileSizeInBytes;
    this.blockSizeInBytes = blockSizeInBytes;
    this.fileOrdinal = null;
    this.sortColumns = null;
    this.columnSizes = metrics.columnSizes();
    this.valueCounts = metrics.valueCounts();
    this.nullValueCounts = metrics.nullValueCounts();
    this.lowerBounds = SerializableByteBufferMap.wrap(metrics.lowerBounds());
    this.upperBounds = SerializableByteBufferMap.wrap(metrics.upperBounds());
    this.fromProjectionPos = null;
  }

  /**
   * Copy constructor.
   *
   * @param toCopy a generic data file to copy.
   */
  private GenericDataFile(GenericDataFile toCopy) {
    this.filePath = toCopy.filePath;
    this.format = toCopy.format;
    this.partitionData = toCopy.partitionData.copy();
    this.partitionType = toCopy.partitionType;
    this.recordCount = toCopy.recordCount;
    this.fileSizeInBytes = toCopy.fileSizeInBytes;
    this.blockSizeInBytes = toCopy.blockSizeInBytes;
    this.fileOrdinal = toCopy.fileOrdinal;
    this.sortColumns = toCopy.sortColumns;
    // TODO: support lazy conversion to/from map
    this.columnSizes = toCopy.columnSizes;
    this.valueCounts = toCopy.valueCounts;
    this.nullValueCounts = toCopy.nullValueCounts;
    this.lowerBounds = toCopy.lowerBounds;
    this.upperBounds = toCopy.upperBounds;
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }

  /**
   * Constructor for Java serialization.
   */
  GenericDataFile() {
  }

  @Override
  public CharSequence path() {
    return filePath;
  }

  @Override
  public FileFormat format() {
    return format;
  }

  @Override
  public StructLike partition() {
    return partitionData;
  }

  @Override
  public long recordCount() {
    return recordCount;
  }

  @Override
  public long fileSizeInBytes() {
    return fileSizeInBytes;
  }

  @Override
  public long blockSizeInBytes() {
    return blockSizeInBytes;
  }

  @Override
  public Integer fileOrdinal() {
    return fileOrdinal;
  }

  @Override
  public List<Integer> sortColumns() {
    return sortColumns;
  }

  @Override
  public Map<Integer, Long> columnSizes() {
    return columnSizes;
  }

  @Override
  public Map<Integer, Long> valueCounts() {
    return valueCounts;
  }

  @Override
  public Map<Integer, Long> nullValueCounts() {
    return nullValueCounts;
  }

  @Override
  public Map<Integer, ByteBuffer> lowerBounds() {
    return lowerBounds;
  }

  @Override
  public Map<Integer, ByteBuffer> upperBounds() {
    return upperBounds;
  }

  @Override
  public org.apache.avro.Schema getSchema() {
    if (avroSchema == null) {
      this.avroSchema = getAvroSchema(partitionType);
    }
    return avroSchema;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void put(int i, Object v) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        // always coerce to String for Serializable
        this.filePath = v.toString();
        return;
      case 1:
        this.format = FileFormat.valueOf(v.toString());
        return;
      case 2:
        this.partitionData = (PartitionData) v;
        return;
      case 3:
        this.recordCount = (Long) v;
        return;
      case 4:
        this.fileSizeInBytes = (Long) v;
        return;
      case 5:
        this.blockSizeInBytes = (Long) v;
        return;
      case 6:
        this.fileOrdinal = (Integer) v;
        return;
      case 7:
        this.sortColumns = (List<Integer>) v;
        return;
      case 8:
        this.columnSizes = (Map<Integer, Long>) v;
        return;
      case 9:
        this.valueCounts = (Map<Integer, Long>) v;
        return;
      case 10:
        this.nullValueCounts = (Map<Integer, Long>) v;
        return;
      case 11:
        this.lowerBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
        return;
      case 12:
        this.upperBounds = SerializableByteBufferMap.wrap((Map<Integer, ByteBuffer>) v);
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return filePath;
      case 1:
        return format != null ? format.toString() : null;
      case 2:
        return partitionData;
      case 3:
        return recordCount;
      case 4:
        return fileSizeInBytes;
      case 5:
        return blockSizeInBytes;
      case 6:
        return fileOrdinal;
      case 7:
        return sortColumns;
      case 8:
        return columnSizes;
      case 9:
        return valueCounts;
      case 10:
        return nullValueCounts;
      case 11:
        return lowerBounds;
      case 12:
        return upperBounds;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }

  /** Builds the Avro schema for a data file with the given partition type. */
  private static org.apache.avro.Schema getAvroSchema(Types.StructType partitionType) {
    Types.StructType type = DataFile.getType(partitionType);
    return AvroSchemaUtil.convert(type, ImmutableMap.of(
        type, GenericDataFile.class.getName(),
        partitionType, PartitionData.class.getName()));
  }

  @Override
  public int size() {
    // number of fields in the full data file schema
    return 13;
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }

  @Override
  public <T> void set(int pos, T value) {
    put(pos, value);
  }

  @Override
  public DataFile copy() {
    return new GenericDataFile(this);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("file_path", filePath)
        .add("file_format", format)
        .add("partition", partitionData)
        .add("record_count", recordCount)
        .add("file_size_in_bytes", fileSizeInBytes)
        .add("block_size_in_bytes", blockSizeInBytes)
        .add("column_sizes", columnSizes)
        .add("value_counts", valueCounts)
        .add("null_value_counts", nullValueCounts)
        .add("lower_bounds", lowerBounds)
        .add("upper_bounds", upperBounds)
        .toString();
  }
}
| 6,309 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableMetadata.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.TypeUtil;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;
/**
* Metadata for a table.
*/
public class TableMetadata {
static final int TABLE_FORMAT_VERSION = 1;
static final int INITIAL_SPEC_ID = 0;
/**
 * Creates new table metadata with no snapshots and no table properties.
 */
public static TableMetadata newTableMetadata(TableOperations ops,
                                             Schema schema,
                                             PartitionSpec spec,
                                             String location) {
  return newTableMetadata(ops, schema, spec, location, ImmutableMap.of());
}
/**
 * Creates new table metadata with no snapshots.
 * <p>
 * All column ids in the given schema are reassigned to fresh, consistent ids,
 * and the partition spec is rebuilt against the reassigned schema.
 */
public static TableMetadata newTableMetadata(TableOperations ops,
                                             Schema schema,
                                             PartitionSpec spec,
                                             String location,
                                             Map<String, String> properties) {
  // reassign all column ids to ensure consistency
  AtomicInteger lastColumnId = new AtomicInteger(0);
  Schema freshSchema = TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);

  // rebuild the partition spec using the new column ids
  PartitionSpec.Builder specBuilder = PartitionSpec.builderFor(freshSchema)
      .withSpecId(INITIAL_SPEC_ID);
  for (PartitionField field : spec.fields()) {
    // look up the name of the source field in the old schema to get the new schema's id
    String sourceName = schema.findColumnName(field.sourceId());
    specBuilder.add(
        freshSchema.findField(sourceName).fieldId(),
        field.name(),
        field.transform().toString());
  }
  PartitionSpec freshSpec = specBuilder.build();

  return new TableMetadata(ops, null, location,
      System.currentTimeMillis(),
      lastColumnId.get(), freshSchema, INITIAL_SPEC_ID, ImmutableList.of(freshSpec),
      ImmutableMap.copyOf(properties), -1, ImmutableList.of(), ImmutableList.of());
}
/**
 * Immutable entry in the snapshot log: the time at which a snapshot became the
 * table's current snapshot.
 */
public static class SnapshotLogEntry {
  private final long timestampMillis;
  private final long snapshotId;

  SnapshotLogEntry(long timestampMillis, long snapshotId) {
    this.timestampMillis = timestampMillis;
    this.snapshotId = snapshotId;
  }

  /** Returns the time the snapshot became current, in millis since the epoch. */
  public long timestampMillis() {
    return timestampMillis;
  }

  /** Returns the id of the snapshot that became current. */
  public long snapshotId() {
    return snapshotId;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    SnapshotLogEntry that = (SnapshotLogEntry) other;
    return timestampMillis == that.timestampMillis && snapshotId == that.snapshotId;
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(timestampMillis, snapshotId);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("timestampMillis", timestampMillis)
        .add("snapshotId", snapshotId)
        .toString();
  }
}
private final TableOperations ops;
private final InputFile file;
// stored metadata
private final String location;
private final long lastUpdatedMillis;
private final int lastColumnId;
private final Schema schema;
private final int defaultSpecId;
private final List<PartitionSpec> specs;
private final Map<String, String> properties;
private final long currentSnapshotId;
private final List<Snapshot> snapshots;
private final Map<Long, Snapshot> snapshotsById;
private final Map<Integer, PartitionSpec> specsById;
private final List<SnapshotLogEntry> snapshotLog;
/**
 * Creates table metadata from its stored fields.
 * <p>
 * Builds id-based indexes for snapshots and specs, validates that the snapshot
 * log is sorted by timestamp, and validates that the current snapshot id, when
 * set (non-negative), refers to a known snapshot.
 *
 * @param file the metadata file this was read from, or null if newly created
 * @param currentSnapshotId id of the current snapshot; negative means none
 */
TableMetadata(TableOperations ops,
              InputFile file,
              String location,
              long lastUpdatedMillis,
              int lastColumnId,
              Schema schema,
              int defaultSpecId,
              List<PartitionSpec> specs,
              Map<String, String> properties,
              long currentSnapshotId,
              List<Snapshot> snapshots,
              List<SnapshotLogEntry> snapshotLog) {
  this.ops = ops;
  this.file = file;
  this.location = location;
  this.lastUpdatedMillis = lastUpdatedMillis;
  this.lastColumnId = lastColumnId;
  this.schema = schema;
  this.specs = specs;
  this.defaultSpecId = defaultSpecId;
  this.properties = properties;
  this.currentSnapshotId = currentSnapshotId;
  this.snapshots = snapshots;
  this.snapshotLog = snapshotLog;
  this.snapshotsById = indexSnapshots(snapshots);
  this.specsById = indexSpecs(specs);

  // validate that the snapshot log is sorted by timestamp
  SnapshotLogEntry last = null;
  for (SnapshotLogEntry logEntry : snapshotLog) {
    if (last != null) {
      Preconditions.checkArgument(
          (logEntry.timestampMillis() - last.timestampMillis()) >= 0,
          "[BUG] Expected sorted snapshot log entries.");
    }
    last = logEntry;
  }

  Preconditions.checkArgument(
      currentSnapshotId < 0 || snapshotsById.containsKey(currentSnapshotId),
      "Invalid table metadata: Cannot find current version");
}
/** Returns the metadata file this was read from, or null if newly created. */
public InputFile file() {
  return file;
}

/** Returns the last update time, in millis since the epoch. */
public long lastUpdatedMillis() {
  return lastUpdatedMillis;
}

/** Returns the highest assigned column id. */
public int lastColumnId() {
  return lastColumnId;
}

/** Returns the table schema. */
public Schema schema() {
  return schema;
}

/** Returns the default partition spec. */
public PartitionSpec spec() {
  return specsById.get(defaultSpecId);
}

/** Returns the id of the default partition spec. */
public int defaultSpecId() {
  return defaultSpecId;
}

/** Returns the partition spec with the given id, or null if unknown. */
public PartitionSpec spec(int id) {
  return specsById.get(id);
}

/** Returns all partition specs. */
public List<PartitionSpec> specs() {
  return specs;
}

/** Returns the table's base location. */
public String location() {
  return location;
}

/** Returns the table properties. */
public Map<String, String> properties() {
  return properties;
}
/**
 * Returns a table property parsed as a boolean, or the default value if the
 * property is not set.
 */
public boolean propertyAsBoolean(String property, boolean defaultValue) {
  String value = properties.get(property);
  if (value != null) {
    // use the value already fetched instead of a second map lookup
    return Boolean.parseBoolean(value);
  }
  return defaultValue;
}
/**
 * Returns a table property parsed as an int, or the default value if the
 * property is not set.
 */
public int propertyAsInt(String property, int defaultValue) {
  String value = properties.get(property);
  if (value != null) {
    // use the value already fetched instead of a second map lookup
    return Integer.parseInt(value);
  }
  return defaultValue;
}
/**
 * Returns a table property parsed as a long, or the default value if the
 * property is not set.
 */
public long propertyAsLong(String property, long defaultValue) {
  String value = properties.get(property);
  if (value != null) {
    // use the value already fetched instead of a second map lookup
    return Long.parseLong(value);
  }
  return defaultValue;
}
/** Returns the snapshot with the given id, or null if unknown. */
public Snapshot snapshot(long snapshotId) {
  return snapshotsById.get(snapshotId);
}

/** Returns the current snapshot, or null if the table has no current snapshot. */
public Snapshot currentSnapshot() {
  return snapshotsById.get(currentSnapshotId);
}

/** Returns all snapshots of the table. */
public List<Snapshot> snapshots() {
  return snapshots;
}

/** Returns the history of which snapshot was current, ordered by timestamp. */
public List<SnapshotLogEntry> snapshotLog() {
  return snapshotLog;
}
  /** Returns new table metadata with the table's root location replaced by newLocation. */
  public TableMetadata updateTableLocation(String newLocation) {
    return new TableMetadata(ops, null, newLocation,
        System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
        currentSnapshotId, snapshots, snapshotLog);
  }

  /**
   * Returns new table metadata with an updated schema and last-assigned column id.
   * The new schema must remain compatible with the current default partition spec.
   */
  public TableMetadata updateSchema(Schema schema, int lastColumnId) {
    PartitionSpec.checkCompatibility(spec(), schema);
    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
        currentSnapshotId, snapshots, snapshotLog);
  }
  /**
   * Returns new table metadata with the given spec as the new default partition spec.
   * <p>
   * If an equivalent spec is already registered its id is reused; otherwise a fresh spec is
   * added with an id one greater than the highest existing spec id.
   */
  public TableMetadata updatePartitionSpec(PartitionSpec partitionSpec) {
    PartitionSpec.checkCompatibility(partitionSpec, schema);

    // if the spec already exists, use the same ID. otherwise, use 1 more than the highest ID.
    int newDefaultSpecId = INITIAL_SPEC_ID;
    for (PartitionSpec spec : specs) {
      if (partitionSpec.compatibleWith(spec)) {
        newDefaultSpecId = spec.specId();
        break;
      } else if (newDefaultSpecId <= spec.specId()) {
        newDefaultSpecId = spec.specId() + 1;
      }
    }

    // setting the same default again is rejected rather than silently ignored
    Preconditions.checkArgument(defaultSpecId != newDefaultSpecId,
        "Cannot set default partition spec to the current default");

    ImmutableList.Builder<PartitionSpec> builder = ImmutableList.<PartitionSpec>builder()
        .addAll(specs);
    if (!specsById.containsKey(newDefaultSpecId)) {
      // get a fresh spec to ensure the spec ID is set to the new default
      builder.add(freshSpec(newDefaultSpecId, schema, partitionSpec));
    }

    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId, schema, newDefaultSpecId,
        builder.build(), properties,
        currentSnapshotId, snapshots, snapshotLog);
  }
  /**
   * Returns new table metadata with the given snapshot appended and made current.
   * The snapshot's timestamp is used as the metadata's last-updated time and a matching
   * entry is appended to the snapshot log.
   */
  public TableMetadata replaceCurrentSnapshot(Snapshot snapshot) {
    List<Snapshot> newSnapshots = ImmutableList.<Snapshot>builder()
        .addAll(snapshots)
        .add(snapshot)
        .build();
    List<SnapshotLogEntry> newSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
        .addAll(snapshotLog)
        .add(new SnapshotLogEntry(snapshot.timestampMillis(), snapshot.snapshotId()))
        .build();
    return new TableMetadata(ops, null, location,
        snapshot.timestampMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
        snapshot.snapshotId(), newSnapshots, newSnapshotLog);
  }
  /**
   * Returns new table metadata with all snapshots matching removeIf dropped.
   * The current snapshot is always kept, and the snapshot log is truncated so that it
   * never contains a gap (see the inline comment below).
   */
  public TableMetadata removeSnapshotsIf(Predicate<Snapshot> removeIf) {
    List<Snapshot> filtered = Lists.newArrayListWithExpectedSize(snapshots.size());
    for (Snapshot snapshot : snapshots) {
      // keep the current snapshot and any snapshots that do not match the removeIf condition
      if (snapshot.snapshotId() == currentSnapshotId || !removeIf.test(snapshot)) {
        filtered.add(snapshot);
      }
    }

    // update the snapshot log
    Set<Long> validIds = Sets.newHashSet(Iterables.transform(filtered, Snapshot::snapshotId));
    List<SnapshotLogEntry> newSnapshotLog = Lists.newArrayList();
    for (SnapshotLogEntry logEntry : snapshotLog) {
      if (validIds.contains(logEntry.snapshotId())) {
        // copy the log entries that are still valid
        newSnapshotLog.add(logEntry);
      } else {
        // any invalid entry causes the history before it to be removed. otherwise, there could be
        // history gaps that cause time-travel queries to produce incorrect results. for example,
        // if history is [(t1, s1), (t2, s2), (t3, s3)] and s2 is removed, the history cannot be
        // [(t1, s1), (t3, s3)] because it appears that s3 was current during the time between t2
        // and t3 when in fact s2 was the current snapshot.
        newSnapshotLog.clear();
      }
    }

    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
        currentSnapshotId, filtered, ImmutableList.copyOf(newSnapshotLog));
  }
  /**
   * Returns new table metadata with the given existing snapshot made current again.
   * A new log entry is appended with the rollback time so history remains continuous.
   *
   * @throws ValidationException if the snapshot id is not known to this metadata
   */
  public TableMetadata rollbackTo(Snapshot snapshot) {
    ValidationException.check(snapshotsById.containsKey(snapshot.snapshotId()),
        "Cannot set current snapshot to unknown: %s", snapshot.snapshotId());

    long nowMillis = System.currentTimeMillis();
    List<SnapshotLogEntry> newSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
        .addAll(snapshotLog)
        .add(new SnapshotLogEntry(nowMillis, snapshot.snapshotId()))
        .build();

    return new TableMetadata(ops, null, location,
        nowMillis, lastColumnId, schema, defaultSpecId, specs, properties,
        snapshot.snapshotId(), snapshots, newSnapshotLog);
  }
  /**
   * Returns new table metadata with the properties map replaced entirely by newProperties.
   *
   * @throws ValidationException if newProperties is null
   */
  public TableMetadata replaceProperties(Map<String, String> newProperties) {
    ValidationException.check(newProperties != null, "Cannot set properties to null");
    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, newProperties,
        currentSnapshotId, snapshots, snapshotLog);
  }
  /**
   * Returns new table metadata with log entries for the given snapshot ids removed.
   * <p>
   * NOTE(review): if every log entry is filtered out while a current snapshot is set,
   * {@code Iterables.getLast} below throws NoSuchElementException instead of the
   * ValidationException — confirm callers never remove the entire log.
   */
  public TableMetadata removeSnapshotLogEntries(Set<Long> snapshotIds) {
    List<SnapshotLogEntry> newSnapshotLog = Lists.newArrayList();
    for (SnapshotLogEntry logEntry : snapshotLog) {
      if (!snapshotIds.contains(logEntry.snapshotId())) {
        // copy the log entries that are still valid
        newSnapshotLog.add(logEntry);
      }
    }

    // the latest remaining entry must still reference the current snapshot
    ValidationException.check(currentSnapshotId < 0 || // not set
        Iterables.getLast(newSnapshotLog).snapshotId() == currentSnapshotId,
        "Cannot set invalid snapshot log: latest entry is not the current snapshot");

    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId, schema, defaultSpecId, specs, properties,
        currentSnapshotId, snapshots, newSnapshotLog);
  }
  /**
   * Returns metadata for replacing this table's contents: a fresh schema with newly assigned
   * column ids, a partition spec rebuilt against that schema, this table's properties merged
   * with the given ones (given properties win), and no current snapshot (-1).
   */
  public TableMetadata buildReplacement(Schema schema, PartitionSpec partitionSpec,
                                        Map<String, String> properties) {
    AtomicInteger lastColumnId = new AtomicInteger(0);
    Schema freshSchema = TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);

    // next spec id is 1 more than the highest id currently in use
    int nextSpecId = TableMetadata.INITIAL_SPEC_ID;
    for (Integer specId : specsById.keySet()) {
      if (nextSpecId <= specId) {
        nextSpecId = specId + 1;
      }
    }

    // rebuild the partition spec using the new column ids
    PartitionSpec freshSpec = freshSpec(nextSpecId, freshSchema, partitionSpec);

    // if the spec already exists, use the same ID. otherwise, use 1 more than the highest ID.
    int specId = nextSpecId;
    for (PartitionSpec spec : specs) {
      if (freshSpec.compatibleWith(spec)) {
        specId = spec.specId();
        break;
      }
    }

    ImmutableList.Builder<PartitionSpec> builder = ImmutableList.<PartitionSpec>builder()
        .addAll(specs);
    if (!specsById.containsKey(specId)) {
      builder.add(freshSpec);
    }

    // merge properties: values passed in override existing table properties
    Map<String, String> newProperties = Maps.newHashMap();
    newProperties.putAll(this.properties);
    newProperties.putAll(properties);

    return new TableMetadata(ops, null, location,
        System.currentTimeMillis(), lastColumnId.get(), freshSchema,
        specId, builder.build(), ImmutableMap.copyOf(newProperties),
        -1, snapshots, ImmutableList.of());
  }
private static PartitionSpec freshSpec(int specId, Schema schema, PartitionSpec partitionSpec) {
PartitionSpec.Builder specBuilder = PartitionSpec.builderFor(schema)
.withSpecId(specId);
for (PartitionField field : partitionSpec.fields()) {
// look up the name of the source field in the old schema to get the new schema's id
String sourceName = partitionSpec.schema().findColumnName(field.sourceId());
specBuilder.add(
schema.findField(sourceName).fieldId(),
field.name(),
field.transform().toString());
}
return specBuilder.build();
}
private static Map<Long, Snapshot> indexSnapshots(List<Snapshot> snapshots) {
ImmutableMap.Builder<Long, Snapshot> builder = ImmutableMap.builder();
for (Snapshot version : snapshots) {
builder.put(version.snapshotId(), version);
}
return builder.build();
}
private static Map<Integer, PartitionSpec> indexSpecs(List<PartitionSpec> specs) {
ImmutableMap.Builder<Integer, PartitionSpec> builder = ImmutableMap.builder();
for (PartitionSpec spec : specs) {
builder.put(spec.specId(), spec);
}
return builder.build();
}
}
| 6,310 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/MergeAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
/**
* Append implementation that produces a minimal number of manifest files.
* <p>
* This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
*/
class MergeAppend extends MergingSnapshotUpdate implements AppendFiles {
  MergeAppend(TableOperations ops) {
    super(ops);
  }

  /** Stages the given data file for addition by the next commit; returns this for chaining. */
  @Override
  public MergeAppend appendFile(DataFile file) {
    add(file);
    return this;
  }
}
| 6,311 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * Property keys and default values for Iceberg table configuration.
 * <p>
 * This is a constants holder and is never instantiated.
 */
public class TableProperties {

  // utility class: prevent instantiation
  private TableProperties() {
  }

  // commit retry behavior
  public static final String COMMIT_NUM_RETRIES = "commit.retry.num-retries";
  public static final int COMMIT_NUM_RETRIES_DEFAULT = 4;

  public static final String COMMIT_MIN_RETRY_WAIT_MS = "commit.retry.min-wait-ms";
  public static final int COMMIT_MIN_RETRY_WAIT_MS_DEFAULT = 100;

  public static final String COMMIT_MAX_RETRY_WAIT_MS = "commit.retry.max-wait-ms";
  public static final int COMMIT_MAX_RETRY_WAIT_MS_DEFAULT = 60000; // 1 minute

  public static final String COMMIT_TOTAL_RETRY_TIME_MS = "commit.retry.total-timeout-ms";
  public static final int COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT = 1800000; // 30 minutes

  // manifest merging
  public static final String MANIFEST_TARGET_SIZE_BYTES = "commit.manifest.target-size-bytes";
  public static final long MANIFEST_TARGET_SIZE_BYTES_DEFAULT = 8388608; // 8 MB

  public static final String MANIFEST_MIN_MERGE_COUNT = "commit.manifest.min-count-to-merge";
  public static final int MANIFEST_MIN_MERGE_COUNT_DEFAULT = 100;

  // write formats and codec options
  public static final String DEFAULT_FILE_FORMAT = "write.format.default";
  public static final String DEFAULT_FILE_FORMAT_DEFAULT = "parquet";

  public static final String PARQUET_ROW_GROUP_SIZE_BYTES = "write.parquet.row-group-size-bytes";
  public static final String PARQUET_ROW_GROUP_SIZE_BYTES_DEFAULT = "134217728"; // 128 MB

  public static final String PARQUET_PAGE_SIZE_BYTES = "write.parquet.page-size-bytes";
  public static final String PARQUET_PAGE_SIZE_BYTES_DEFAULT = "1048576"; // 1 MB

  public static final String PARQUET_DICT_SIZE_BYTES = "write.parquet.dict-size-bytes";
  public static final String PARQUET_DICT_SIZE_BYTES_DEFAULT = "2097152"; // 2 MB

  public static final String PARQUET_COMPRESSION = "write.parquet.compression-codec";
  public static final String PARQUET_COMPRESSION_DEFAULT = "gzip";

  public static final String AVRO_COMPRESSION = "write.avro.compression-codec";
  public static final String AVRO_COMPRESSION_DEFAULT = "gzip";

  // read-time split planning
  public static final String SPLIT_SIZE = "read.split.target-size";
  public static final long SPLIT_SIZE_DEFAULT = 134217728; // 128 MB

  public static final String SPLIT_LOOKBACK = "read.split.planning-lookback";
  public static final int SPLIT_LOOKBACK_DEFAULT = 10;

  // object-store write layout
  public static final String OBJECT_STORE_ENABLED = "write.object-storage.enabled";
  public static final boolean OBJECT_STORE_ENABLED_DEFAULT = false;

  public static final String OBJECT_STORE_PATH = "write.object-storage.path";

  // This only applies to files written after this property is set. Files previously written aren't relocated to
  // reflect this parameter.
  // If not set, defaults to a "data" folder underneath the root path of the table.
  public static final String WRITE_NEW_DATA_LOCATION = "write.folder-storage.path";

  public static final String MANIFEST_LISTS_ENABLED = "write.manifest-lists.enabled";
  public static final boolean MANIFEST_LISTS_ENABLED_DEFAULT = true;
}
| 6,312 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/HasTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
* Used to expose a table's TableOperations.
*/
public interface HasTableOperations {
  /** Returns the {@link TableOperations} instance that backs this table. */
  TableOperations operations();
}
| 6,313 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SnapshotParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.util.JsonUtil;
import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
public class SnapshotParser {

  // JSON field names used by both the writer and the parser
  private static final String SNAPSHOT_ID = "snapshot-id";
  private static final String PARENT_SNAPSHOT_ID = "parent-snapshot-id";
  private static final String TIMESTAMP_MS = "timestamp-ms";
  private static final String MANIFESTS = "manifests";
  private static final String MANIFEST_LIST = "manifest-list";

  /**
   * Writes a snapshot as a JSON object to the given generator.
   * The parent id is written only when present; the manifest list is written either as a
   * single file location or as an embedded array of manifest paths.
   */
  static void toJson(Snapshot snapshot, JsonGenerator generator)
      throws IOException {
    generator.writeStartObject();
    generator.writeNumberField(SNAPSHOT_ID, snapshot.snapshotId());
    if (snapshot.parentId() != null) {
      generator.writeNumberField(PARENT_SNAPSHOT_ID, snapshot.parentId());
    }
    generator.writeNumberField(TIMESTAMP_MS, snapshot.timestampMillis());

    String manifestList = snapshot.manifestListLocation();
    if (manifestList != null) {
      // write just the location. manifests should not be embedded in JSON along with a list
      generator.writeStringField(MANIFEST_LIST, manifestList);
    } else {
      // embed the manifest list in the JSON
      generator.writeArrayFieldStart(MANIFESTS);
      for (ManifestFile file : snapshot.manifests()) {
        generator.writeString(file.path());
      }
      generator.writeEndArray();
    }

    generator.writeEndObject();
  }

  /**
   * Serializes a snapshot to a pretty-printed JSON string.
   *
   * @throws RuntimeIOException if writing the JSON fails
   */
  public static String toJson(Snapshot snapshot) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      generator.useDefaultPrettyPrinter();
      toJson(snapshot, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json for: %s", snapshot);
    }
  }

  /**
   * Parses a snapshot from a JSON object node, supporting both the manifest-list-file form
   * and the older embedded manifest array form.
   */
  static Snapshot fromJson(TableOperations ops, JsonNode node) {
    Preconditions.checkArgument(node.isObject(),
        "Cannot parse table version from a non-object: %s", node);

    long versionId = JsonUtil.getLong(SNAPSHOT_ID, node);
    Long parentId = null;
    if (node.has(PARENT_SNAPSHOT_ID)) {
      parentId = JsonUtil.getLong(PARENT_SNAPSHOT_ID, node);
    }
    long timestamp = JsonUtil.getLong(TIMESTAMP_MS, node);

    if (node.has(MANIFEST_LIST)) {
      // the manifest list is stored in a manifest list file
      String manifestList = JsonUtil.getString(MANIFEST_LIST, node);
      return new BaseSnapshot(ops, versionId, parentId, timestamp, ops.io().newInputFile(manifestList));
    } else {
      // fall back to an embedded manifest list. pass in the manifest's InputFile so length can be
      // loaded lazily, if it is needed
      List<ManifestFile> manifests = Lists.transform(JsonUtil.getStringList(MANIFESTS, node),
          location -> new GenericManifestFile(ops.io().newInputFile(location), 0));
      return new BaseSnapshot(ops, versionId, parentId, timestamp, manifests);
    }
  }

  /**
   * Parses a snapshot from a JSON string.
   *
   * @throws RuntimeIOException if the string cannot be parsed as JSON
   */
  public static Snapshot fromJson(TableOperations ops, String json) {
    try {
      return fromJson(ops, JsonUtil.mapper().readValue(json, JsonNode.class));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read version from json: %s", json);
    }
  }
}
| 6,314 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SnapshotUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.Exceptions;
import com.netflix.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.MANIFEST_LISTS_ENABLED;
import static com.netflix.iceberg.TableProperties.MANIFEST_LISTS_ENABLED_DEFAULT;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
abstract class SnapshotUpdate implements PendingUpdate<Snapshot> {
  private static final Logger LOG = LoggerFactory.getLogger(SnapshotUpdate.class);

  static final Set<ManifestFile> EMPTY_SET = Sets.newHashSet();

  /**
   * Cache used to enrich ManifestFile instances that are written to a ManifestListWriter.
   */
  private final LoadingCache<ManifestFile, ManifestFile> manifestsWithMetadata = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<ManifestFile, ManifestFile>() {
        @Override
        public ManifestFile load(ManifestFile file) {
          // a non-null snapshot id indicates the file already carries metadata
          if (file.snapshotId() != null) {
            return file;
          }
          return addMetadata(ops, file);
        }
      });

  private final TableOperations ops;
  // unique id used in file names so concurrent commits never collide
  private final String commitUUID = UUID.randomUUID().toString();
  // counts manifest-list write attempts; used in manifest list file names
  private final AtomicInteger attempt = new AtomicInteger(0);
  // locations of all manifest lists written by this update, for cleanup
  private final List<String> manifestLists = Lists.newArrayList();
  // lazily assigned by snapshotId()
  private Long snapshotId = null;
  private TableMetadata base = null;

  protected SnapshotUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  /**
   * Apply the update's changes to the base table metadata and return the new manifest list.
   *
   * @param base the base table metadata to apply changes to
   * @return a manifest list for the new snapshot.
   */
  protected abstract List<ManifestFile> apply(TableMetadata base);

  /**
   * Clean up any uncommitted manifests that were created.
   * <p>
   * Manifests may not be committed if apply is called more because a commit conflict has occurred.
   * Implementations may keep around manifests because the same changes will be made by both apply
   * calls. This method instructs the implementation to clean up those manifests and passes the
   * paths of the manifests that were actually committed.
   *
   * @param committed a set of manifest paths that were actually committed
   */
  protected abstract void cleanUncommitted(Set<ManifestFile> committed);

  /**
   * Refreshes the base metadata, applies this update's changes, and builds the resulting
   * snapshot. When manifest lists are enabled, the manifest list file is written here and
   * its location recorded for later cleanup.
   */
  @Override
  public Snapshot apply() {
    this.base = ops.refresh();
    Long parentSnapshotId = base.currentSnapshot() != null ?
        base.currentSnapshot().snapshotId() : null;

    List<ManifestFile> manifests = apply(base);

    if (base.propertyAsBoolean(MANIFEST_LISTS_ENABLED, MANIFEST_LISTS_ENABLED_DEFAULT)) {
      OutputFile manifestList = manifestListPath();

      try (ManifestListWriter writer = new ManifestListWriter(
          manifestList, snapshotId(), parentSnapshotId)) {
        // keep track of the manifest lists created
        manifestLists.add(manifestList.location());

        ManifestFile[] manifestFiles = new ManifestFile[manifests.size()];

        // NOTE(review): the retry count here is hard-coded to 4, unlike commit() below
        // which reads COMMIT_NUM_RETRIES from table properties — confirm intended
        Tasks.range(manifestFiles.length)
            .stopOnFailure().throwFailureWhenFinished()
            .retry(4).exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */ )
            .executeWith(getWorkerPool())
            .run(index ->
                manifestFiles[index] = manifestsWithMetadata.getUnchecked(manifests.get(index)));

        writer.addAll(Arrays.asList(manifestFiles));

      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to write manifest list file");
      }

      return new BaseSnapshot(ops,
          snapshotId(), parentSnapshotId, System.currentTimeMillis(),
          ops.io().newInputFile(manifestList.location()));

    } else {
      return new BaseSnapshot(ops,
          snapshotId(), parentSnapshotId, System.currentTimeMillis(), manifests);
    }
  }

  /**
   * Commits the new snapshot, retrying on CommitFailedException per the table's retry
   * properties, then cleans up manifests and manifest lists left over from failed attempts.
   */
  @Override
  public void commit() {
    // this is always set to the latest commit attempt's snapshot id.
    AtomicLong newSnapshotId = new AtomicLong(-1L);
    try {
      Tasks.foreach(ops)
          .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
          .exponentialBackoff(
              base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
              base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
              base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
              2.0 /* exponential */ )
          .onlyRetryOn(CommitFailedException.class)
          .run(ops -> {
            Snapshot newSnapshot = apply();
            newSnapshotId.set(newSnapshot.snapshotId());
            TableMetadata updated = base.replaceCurrentSnapshot(newSnapshot);
            ops.commit(base, updated);
          });

    } catch (RuntimeException e) {
      // delete everything this update wrote before rethrowing
      Exceptions.suppressAndThrow(e, this::cleanAll);
    }

    LOG.info("Committed snapshot {} ({})", newSnapshotId.get(), getClass().getSimpleName());

    try {
      // at this point, the commit must have succeeded. after a refresh, the snapshot is loaded by
      // id in case another commit was added between this commit and the refresh.
      Snapshot saved = ops.refresh().snapshot(newSnapshotId.get());
      if (saved != null) {
        cleanUncommitted(Sets.newHashSet(saved.manifests()));
        // also clean up unused manifest lists created by multiple attempts
        for (String manifestList : manifestLists) {
          if (!saved.manifestListLocation().equals(manifestList)) {
            ops.io().deleteFile(manifestList);
          }
        }
      } else {
        // saved may not be present if the latest metadata couldn't be loaded due to eventual
        // consistency problems in refresh. in that case, don't clean up.
        LOG.info("Failed to load committed snapshot, skipping manifest clean-up");
      }

    } catch (RuntimeException e) {
      LOG.info("Failed to load committed table metadata, skipping manifest clean-up", e);
    }
  }

  /** Deletes all manifest lists written by this update and any uncommitted manifests. */
  protected void cleanAll() {
    for (String manifestList : manifestLists) {
      ops.io().deleteFile(manifestList);
    }
    manifestLists.clear();
    cleanUncommitted(EMPTY_SET);
  }

  /** Deletes a file through the table's FileIO. */
  protected void deleteFile(String path) {
    ops.io().deleteFile(path);
  }

  /** Returns a new unique metadata location for a manifest list (snap-<id>-<attempt>-<uuid>). */
  protected OutputFile manifestListPath() {
    return ops.io().newOutputFile(ops.metadataFileLocation(FileFormat.AVRO.addExtension(
        String.format("snap-%d-%d-%s", snapshotId(), attempt.incrementAndGet(), commitUUID))));
  }

  /** Returns a new unique metadata location for the i-th manifest of this commit. */
  protected OutputFile manifestPath(int i) {
    return ops.io().newOutputFile(
        ops.metadataFileLocation(FileFormat.AVRO.addExtension(commitUUID + "-m" + i)));
  }

  /** Returns this update's snapshot id, allocating one on first use. */
  protected long snapshotId() {
    if (snapshotId == null) {
      this.snapshotId = ops.newSnapshotId();
    }
    return snapshotId;
  }

  /**
   * Reads a manifest to compute its file counts, partition summaries, and snapshot id,
   * returning an enriched GenericManifestFile with that metadata filled in.
   */
  private static ManifestFile addMetadata(TableOperations ops, ManifestFile manifest) {
    try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
      PartitionSummary stats = new PartitionSummary(ops.current().spec(manifest.partitionSpecId()));
      int addedFiles = 0;
      int existingFiles = 0;
      int deletedFiles = 0;

      // use the snapshot id of the first added or deleted entry, if any
      Long snapshotId = null;
      long maxSnapshotId = Long.MIN_VALUE;
      for (ManifestEntry entry : reader.entries()) {
        if (entry.snapshotId() > maxSnapshotId) {
          maxSnapshotId = entry.snapshotId();
        }

        switch (entry.status()) {
          case ADDED:
            addedFiles += 1;
            if (snapshotId == null) {
              snapshotId = entry.snapshotId();
            }
            break;
          case EXISTING:
            existingFiles += 1;
            break;
          case DELETED:
            deletedFiles += 1;
            if (snapshotId == null) {
              snapshotId = entry.snapshotId();
            }
            break;
        }

        stats.update(entry.file().partition());
      }

      if (snapshotId == null) {
        // if no files were added or deleted, use the largest snapshot ID in the manifest
        snapshotId = maxSnapshotId;
      }

      return new GenericManifestFile(manifest.path(), manifest.length(), manifest.partitionSpecId(),
          snapshotId, addedFiles, existingFiles, deletedFiles, stats.summaries());

    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read manifest: %s", manifest.path());
    }
  }
}
| 6,315 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ConfigProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import org.apache.hadoop.conf.Configuration;
/**
 * Hadoop configuration keys and helpers for Iceberg metadata handling.
 */
public class ConfigProperties {
  public static final String COMPRESS_METADATA = "iceberg.compress.metadata";
  public static final boolean COMPRESS_METADATA_DEFAULT = false;

  // utility class: prevent instantiation
  private ConfigProperties() {
  }

  /**
   * Returns whether metadata files should be compressed, reading
   * {@link #COMPRESS_METADATA} from the given Hadoop configuration.
   */
  public static boolean shouldCompress(Configuration configuration) {
    return configuration.getBoolean(COMPRESS_METADATA, COMPRESS_METADATA_DEFAULT);
  }
}
| 6,316 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/DataFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Conversions;
import org.apache.hadoop.fs.FileStatus;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Locale;
import java.util.Map;
public class DataFiles {
private static final long DEFAULT_BLOCK_SIZE = 64*1024*1024;
  /** Creates an empty PartitionData matching the spec's partition struct type. */
  private static PartitionData newPartitionData(PartitionSpec spec) {
    return new PartitionData(spec.partitionType());
  }

  /**
   * Copies partition values from any StructLike into a PartitionData for the given spec.
   * A reuse instance may be passed to avoid allocation; when null, a new one is created.
   */
  private static PartitionData copyPartitionData(PartitionSpec spec, StructLike partitionData, PartitionData reuse) {
    PartitionData data = reuse;
    if (data == null) {
      data = newPartitionData(spec);
    }

    // copy each field using the spec's expected Java class for that position
    Class<?>[] javaClasses = spec.javaClasses();
    List<PartitionField> fields = spec.fields();
    for (int i = 0; i < fields.size(); i += 1) {
      data.set(i, partitionData.get(i, javaClasses[i]));
    }

    return data;
  }
private static PartitionData fillFromPath(PartitionSpec spec, String partitionPath, PartitionData reuse) {
PartitionData data = reuse;
if (data == null) {
data = newPartitionData(spec);
}
String[] partitions = partitionPath.split("/", -1);
Preconditions.checkArgument(partitions.length <= spec.fields().size(),
"Invalid partition data, too many fields (expecting %s): %s",
spec.fields().size(), partitionPath);
Preconditions.checkArgument(partitions.length >= spec.fields().size(),
"Invalid partition data, not enough fields (expecting %s): %s",
spec.fields().size(), partitionPath);
for (int i = 0; i < partitions.length; i += 1) {
PartitionField field = spec.fields().get(i);
String[] parts = partitions[i].split("=", 2);
Preconditions.checkArgument(
parts.length == 2 &&
parts[0] != null &&
field.name().equals(parts[0]),
"Invalid partition: " + partitions[i]);
data.set(i, Conversions.fromPartitionString(data.getType(i), parts[1]));
}
return data;
}
  /** Parses a Hive-style partition path (e.g. "a=1/b=2") into PartitionData for the spec. */
  public static PartitionData data(PartitionSpec spec, String partitionPath) {
    return fillFromPath(spec, partitionPath, null);
  }

  /** Copies an arbitrary StructLike partition tuple into a new PartitionData for the spec. */
  public static PartitionData copy(PartitionSpec spec, StructLike partition) {
    return copyPartitionData(spec, partition, null);
  }
public static DataFile fromInputFile(InputFile file, long rowCount) {
if (file instanceof HadoopInputFile) {
return fromStat(((HadoopInputFile) file).getStat(), rowCount);
}
String location = file.location();
FileFormat format = FileFormat.fromFileName(location);
return new GenericDataFile(location, format, rowCount, file.getLength(), DEFAULT_BLOCK_SIZE);
}
public static DataFile fromStat(FileStatus stat, long rowCount) {
String location = stat.getPath().toString();
FileFormat format = FileFormat.fromFileName(location);
return new GenericDataFile(location, format, rowCount, stat.getLen(), stat.getBlockSize());
}
public static DataFile fromInputFile(InputFile file, PartitionData partition, long rowCount) {
if (file instanceof HadoopInputFile) {
return fromStat(((HadoopInputFile) file).getStat(), partition, rowCount);
}
String location = file.location();
FileFormat format = FileFormat.fromFileName(location);
return new GenericDataFile(
location, format, partition, rowCount, file.getLength(), DEFAULT_BLOCK_SIZE);
}
public static DataFile fromStat(FileStatus stat, PartitionData partition, long rowCount) {
String location = stat.getPath().toString();
FileFormat format = FileFormat.fromFileName(location);
return new GenericDataFile(
location, format, partition, rowCount, stat.getLen(), stat.getBlockSize());
}
/**
 * Creates a partitioned {@link DataFile} for the given input file, carrying column metrics.
 *
 * @param file the file that was written
 * @param partition the partition tuple for the file
 * @param metrics column-level metrics collected while writing (includes record count)
 * @return a DataFile describing the input file
 */
public static DataFile fromInputFile(InputFile file, PartitionData partition, Metrics metrics) {
  // Hadoop-backed files carry a FileStatus with real block size; delegate to use it
  if (file instanceof HadoopInputFile) {
    HadoopInputFile hadoopFile = (HadoopInputFile) file;
    return fromStat(hadoopFile.getStat(), partition, metrics);
  }
  String path = file.location();
  return new GenericDataFile(
      path, FileFormat.fromFileName(path), partition, file.getLength(), DEFAULT_BLOCK_SIZE,
      metrics);
}
/**
 * Creates a partitioned {@link DataFile} from a Hadoop {@link FileStatus}, carrying column
 * metrics.
 *
 * @param stat file status for the written file; supplies path, length, and block size
 * @param partition the partition tuple for the file
 * @param metrics column-level metrics collected while writing (includes record count)
 * @return a DataFile describing the file
 */
public static DataFile fromStat(FileStatus stat, PartitionData partition, Metrics metrics) {
  String path = stat.getPath().toString();
  // format is inferred from the file extension
  FileFormat fileFormat = FileFormat.fromFileName(path);
  return new GenericDataFile(
      path, fileFormat, partition, stat.getLen(), stat.getBlockSize(), metrics);
}
/**
 * Creates a partitioned Parquet {@link DataFile} for the given input file.
 * <p>
 * Unlike {@link #fromInputFile(InputFile, PartitionData, Metrics)}, the format is forced to
 * {@link FileFormat#PARQUET} instead of being inferred from the file name.
 *
 * @param file the Parquet file that was written
 * @param partition the partition tuple for the file
 * @param metrics column-level metrics collected while writing (includes record count)
 * @return a DataFile describing the input file
 */
public static DataFile fromParquetInputFile(InputFile file,
                                            PartitionData partition,
                                            Metrics metrics) {
  // Hadoop-backed files carry a FileStatus with real block size; delegate to use it
  if (file instanceof HadoopInputFile) {
    HadoopInputFile hadoopFile = (HadoopInputFile) file;
    return fromParquetStat(hadoopFile.getStat(), partition, metrics);
  }
  return new GenericDataFile(
      file.location(), FileFormat.PARQUET, partition, file.getLength(), DEFAULT_BLOCK_SIZE,
      metrics);
}
/**
 * Creates a partitioned Parquet {@link DataFile} from a Hadoop {@link FileStatus}.
 * <p>
 * The format is forced to {@link FileFormat#PARQUET} rather than inferred from the file name.
 *
 * @param stat file status for the written file; supplies path, length, and block size
 * @param partition the partition tuple for the file
 * @param metrics column-level metrics collected while writing (includes record count)
 * @return a DataFile describing the file
 */
public static DataFile fromParquetStat(FileStatus stat, PartitionData partition, Metrics metrics) {
  return new GenericDataFile(
      stat.getPath().toString(), FileFormat.PARQUET, partition, stat.getLen(),
      stat.getBlockSize(), metrics);
}
/**
 * Returns a new {@link Builder} for data files in a partitioned table.
 *
 * @param spec the partition spec used to create and validate partition tuples
 * @return a builder for partitioned DataFiles
 */
public static Builder builder(PartitionSpec spec) {
  return new Builder(spec);
}
/**
 * Returns a new {@link Builder} for data files in an unpartitioned table.
 * Package-private: external callers must supply a spec via {@link #builder(PartitionSpec)}.
 *
 * @return a builder for unpartitioned DataFiles
 */
static Builder builder() {
  return new Builder();
}
/**
 * Builder for {@link DataFile} instances.
 * <p>
 * Required fields are the file path, record count, and file size; format is inferred from the
 * file name when not set explicitly. Numeric fields use -1 as an "unset" sentinel and null
 * object fields mean "not provided", so {@link #build()} can validate what was supplied.
 * The same builder can be reused across files via {@link #clear()}.
 */
public static class Builder {
  // null for unpartitioned builders created with the no-arg constructor
  private final PartitionSpec spec;
  private final boolean isPartitioned;
  private PartitionData partitionData;
  private String filePath = null;
  private FileFormat format = null;
  // -1 is the "unset" sentinel for the long fields below
  private long recordCount = -1L;
  private long fileSizeInBytes = -1L;
  private long blockSizeInBytes = -1L;
  // optional fields
  private Map<Integer, Long> columnSizes = null;
  private Map<Integer, Long> valueCounts = null;
  private Map<Integer, Long> nullValueCounts = null;
  private Map<Integer, ByteBuffer> lowerBounds = null;
  private Map<Integer, ByteBuffer> upperBounds = null;

  /** Creates a builder for an unpartitioned table; partition data is never populated. */
  public Builder() {
    this.spec = null;
    this.partitionData = null;
    this.isPartitioned = false;
  }

  /** Creates a builder for a partitioned table using {@code spec} to shape partition tuples. */
  public Builder(PartitionSpec spec) {
    this.spec = spec;
    this.partitionData = newPartitionData(spec);
    this.isPartitioned = true;
  }

  /** Resets all per-file state so the builder can be reused for another file. */
  public void clear() {
    if (isPartitioned) {
      // reuse the PartitionData instance instead of reallocating
      partitionData.clear();
    }
    this.filePath = null;
    this.format = null;
    this.recordCount = -1L;
    this.fileSizeInBytes = -1L;
    this.blockSizeInBytes = -1L;
    this.columnSizes = null;
    this.valueCounts = null;
    this.nullValueCounts = null;
    this.lowerBounds = null;
    this.upperBounds = null;
  }

  /**
   * Copies all fields from an existing {@link DataFile} into this builder.
   *
   * @param toCopy the file to copy
   * @return this for method chaining
   */
  public Builder copy(DataFile toCopy) {
    if (isPartitioned) {
      this.partitionData = copyPartitionData(spec, toCopy.partition(), partitionData);
    }
    this.filePath = toCopy.path().toString();
    this.format = toCopy.format();
    this.recordCount = toCopy.recordCount();
    this.fileSizeInBytes = toCopy.fileSizeInBytes();
    this.blockSizeInBytes = toCopy.blockSizeInBytes();
    this.columnSizes = toCopy.columnSizes();
    this.valueCounts = toCopy.valueCounts();
    this.nullValueCounts = toCopy.nullValueCounts();
    this.lowerBounds = toCopy.lowerBounds();
    this.upperBounds = toCopy.upperBounds();
    return this;
  }

  /**
   * Sets path, file size, and block size from a Hadoop {@link FileStatus}.
   *
   * @return this for method chaining
   */
  public Builder withStatus(FileStatus stat) {
    this.filePath = stat.getPath().toString();
    this.fileSizeInBytes = stat.getLen();
    this.blockSizeInBytes = stat.getBlockSize();
    return this;
  }

  /**
   * Sets path and file size from an {@link InputFile}. Hadoop-backed files delegate to
   * {@link #withStatus(FileStatus)} to also pick up the block size.
   *
   * @return this for method chaining
   */
  public Builder withInputFile(InputFile file) {
    if (file instanceof HadoopInputFile) {
      return withStatus(((HadoopInputFile) file).getStat());
    }
    this.filePath = file.location();
    this.fileSizeInBytes = file.getLength();
    return this;
  }

  /** Sets the file location. Required before {@link #build()}. */
  public Builder withPath(String filePath) {
    this.filePath = filePath;
    return this;
  }

  /**
   * Sets the file format by name, case-insensitively (must match a {@link FileFormat} constant).
   *
   * @return this for method chaining
   */
  public Builder withFormat(String format) {
    this.format = FileFormat.valueOf(format.toUpperCase(Locale.ENGLISH));
    return this;
  }

  /** Sets the file format. If never set, {@link #build()} infers it from the file name. */
  public Builder withFormat(FileFormat format) {
    this.format = format;
    return this;
  }

  /**
   * Sets the partition tuple by copying {@code partition} using the builder's spec.
   * NOTE(review): unlike {@link #withPartitionPath(String)}, this does not check
   * {@code isPartitioned}; calling it on an unpartitioned builder passes a null spec through.
   *
   * @return this for method chaining
   */
  public Builder withPartition(StructLike partition) {
    this.partitionData = copyPartitionData(spec, partition, partitionData);
    return this;
  }

  /** Sets the record count. Required unless supplied via {@link #withMetrics(Metrics)}. */
  public Builder withRecordCount(long recordCount) {
    this.recordCount = recordCount;
    return this;
  }

  /** Sets the file size in bytes. Required before {@link #build()}. */
  public Builder withFileSizeInBytes(long fileSizeInBytes) {
    this.fileSizeInBytes = fileSizeInBytes;
    return this;
  }

  /** Sets the block size in bytes. Defaults to {@code DEFAULT_BLOCK_SIZE} if unset. */
  public Builder withBlockSizeInBytes(long blockSizeInBytes) {
    this.blockSizeInBytes = blockSizeInBytes;
    return this;
  }

  /**
   * Sets the partition tuple by parsing a name=value partition path against the spec.
   *
   * @throws IllegalArgumentException if this builder is for an unpartitioned table
   * @return this for method chaining
   */
  public Builder withPartitionPath(String partitionPath) {
    Preconditions.checkArgument(isPartitioned,
        "Cannot add partition data for an unpartitioned table");
    this.partitionData = fillFromPath(spec, partitionPath, partitionData);
    return this;
  }

  /**
   * Sets record count and all column-level metrics from a {@link Metrics} instance.
   *
   * @return this for method chaining
   */
  public Builder withMetrics(Metrics metrics) {
    // check for null to avoid NPE when unboxing
    this.recordCount = metrics.recordCount() == null ? -1 : metrics.recordCount();
    this.columnSizes = metrics.columnSizes();
    this.valueCounts = metrics.valueCounts();
    this.nullValueCounts = metrics.nullValueCounts();
    this.lowerBounds = metrics.lowerBounds();
    this.upperBounds = metrics.upperBounds();
    return this;
  }

  /**
   * Validates required fields and builds the {@link DataFile}.
   *
   * @throws IllegalArgumentException if path, format, file size, or record count is missing
   * @return an immutable DataFile; partition data is copied so the builder can be reused
   */
  public DataFile build() {
    Preconditions.checkArgument(filePath != null, "File path is required");
    if (format == null) {
      // fall back to inferring the format from the file extension
      this.format = FileFormat.fromFileName(filePath);
    }
    Preconditions.checkArgument(format != null, "File format is required");
    Preconditions.checkArgument(fileSizeInBytes >= 0, "File size is required");
    Preconditions.checkArgument(recordCount >= 0, "Record count is required");
    if (blockSizeInBytes < 0) {
      this.blockSizeInBytes = DEFAULT_BLOCK_SIZE; // assume 64MB blocks
    }
    return new GenericDataFile(
        filePath, format, isPartitioned ? partitionData.copy() : null,
        fileSizeInBytes, blockSizeInBytes, new Metrics(
        recordCount, columnSizes, valueCounts, nullValueCounts, lowerBounds, upperBounds));
  }
}
}
| 6,317 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PartitionSpecParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import com.netflix.iceberg.util.Pair;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Iterator;
import java.util.concurrent.ExecutionException;
/**
 * Serializes {@link PartitionSpec} instances to and from JSON.
 * <p>
 * The JSON form is an object with a {@code spec-id} and a {@code fields} array; each field
 * carries {@code name}, {@code transform}, and {@code source-id}. Parsed specs are cached by
 * (schema struct, JSON string) so repeated parses of the same metadata reuse one instance.
 */
public class PartitionSpecParser {
  private PartitionSpecParser() {
  }

  private static final String SPEC_ID = "spec-id";
  private static final String FIELDS = "fields";
  private static final String SOURCE_ID = "source-id";
  private static final String TRANSFORM = "transform";
  private static final String NAME = "name";

  // final: the cache reference itself must never be replaced.
  // weak values allow specs to be collected once nothing else references them.
  private static final Cache<Pair<Types.StructType, String>, PartitionSpec> SPEC_CACHE =
      CacheBuilder
          .newBuilder()
          .weakValues()
          .build();

  /**
   * Writes {@code spec} as a JSON object ({@code spec-id} + {@code fields}) to the generator.
   *
   * @throws IOException if the generator fails to write
   */
  public static void toJson(PartitionSpec spec, JsonGenerator generator) throws IOException {
    generator.writeStartObject();
    generator.writeNumberField(SPEC_ID, spec.specId());
    generator.writeFieldName(FIELDS);
    toJsonFields(spec, generator);
    generator.writeEndObject();
  }

  /** Returns the compact JSON representation of {@code spec}. */
  public static String toJson(PartitionSpec spec) {
    return toJson(spec, false);
  }

  /**
   * Returns the JSON representation of {@code spec}.
   *
   * @param pretty whether to pretty-print the output
   * @throws RuntimeIOException if writing fails
   */
  public static String toJson(PartitionSpec spec, boolean pretty) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      if (pretty) {
        generator.useDefaultPrettyPrinter();
      }
      toJson(spec, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /**
   * Parses a spec from a parsed JSON object, binding field source ids to {@code schema}.
   *
   * @throws IllegalArgumentException if {@code json} is not a JSON object
   */
  public static PartitionSpec fromJson(Schema schema, JsonNode json) {
    Preconditions.checkArgument(json.isObject(), "Cannot parse spec from non-object: %s", json);
    int specId = JsonUtil.getInt(SPEC_ID, json);
    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema).withSpecId(specId);
    buildFromJsonFields(builder, json.get(FIELDS));
    return builder.build();
  }

  /**
   * Parses a spec from a JSON string, consulting the spec cache first.
   *
   * @throws RuntimeIOException if the JSON cannot be read
   */
  public static PartitionSpec fromJson(Schema schema, String json) {
    try {
      return SPEC_CACHE.get(Pair.of(schema.asStruct(), json),
          () -> fromJson(schema, JsonUtil.mapper().readValue(json, JsonNode.class)));
    } catch (ExecutionException e) {
      // unwrap the loader's exception so callers see the underlying failure
      if (e.getCause() instanceof IOException) {
        throw new RuntimeIOException(
            (IOException) e.getCause(), "Failed to parse partition spec: %s", json);
      } else {
        throw new RuntimeException("Failed to parse partition spec: " + json, e.getCause());
      }
    }
  }

  /** Writes only the {@code fields} array for {@code spec} to the generator. */
  static void toJsonFields(PartitionSpec spec, JsonGenerator generator) throws IOException {
    generator.writeStartArray();
    for (PartitionField field : spec.fields()) {
      generator.writeStartObject();
      generator.writeStringField(NAME, field.name());
      generator.writeStringField(TRANSFORM, field.transform().toString());
      generator.writeNumberField(SOURCE_ID, field.sourceId());
      generator.writeEndObject();
    }
    generator.writeEndArray();
  }

  /** Returns the JSON {@code fields} array for {@code spec} as a string. */
  static String toJsonFields(PartitionSpec spec) {
    try {
      StringWriter writer = new StringWriter();
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      toJsonFields(spec, generator);
      generator.flush();
      return writer.toString();
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  /** Builds a spec from a parsed {@code fields} array and an externally supplied spec id. */
  static PartitionSpec fromJsonFields(Schema schema, int specId, JsonNode json) {
    PartitionSpec.Builder builder = PartitionSpec.builderFor(schema).withSpecId(specId);
    buildFromJsonFields(builder, json);
    return builder.build();
  }

  /**
   * Builds a spec from a JSON {@code fields} array string and an externally supplied spec id.
   *
   * @throws RuntimeIOException if the JSON cannot be read
   */
  static PartitionSpec fromJsonFields(Schema schema, int specId, String json) {
    try {
      return fromJsonFields(schema, specId, JsonUtil.mapper().readValue(json, JsonNode.class));
    } catch (IOException e) {
      // use format args for consistency with the other parse-failure messages
      throw new RuntimeIOException(e, "Failed to parse partition spec fields: %s", json);
    }
  }

  /** Adds each (source-id, name, transform) field from the JSON array to the builder. */
  private static void buildFromJsonFields(PartitionSpec.Builder builder, JsonNode json) {
    Preconditions.checkArgument(json.isArray(),
        "Cannot parse partition spec fields, not an array: %s", json);
    Iterator<JsonNode> elements = json.elements();
    while (elements.hasNext()) {
      JsonNode element = elements.next();
      Preconditions.checkArgument(element.isObject(),
          "Cannot parse partition field, not an object: %s", element);
      String name = JsonUtil.getString(NAME, element);
      String transform = JsonUtil.getString(TRANSFORM, element);
      int sourceId = JsonUtil.getInt(SOURCE_ID, element);
      builder.add(sourceId, name, transform);
    }
  }
}
| 6,318 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/MergingSnapshotUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.expressions.StrictMetricsEvaluator;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.BinPacking.ListPacker;
import com.netflix.iceberg.util.CharSequenceWrapper;
import com.netflix.iceberg.util.StructLikeWrapper;
import com.netflix.iceberg.util.Tasks;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.reflect.Array;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import static com.google.common.collect.Iterables.filter;
import static com.google.common.collect.Iterables.transform;
import static com.netflix.iceberg.TableProperties.MANIFEST_MIN_MERGE_COUNT;
import static com.netflix.iceberg.TableProperties.MANIFEST_MIN_MERGE_COUNT_DEFAULT;
import static com.netflix.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES;
import static com.netflix.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
/**
 * Base class for snapshot updates that combine added files and deletes into a new set of
 * manifests.
 * <p>
 * Subclasses register additions via {@link #add(DataFile)} and deletions via an expression,
 * partition tuple, or explicit path. {@link #apply(TableMetadata)} filters existing manifests
 * against the deletes, groups them by partition spec id, and bin-packs compatible manifests
 * into merged ones. Filter and merge results are cached so that commit retries reuse work;
 * {@code cleanUncommitted*} methods delete any files those caches produced that did not end up
 * in the committed snapshot.
 */
abstract class MergingSnapshotUpdate extends SnapshotUpdate {
  // intentionally not static: getClass() makes log lines carry the concrete subclass name
  private final Logger LOG = LoggerFactory.getLogger(getClass());
  private static final Joiner COMMA = Joiner.on(",");

  /** Thrown when {@link #failAnyDelete()} is set and a delete matches an existing file. */
  protected static class DeleteException extends ValidationException {
    private final String partition;

    private DeleteException(String partition) {
      super("Operation would delete existing data");
      this.partition = partition;
    }

    /** Returns the partition path of the file that would have been deleted. */
    public String partition() {
      return partition;
    }
  }

  private final TableOperations ops;
  private final PartitionSpec spec;
  private final long manifestTargetSizeBytes;
  private final int minManifestsCountToMerge;

  // update data
  private final AtomicInteger manifestCount = new AtomicInteger(0);
  private final List<DataFile> newFiles = Lists.newArrayList();
  private final Set<CharSequenceWrapper> deletePaths = Sets.newHashSet();
  private final Set<StructLikeWrapper> dropPartitions = Sets.newHashSet();
  private Expression deleteExpression = Expressions.alwaysFalse();
  private boolean failAnyDelete = false;
  private boolean failMissingDeletePaths = false;

  // cache the new manifest once it is written
  private ManifestFile newManifest = null;
  private boolean hasNewFiles = false;

  // cache merge results to reuse when retrying
  private final Map<List<ManifestFile>, ManifestFile> mergeManifests = Maps.newConcurrentMap();

  // cache filtered manifests to avoid extra work when commits fail.
  private final Map<ManifestFile, ManifestFile> filteredManifests = Maps.newConcurrentMap();

  // tracking where files were deleted to validate retries quickly
  private final Map<ManifestFile, Set<CharSequenceWrapper>> filteredManifestToDeletedFiles =
      Maps.newConcurrentMap();

  private boolean filterUpdated = false; // used to clear caches of filtered and merged manifests

  MergingSnapshotUpdate(TableOperations ops) {
    super(ops);
    this.ops = ops;
    this.spec = ops.current().spec();
    // table properties control when and how aggressively manifests are merged
    this.manifestTargetSizeBytes = ops.current()
        .propertyAsLong(MANIFEST_TARGET_SIZE_BYTES, MANIFEST_TARGET_SIZE_BYTES_DEFAULT);
    this.minManifestsCountToMerge = ops.current()
        .propertyAsInt(MANIFEST_MIN_MERGE_COUNT, MANIFEST_MIN_MERGE_COUNT_DEFAULT);
  }

  protected PartitionSpec writeSpec() {
    // the spec is set when the write is started
    return spec;
  }

  /** Returns the combined delete expression accumulated so far. */
  protected Expression rowFilter() {
    return deleteExpression;
  }

  /** Returns the files added to this update; callers must not modify the list. */
  protected List<DataFile> addedFiles() {
    return newFiles;
  }

  /** After this call, any matched delete causes a {@link DeleteException}. */
  protected void failAnyDelete() {
    this.failAnyDelete = true;
  }

  /** After this call, apply() validates that every explicit delete path was found. */
  protected void failMissingDeletePaths() {
    this.failMissingDeletePaths = true;
  }

  /**
   * Add a filter to match files to delete. A file will be deleted if all of the rows it contains
   * match this or any other filter passed to this method.
   *
   * @param expr an expression to match rows.
   */
  protected void deleteByRowFilter(Expression expr) {
    Preconditions.checkNotNull(expr, "Cannot delete files using filter: null");
    // a changed filter invalidates cached filter results; apply() clears them
    this.filterUpdated = true;
    this.deleteExpression = Expressions.or(deleteExpression, expr);
  }

  /**
   * Add a partition tuple to drop from the table during the delete phase.
   */
  protected void dropPartition(StructLike partition) {
    dropPartitions.add(StructLikeWrapper.wrap(partition));
  }

  /**
   * Add a specific path to be deleted in the new snapshot.
   */
  protected void delete(CharSequence path) {
    Preconditions.checkNotNull(path, "Cannot delete file path: null");
    this.filterUpdated = true;
    deletePaths.add(CharSequenceWrapper.wrap(path));
  }

  /**
   * Add a file to the new snapshot.
   */
  protected void add(DataFile file) {
    hasNewFiles = true;
    newFiles.add(file);
  }

  /**
   * Produces the manifest list for this update: filters existing manifests against the deletes,
   * groups by partition spec id (descending), and merges each group.
   *
   * @throws ValidationException if required delete paths are missing or a partial-row delete
   *         is detected
   */
  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    if (filterUpdated) {
      // the delete filter changed since the last apply; drop stale filtered manifests
      cleanUncommittedFilters(SnapshotUpdate.EMPTY_SET);
      this.filterUpdated = false;
    }

    Snapshot current = base.currentSnapshot();
    Map<Integer, List<ManifestFile>> groups = Maps.newTreeMap(Comparator.<Integer>reverseOrder());

    // use a common metrics evaluator for all manifests because it is bound to the table schema
    StrictMetricsEvaluator metricsEvaluator = new StrictMetricsEvaluator(
        ops.current().schema(), deleteExpression);

    // add the current spec as the first group. files are added to the beginning.
    try {
      if (newFiles.size() > 0) {
        ManifestFile newManifest = newFilesAsManifest();
        List<ManifestFile> manifestGroup = Lists.newArrayList();
        manifestGroup.add(newManifest);
        groups.put(newManifest.partitionSpecId(), manifestGroup);
      }

      Set<CharSequenceWrapper> deletedFiles = Sets.newHashSet();

      // group manifests by compatible partition specs to be merged
      if (current != null) {
        List<ManifestFile> manifests = current.manifests();
        ManifestFile[] filtered = new ManifestFile[manifests.size()];
        // open all of the manifest files in parallel, use index to avoid reordering
        Tasks.range(filtered.length)
            .stopOnFailure().throwFailureWhenFinished()
            .executeWith(getWorkerPool())
            .run(index -> {
              ManifestFile manifest = filterManifest(
                  deleteExpression, metricsEvaluator,
                  manifests.get(index));
              filtered[index] = manifest;
            }, IOException.class);

        for (ManifestFile manifest : filtered) {
          Set<CharSequenceWrapper> manifestDeletes = filteredManifestToDeletedFiles.get(manifest);
          if (manifestDeletes != null) {
            deletedFiles.addAll(manifestDeletes);
          }

          List<ManifestFile> group = groups.get(manifest.partitionSpecId());
          if (group != null) {
            group.add(manifest);
          } else {
            group = Lists.newArrayList();
            group.add(manifest);
            groups.put(manifest.partitionSpecId(), group);
          }
        }
      }

      List<ManifestFile> manifests = Lists.newArrayList();
      for (Map.Entry<Integer, List<ManifestFile>> entry : groups.entrySet()) {
        for (ManifestFile manifest : mergeGroup(entry.getKey(), entry.getValue())) {
          manifests.add(manifest);
        }
      }

      // if enabled, fail when any explicitly deleted path was not found in a manifest
      ValidationException.check(!failMissingDeletePaths || deletedFiles.containsAll(deletePaths),
          "Missing required files to delete: %s",
          COMMA.join(transform(filter(deletePaths,
              path -> !deletedFiles.contains(path)),
              CharSequenceWrapper::get)));

      return manifests;

    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create snapshot manifest list");
    }
  }

  /** Deletes merged manifests that were written but not committed, and evicts them. */
  private void cleanUncommittedMerges(Set<ManifestFile> committed) {
    // iterate over a copy of entries to avoid concurrent modification
    List<Map.Entry<List<ManifestFile>, ManifestFile>> entries =
        Lists.newArrayList(mergeManifests.entrySet());
    for (Map.Entry<List<ManifestFile>, ManifestFile> entry : entries) {
      // delete any new merged manifests that aren't in the committed list
      ManifestFile merged = entry.getValue();
      if (!committed.contains(merged)) {
        deleteFile(merged.path());
        // remove the deleted file from the cache
        mergeManifests.remove(entry.getKey());
      }
    }
  }

  /** Deletes filtered-manifest copies that were written but not committed, and evicts them. */
  private void cleanUncommittedFilters(Set<ManifestFile> committed) {
    // iterate over a copy of entries to avoid concurrent modification
    List<Map.Entry<ManifestFile, ManifestFile>> filterEntries =
        Lists.newArrayList(filteredManifests.entrySet());
    for (Map.Entry<ManifestFile, ManifestFile> entry : filterEntries) {
      // remove any new filtered manifests that aren't in the committed list
      ManifestFile manifest = entry.getKey();
      ManifestFile filtered = entry.getValue();
      if (!committed.contains(filtered)) {
        // only delete if the filtered copy was created
        if (!manifest.equals(filtered)) {
          deleteFile(filtered.path());
        }

        // remove the entry from the cache
        filteredManifests.remove(manifest);
      }
    }
  }

  @Override
  protected void cleanUncommitted(Set<ManifestFile> committed) {
    if (newManifest != null && !committed.contains(newManifest)) {
      deleteFile(newManifest.path());
      this.newManifest = null;
    }
    cleanUncommittedMerges(committed);
    cleanUncommittedFilters(committed);
  }

  /** Returns true when no delete expression, path, or partition drop has been registered. */
  private boolean nothingToFilter() {
    return (deleteExpression == null || deleteExpression == Expressions.alwaysFalse()) &&
        deletePaths.isEmpty() && dropPartitions.isEmpty();
  }

  /**
   * NOTE(review): the {@code deleteExpression} parameter shadows the field of the same name;
   * the method uses both (the field via {@code this.deleteExpression} in error messages).
   *
   * @return a ManifestFile that is a filtered version of the input manifest, with all files
   *         matching the registered deletes removed; the input manifest itself when nothing
   *         in it matches.
   */
  private ManifestFile filterManifest(Expression deleteExpression,
                                      StrictMetricsEvaluator metricsEvaluator,
                                      ManifestFile manifest) throws IOException {
    ManifestFile cached = filteredManifests.get(manifest);
    if (cached != null) {
      return cached;
    }

    if (nothingToFilter()) {
      filteredManifests.put(manifest, manifest);
      return manifest;
    }

    try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
      // project the delete expression into this manifest's partition spec:
      // inclusive matches partitions that MAY contain deleted rows,
      // strict matches partitions where ALL rows are deleted
      Expression inclusiveExpr = Projections
          .inclusive(reader.spec())
          .project(deleteExpression);
      Evaluator inclusive = new Evaluator(reader.spec().partitionType(), inclusiveExpr);

      Expression strictExpr = Projections
          .strict(reader.spec())
          .project(deleteExpression);
      Evaluator strict = new Evaluator(reader.spec().partitionType(), strictExpr);

      // this is reused to compare file paths with the delete set
      CharSequenceWrapper pathWrapper = CharSequenceWrapper.wrap("");

      // reused to compare file partitions with the drop set
      StructLikeWrapper partitionWrapper = StructLikeWrapper.wrap(null);

      // this assumes that the manifest doesn't have files to remove and streams through the
      // manifest without copying data. if a manifest does have a file to remove, this will break
      // out of the loop and move on to filtering the manifest.
      boolean hasDeletedFiles = false;
      for (ManifestEntry entry : reader.entries()) {
        DataFile file = entry.file();
        boolean fileDelete = (deletePaths.contains(pathWrapper.set(file.path())) ||
            dropPartitions.contains(partitionWrapper.set(file.partition())));
        if (fileDelete || inclusive.eval(file.partition())) {
          ValidationException.check(
              fileDelete || strict.eval(file.partition()) || metricsEvaluator.eval(file),
              "Cannot delete file where some, but not all, rows match filter %s: %s",
              this.deleteExpression, file.path());

          hasDeletedFiles = true;
          if (failAnyDelete) {
            throw new DeleteException(writeSpec().partitionToPath(file.partition()));
          }
          break; // as soon as a deleted file is detected, stop scanning
        }
      }

      if (!hasDeletedFiles) {
        filteredManifests.put(manifest, manifest);
        return manifest;
      }

      // when this point is reached, there is at least one file that will be deleted in the
      // manifest. produce a copy of the manifest with all deleted files removed.
      Set<CharSequenceWrapper> deletedPaths = Sets.newHashSet();
      OutputFile filteredCopy = manifestPath(manifestCount.getAndIncrement());
      ManifestWriter writer = new ManifestWriter(reader.spec(), filteredCopy, snapshotId());
      try {
        for (ManifestEntry entry : reader.entries()) {
          DataFile file = entry.file();
          boolean fileDelete = (deletePaths.contains(pathWrapper.set(file.path())) ||
              dropPartitions.contains(partitionWrapper.set(file.partition())));
          if (entry.status() != Status.DELETED) {
            if (fileDelete || inclusive.eval(file.partition())) {
              ValidationException.check(
                  fileDelete || strict.eval(file.partition()) || metricsEvaluator.eval(file),
                  "Cannot delete file where some, but not all, rows match filter %s: %s",
                  this.deleteExpression, file.path());

              writer.delete(entry);

              CharSequenceWrapper wrapper = CharSequenceWrapper.wrap(entry.file().path());
              if (deletedPaths.contains(wrapper)) {
                LOG.warn("Deleting a duplicate path from manifest {}: {}",
                    manifest.path(), wrapper.get());
              }
              deletedPaths.add(wrapper);

            } else {
              writer.addExisting(entry);
            }
          }
        }
      } finally {
        writer.close();
      }

      // return the filtered manifest as a reader
      ManifestFile filtered = writer.toManifestFile();

      // update caches
      filteredManifests.put(manifest, filtered);
      filteredManifestToDeletedFiles.put(filtered, deletedPaths);

      return filtered;
    }
  }

  /**
   * Bin-packs the group's manifests toward the target manifest size and merges each bin,
   * preserving manifest order. Bins containing only the in-memory new-files manifest are left
   * unmerged until the group reaches the minimum merge count.
   */
  @SuppressWarnings("unchecked")
  private Iterable<ManifestFile> mergeGroup(int specId, List<ManifestFile> group)
      throws IOException {
    // use a lookback of 1 to avoid reordering the manifests. using 1 also means this should pack
    // from the end so that the manifest that gets under-filled is the first one, which will be
    // merged the next time.
    ListPacker<ManifestFile> packer = new ListPacker<>(manifestTargetSizeBytes, 1);
    List<List<ManifestFile>> bins = packer.packEnd(group, manifest -> manifest.length());

    // process bins in parallel, but put results in the order of the bins into an array to preserve
    // the order of manifests and contents. preserving the order helps avoid random deletes when
    // data files are eventually aged off.
    List<ManifestFile>[] binResults = (List<ManifestFile>[])
        Array.newInstance(List.class, bins.size());
    Tasks.range(bins.size())
        .stopOnFailure().throwFailureWhenFinished()
        .executeWith(getWorkerPool())
        .run(index -> {
          List<ManifestFile> bin = bins.get(index);
          List<ManifestFile> outputManifests = Lists.newArrayList();
          binResults[index] = outputManifests;

          if (bin.size() == 1) {
            // no need to rewrite
            outputManifests.add(bin.get(0));
            return;
          }

          // if the bin has a new manifest (the new data files) then only merge it if the number of
          // manifests is above the minimum count. this is applied only to bins with an in-memory
          // manifest so that large manifests don't prevent merging older groups.
          if (bin.contains(newManifest) && bin.size() < minManifestsCountToMerge) {
            // not enough to merge, add all manifest files to the output list
            outputManifests.addAll(bin);
          } else {
            // merge the group
            outputManifests.add(createManifest(specId, bin));
          }
        }, IOException.class);

    return Iterables.concat(binResults);
  }

  /**
   * Writes one merged manifest from a bin of manifests, rewriting entry statuses so that only
   * changes made by this snapshot appear as adds/deletes. Results are cached per bin for retries.
   */
  private ManifestFile createManifest(int specId, List<ManifestFile> bin) throws IOException {
    // if this merge was already rewritten, use the existing file.
    // if the new files are in this merge, then the ManifestFile for the new files has changed and
    // will be a cache miss.
    if (mergeManifests.containsKey(bin)) {
      return mergeManifests.get(bin);
    }

    OutputFile out = manifestPath(manifestCount.getAndIncrement());

    ManifestWriter writer = new ManifestWriter(ops.current().spec(specId), out, snapshotId());
    try {
      for (ManifestFile manifest : bin) {
        try (ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()))) {
          for (ManifestEntry entry : reader.entries()) {
            if (entry.status() == Status.DELETED) {
              // suppress deletes from previous snapshots. only files deleted by this snapshot
              // should be added to the new manifest
              if (entry.snapshotId() == snapshotId()) {
                writer.add(entry);
              }
            } else if (entry.status() == Status.ADDED && entry.snapshotId() == snapshotId()) {
              // adds from this snapshot are still adds, otherwise they should be existing
              writer.add(entry);
            } else {
              // add all files from the old manifest as existing files
              writer.addExisting(entry);
            }
          }
        }
      }
    } finally {
      writer.close();
    }

    ManifestFile manifest = writer.toManifestFile();

    // update the cache
    mergeManifests.put(bin, manifest);

    return manifest;
  }

  /**
   * Writes (or rewrites, if files were added since the last write) the manifest containing the
   * files added by this update.
   */
  private ManifestFile newFilesAsManifest() throws IOException {
    if (hasNewFiles && newManifest != null) {
      // files were added after the manifest was written; discard the stale manifest
      deleteFile(newManifest.path());
      newManifest = null;
    }

    if (newManifest == null) {
      OutputFile out = manifestPath(manifestCount.getAndIncrement());

      ManifestWriter writer = new ManifestWriter(spec, out, snapshotId());
      try {
        writer.addAll(newFiles);
      } finally {
        writer.close();
      }

      this.newManifest = writer.toManifestFile();
      this.hasNewFiles = false;
    }

    return newManifest;
  }
}
| 6,319 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseTableScan.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.events.Listeners;
import com.netflix.iceberg.events.ScanEvent;
import com.netflix.iceberg.expressions.Binder;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveManifestEvaluator;
import com.netflix.iceberg.expressions.ResidualEvaluator;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.util.BinPacking;
import com.netflix.iceberg.util.ParallelIterable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
import static com.netflix.iceberg.util.ThreadPools.getPlannerPool;
import static com.netflix.iceberg.util.ThreadPools.getWorkerPool;
/**
* Base class for {@link TableScan} implementations.
*/
class BaseTableScan implements TableScan {
private static final Logger LOG = LoggerFactory.getLogger(TableScan.class);
private static final SimpleDateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
private static final List<String> SNAPSHOT_COLUMNS = ImmutableList.of(
"snapshot_id", "file_path", "file_ordinal", "file_format", "block_size_in_bytes",
"file_size_in_bytes", "record_count", "partition", "value_counts", "null_value_counts",
"lower_bounds", "upper_bounds"
);
private static final boolean PLAN_SCANS_WITH_WORKER_POOL =
SystemProperties.getBoolean(SystemProperties.SCAN_THREAD_POOL_ENABLED, true);
private final TableOperations ops;
private final Table table;
private final Long snapshotId;
private final Schema schema;
private final Expression rowFilter;
/**
 * Creates a scan over the table's current state: no pinned snapshot, the full table schema,
 * and an always-true row filter.
 */
BaseTableScan(TableOperations ops, Table table) {
  this(ops, table, null, table.schema(), Expressions.alwaysTrue());
}
/**
 * Internal constructor used by the refinement methods (useSnapshot, project, select, filter);
 * each of them returns a new immutable scan with one field changed.
 *
 * @param snapshotId pinned snapshot id, or null to use the current snapshot at planning time
 */
private BaseTableScan(TableOperations ops, Table table, Long snapshotId, Schema schema, Expression rowFilter) {
  this.ops = ops;
  this.table = table;
  this.snapshotId = snapshotId;
  this.schema = schema;
  this.rowFilter = rowFilter;
}
/** Returns the table this scan reads from. */
@Override
public Table table() {
  return table;
}
/**
 * Returns a new scan pinned to the given snapshot.
 *
 * @param snapshotId the id of the snapshot to read
 * @return a new scan reading that snapshot
 * @throws IllegalArgumentException if a snapshot is already pinned or the id is unknown
 */
@Override
public TableScan useSnapshot(long snapshotId) {
  // the parameter shadows the field: report the already-pinned id (this.snapshotId),
  // not the requested one, in the "already set" message
  Preconditions.checkArgument(this.snapshotId == null,
      "Cannot override snapshot, already set to id=%s", this.snapshotId);
  Preconditions.checkArgument(ops.current().snapshot(snapshotId) != null,
      "Cannot find snapshot with ID %s", snapshotId);
  return new BaseTableScan(ops, table, snapshotId, schema, rowFilter);
}
/**
 * Returns a new scan pinned to the last snapshot created at or before the given time.
 *
 * @param timestampMillis the point in time, in milliseconds since the epoch
 * @return a new scan reading the snapshot current as of that time
 * @throws IllegalArgumentException if a snapshot is already pinned or no snapshot is old enough
 */
@Override
public TableScan asOfTime(long timestampMillis) {
  Preconditions.checkArgument(this.snapshotId == null,
      "Cannot override snapshot, already set to id=%s", snapshotId);

  // walk the snapshot log in order; the last entry at or before the timestamp wins
  Long lastSnapshotId = null;
  for (SnapshotLogEntry logEntry : ops.current().snapshotLog()) {
    if (logEntry.timestampMillis() <= timestampMillis) {
      lastSnapshotId = logEntry.snapshotId();
    }
  }

  // the snapshot ID could be null if no entries were older than the requested time. in that case,
  // there is no valid snapshot to read.
  if (lastSnapshotId == null) {
    // format with a local SimpleDateFormat: the shared static instance is not thread-safe,
    // and guarding with an if avoids formatting on the success path (checkArgument would
    // evaluate its message arguments eagerly)
    throw new IllegalArgumentException(String.format(
        "Cannot find a snapshot older than %s",
        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS").format(new Date(timestampMillis))));
  }

  return useSnapshot(lastSnapshotId);
}
public TableScan project(Schema schema) {
return new BaseTableScan(ops, table, snapshotId, schema, rowFilter);
}
@Override
public TableScan select(Collection<String> columns) {
Set<Integer> requiredFieldIds = Sets.newHashSet();
// all of the filter columns are required
requiredFieldIds.addAll(
Binder.boundReferences(table.schema().asStruct(), Collections.singletonList(rowFilter)));
// all of the projection columns are required
requiredFieldIds.addAll(TypeUtil.getProjectedIds(table.schema().select(columns)));
Schema projection = TypeUtil.select(table.schema(), requiredFieldIds);
return new BaseTableScan(ops, table, snapshotId, projection, rowFilter);
}
@Override
public TableScan filter(Expression expr) {
return new BaseTableScan(ops, table, snapshotId, schema, Expressions.and(rowFilter, expr));
}
private final LoadingCache<Integer, InclusiveManifestEvaluator> EVAL_CACHE = CacheBuilder
.newBuilder()
.build(new CacheLoader<Integer, InclusiveManifestEvaluator>() {
@Override
public InclusiveManifestEvaluator load(Integer specId) {
PartitionSpec spec = ops.current().spec(specId);
return new InclusiveManifestEvaluator(spec, rowFilter);
}
});
@Override
public CloseableIterable<FileScanTask> planFiles() {
Snapshot snapshot = snapshotId != null ?
ops.current().snapshot(snapshotId) :
ops.current().currentSnapshot();
if (snapshot != null) {
LOG.info("Scanning table {} snapshot {} created at {} with filter {}", table,
snapshot.snapshotId(), DATE_FORMAT.format(new Date(snapshot.timestampMillis())),
rowFilter);
Listeners.notifyAll(
new ScanEvent(table.toString(), snapshot.snapshotId(), rowFilter, schema));
Iterable<ManifestFile> matchingManifests = Iterables.filter(snapshot.manifests(),
manifest -> EVAL_CACHE.getUnchecked(manifest.partitionSpecId()).eval(manifest));
ConcurrentLinkedQueue<Closeable> toClose = new ConcurrentLinkedQueue<>();
Iterable<Iterable<FileScanTask>> readers = Iterables.transform(
matchingManifests,
manifest -> {
ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()));
toClose.add(reader);
String schemaString = SchemaParser.toJson(reader.spec().schema());
String specString = PartitionSpecParser.toJson(reader.spec());
ResidualEvaluator residuals = new ResidualEvaluator(reader.spec(), rowFilter);
return Iterables.transform(
reader.filterRows(rowFilter).select(SNAPSHOT_COLUMNS),
file -> new BaseFileScanTask(file, schemaString, specString, residuals)
);
});
if (PLAN_SCANS_WITH_WORKER_POOL && snapshot.manifests().size() > 1) {
return CloseableIterable.combine(
new ParallelIterable<>(readers, getPlannerPool(), getWorkerPool()),
toClose);
} else {
return CloseableIterable.combine(Iterables.concat(readers), toClose);
}
} else {
LOG.info("Scanning empty table {}", table);
return CloseableIterable.empty();
}
}
@Override
public CloseableIterable<CombinedScanTask> planTasks() {
long splitSize = ops.current().propertyAsLong(
TableProperties.SPLIT_SIZE, TableProperties.SPLIT_SIZE_DEFAULT);
int lookback = ops.current().propertyAsInt(
TableProperties.SPLIT_LOOKBACK, TableProperties.SPLIT_LOOKBACK_DEFAULT);
return CloseableIterable.transform(
CloseableIterable.wrap(planFiles(), files ->
new BinPacking.PackingIterable<>(files, splitSize, lookback, FileScanTask::length)),
BaseCombinedScanTask::new);
}
@Override
public Schema schema() {
return schema;
}
@Override
public Expression filter() {
return rowFilter;
}
@Override
public String toString() {
return Objects.toStringHelper(this)
.add("table", table)
.add("projection", schema.asStruct())
.add("filter", rowFilter)
.toString();
}
}
| 6,320 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FileHistory.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.CharSequenceWrapper;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import static com.google.common.collect.Iterables.concat;
import static com.google.common.collect.Iterables.transform;
/**
 * Finds the manifest entries that reference a given set of data file locations.
 * <p>
 * Use {@link #table(Table)} to create a {@link Builder}, register one or more file locations and
 * an optional snapshot time range, then call {@link Builder#build()} to collect matching entries.
 */
public class FileHistory {
  // only the file path column is needed to match entries against the requested locations
  private static final List<String> HISTORY_COLUMNS = ImmutableList.of("file_path");

  private FileHistory() {
  }

  public static Builder table(Table table) {
    return new Builder(table);
  }

  public static class Builder {
    private final Table table;
    private final Set<CharSequenceWrapper> locations = Sets.newHashSet();
    private Long startTime = null; // inclusive lower bound on snapshot time, in millis
    private Long endTime = null;   // inclusive upper bound on snapshot time, in millis

    public Builder(Table table) {
      this.table = table;
    }

    /**
     * Adds a data file location to search for.
     */
    public Builder location(String location) {
      locations.add(CharSequenceWrapper.wrap(location));
      return this;
    }

    /**
     * Restricts the search to snapshots at or after the given timestamp string.
     */
    public Builder after(String timestamp) {
      Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      // assumes timestamp literal values are in microseconds; /1000 converts to millis — TODO confirm
      this.startTime = tsLiteral.value() / 1000;
      return this;
    }

    /**
     * Restricts the search to snapshots at or after the given time, in milliseconds.
     */
    public Builder after(long timestampMillis) {
      this.startTime = timestampMillis;
      return this;
    }

    /**
     * Restricts the search to snapshots at or before the given timestamp string.
     */
    public Builder before(String timestamp) {
      Literal<Long> tsLiteral = Literal.of(timestamp).to(Types.TimestampType.withoutZone());
      this.endTime = tsLiteral.value() / 1000;
      return this;
    }

    /**
     * Restricts the search to snapshots at or before the given time, in milliseconds.
     */
    public Builder before(long timestampMillis) {
      this.endTime = timestampMillis;
      return this;
    }

    /**
     * Returns copies of the manifest entries whose file path matches a registered location.
     */
    @SuppressWarnings("unchecked")
    public Iterable<ManifestEntry> build() {
      Iterable<Snapshot> snapshots = table.snapshots();

      // restrict to the requested snapshot time range, when one was set
      if (startTime != null) {
        snapshots = Iterables.filter(snapshots, snap -> snap.timestampMillis() >= startTime);
      }
      if (endTime != null) {
        snapshots = Iterables.filter(snapshots, snap -> snap.timestampMillis() <= endTime);
      }

      // only use manifests that were added in the matching snapshots
      Set<Long> matchingIds = Sets.newHashSet(transform(snapshots, Snapshot::snapshotId));
      Iterable<ManifestFile> manifests = Iterables.filter(
          concat(transform(snapshots, Snapshot::manifests)),
          manifest -> manifest.snapshotId() == null || matchingIds.contains(manifest.snapshotId()));

      // a manifest group will only read each manifest once
      ManifestGroup group = new ManifestGroup(((HasTableOperations) table).operations(), manifests);

      List<ManifestEntry> results = Lists.newArrayList();
      try (CloseableIterable<ManifestEntry> entries = group.select(HISTORY_COLUMNS).entries()) {
        // TODO: replace this with an IN predicate
        CharSequenceWrapper locationWrapper = CharSequenceWrapper.wrap(null);
        for (ManifestEntry entry : entries) {
          if (entry != null && locations.contains(locationWrapper.set(entry.file().path()))) {
            // copy because the iterator may reuse the entry object
            results.add(entry.copy());
          }
        }
      } catch (IOException e) {
        throw new RuntimeIOException(e);
      }

      return results;
    }
  }
}
| 6,321 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FileIO.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import java.io.Serializable;
/**
 * Pluggable module for reading, writing, and deleting files.
 * <p>
 * Both table metadata files and data files can be written and read by this module. Implementations
 * must be serializable because various clients of Spark tables may initialize this once and pass
 * it off to a separate module that would then interact with the streams.
 */
public interface FileIO extends Serializable {
  /**
   * Get a {@link InputFile} instance to read bytes from the file at the given path.
   *
   * @param path a fully-qualified file location
   * @return an {@link InputFile} for the path; reading is deferred until the file is opened
   */
  InputFile newInputFile(String path);

  /**
   * Get a {@link OutputFile} instance to write bytes to the file at the given path.
   *
   * @param path a fully-qualified file location
   * @return an {@link OutputFile} for the path; writing is deferred until the file is created
   */
  OutputFile newOutputFile(String path);

  /**
   * Delete the file at the given path.
   *
   * @param path a fully-qualified location of the file to delete
   */
  void deleteFile(String path);
}
| 6,322 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestWriter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
/**
 * Writer for manifest files.
 * <p>
 * Entries are appended with a status of ADDED, EXISTING, or DELETED; per-status counts and
 * partition summaries are tracked so that {@link #toManifestFile()} can produce the manifest
 * metadata after the writer is closed.
 */
class ManifestWriter implements FileAppender<DataFile> {
  private static final Logger LOG = LoggerFactory.getLogger(ManifestWriter.class);

  private final String location;
  private final OutputFile file;
  private final int specId;
  private final FileAppender<ManifestEntry> writer;
  private final long snapshotId;
  private final ManifestEntry reused;   // wrapper reused for every entry to avoid allocation
  private final PartitionSummary stats; // accumulates partition field bounds for the manifest

  private boolean closed = false;
  private int addedFiles = 0;
  private int existingFiles = 0;
  private int deletedFiles = 0;

  ManifestWriter(PartitionSpec spec, OutputFile file, long snapshotId) {
    this.location = file.location();
    this.file = file;
    this.specId = spec.specId();
    this.writer = newAppender(FileFormat.AVRO, spec, file);
    this.snapshotId = snapshotId;
    this.reused = new ManifestEntry(spec.partitionType());
    this.stats = new PartitionSummary(spec);
  }

  /**
   * Adds all non-deleted entries as EXISTING entries, keeping their original snapshot IDs.
   */
  public void addExisting(Iterable<ManifestEntry> entries) {
    for (ManifestEntry entry : entries) {
      if (entry.status() != DELETED) {
        addExisting(entry);
      }
    }
  }

  public void addExisting(ManifestEntry entry) {
    add(reused.wrapExisting(entry.snapshotId(), entry.file()));
  }

  public void addExisting(long snapshotId, DataFile file) {
    add(reused.wrapExisting(snapshotId, file));
  }

  public void delete(ManifestEntry entry) {
    // Use the current Snapshot ID for the delete. It is safe to delete the data file from disk
    // when this Snapshot has been removed or when there are no Snapshots older than this one.
    add(reused.wrapDelete(snapshotId, entry.file()));
  }

  public void delete(DataFile file) {
    add(reused.wrapDelete(snapshotId, file));
  }

  /**
   * Appends an entry, updating the status counts and the partition summary.
   */
  public void add(ManifestEntry entry) {
    switch (entry.status()) {
      case ADDED:
        addedFiles += 1;
        break;
      case EXISTING:
        existingFiles += 1;
        break;
      case DELETED:
        deletedFiles += 1;
        break;
    }
    stats.update(entry.file().partition());
    writer.add(entry);
  }

  public void addEntries(Iterable<ManifestEntry> entries) {
    for (ManifestEntry entry : entries) {
      add(entry);
    }
  }

  @Override
  public void add(DataFile file) {
    // TODO: this assumes that file is a GenericDataFile that can be written directly to Avro
    // Eventually, this should check in case there are other DataFile implementations.
    add(reused.wrapAppend(snapshotId, file));
  }

  @Override
  public Metrics metrics() {
    return writer.metrics();
  }

  /**
   * Builds the manifest file metadata. Valid only after {@link #close()} has succeeded.
   */
  public ManifestFile toManifestFile() {
    Preconditions.checkState(closed, "Cannot build ManifestFile, writer is not closed");
    return new GenericManifestFile(location, file.toInputFile().getLength(), specId, snapshotId,
        addedFiles, existingFiles, deletedFiles, stats.summaries());
  }

  @Override
  public void close() throws IOException {
    // mark closed only after the underlying appender closes successfully, so toManifestFile
    // cannot report metadata for a manifest file that failed to finish writing (the original
    // set the flag before closing)
    writer.close();
    this.closed = true;
  }

  private static <D> FileAppender<D> newAppender(FileFormat format, PartitionSpec spec,
                                                 OutputFile file) {
    Schema manifestSchema = ManifestEntry.getSchema(spec.partitionType());
    try {
      switch (format) {
        case AVRO:
          return Avro.write(file)
              .schema(manifestSchema)
              .named("manifest_entry")
              .meta("schema", SchemaParser.toJson(spec.schema()))
              .meta("partition-spec", PartitionSpecParser.toJsonFields(spec))
              .meta("partition-spec-id", String.valueOf(spec.specId()))
              .build();
        default:
          throw new IllegalArgumentException("Unsupported format: " + format);
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create manifest writer for path: " + file);
    }
  }
}
| 6,323 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FastAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.OutputFile;
import java.io.IOException;
import java.util.List;
import java.util.Set;
/**
 * {@link AppendFiles Append} implementation that adds a new manifest file for the write.
 * <p>
 * This implementation will attempt to commit 5 times before throwing {@link CommitFailedException}.
 */
class FastAppend extends SnapshotUpdate implements AppendFiles {
  private final PartitionSpec spec;
  private final List<DataFile> newFiles = Lists.newArrayList();
  private ManifestFile newManifest = null; // written lazily by writeManifest()
  private boolean hasNewFiles = false;     // true when newFiles changed since the last write

  FastAppend(TableOperations ops) {
    super(ops);
    this.spec = ops.current().spec();
  }

  @Override
  public FastAppend appendFile(DataFile file) {
    this.hasNewFiles = true;
    newFiles.add(file);
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    List<ManifestFile> newManifests = Lists.newArrayList();
    try {
      newManifests.add(writeManifest());
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write manifest");
    }

    // keep the manifests from the current snapshot, if there is one
    if (base.currentSnapshot() != null) {
      newManifests.addAll(base.currentSnapshot().manifests());
    }

    return newManifests;
  }

  @Override
  protected void cleanUncommitted(Set<ManifestFile> committed) {
    // guard against NPE when cleanup runs before any manifest was written: the original
    // dereferenced newManifest.path() even when newManifest was null
    if (newManifest != null && !committed.contains(newManifest)) {
      deleteFile(newManifest.path());
    }
  }

  private ManifestFile writeManifest() throws IOException {
    // discard a stale manifest if files were appended after it was written
    if (hasNewFiles && newManifest != null) {
      deleteFile(newManifest.path());
      newManifest = null;
    }

    if (newManifest == null) {
      OutputFile out = manifestPath(0);

      ManifestWriter writer = new ManifestWriter(spec, out, snapshotId());
      try {
        writer.addAll(newFiles);
      } finally {
        writer.close();
      }

      this.newManifest = writer.toManifestFile();
      hasNewFiles = false;
    }

    return newManifest;
  }
}
| 6,324 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/GenericPartitionFieldSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.List;
/**
 * Generic implementation of {@link PartitionFieldSummary}: a per-partition-field record of
 * whether null values are present plus serialized lower and upper bounds.
 * <p>
 * Implements {@link IndexedRecord} and {@link SchemaConstructable} so Avro can read and write
 * instances by position; supports projected (subset) schemas via a position-mapping array.
 */
public class GenericPartitionFieldSummary
    implements PartitionFieldSummary, StructLike, IndexedRecord, SchemaConstructable, Serializable {
  // Avro schema derived from the canonical PartitionFieldSummary struct type
  private static final Schema AVRO_SCHEMA = AvroSchemaUtil.convert(PartitionFieldSummary.getType());

  private transient Schema avroSchema; // not final for Java serialization
  // maps a projected field position to its position in the full schema; null when not projected
  private int[] fromProjectionPos;

  // data fields
  private boolean containsNull = false;
  private ByteBuffer lowerBound = null;
  private ByteBuffer upperBound = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public GenericPartitionFieldSummary(Schema avroSchema) {
    this.avroSchema = avroSchema;

    List<Types.NestedField> fields = AvroSchemaUtil.convert(avroSchema)
        .asNestedType()
        .asStructType()
        .fields();
    List<Types.NestedField> allFields = PartitionFieldSummary.getType().fields();

    // build the projection position map by matching field IDs against the full schema
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
        }
      }

      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }
  }

  public GenericPartitionFieldSummary(boolean containsNull, ByteBuffer lowerBound,
                                      ByteBuffer upperBound) {
    this.avroSchema = AVRO_SCHEMA;
    this.containsNull = containsNull;
    this.lowerBound = lowerBound;
    this.upperBound = upperBound;
    this.fromProjectionPos = null; // full schema: no projection mapping needed
  }

  /**
   * Copy constructor.
   *
   * @param toCopy a generic manifest file to copy.
   */
  private GenericPartitionFieldSummary(GenericPartitionFieldSummary toCopy) {
    // NOTE(review): the ByteBuffer bounds are shared with the source, not deep-copied —
    // confirm that callers never mutate the buffers of a copied summary
    this.avroSchema = toCopy.avroSchema;
    this.containsNull = toCopy.containsNull;
    this.lowerBound = toCopy.lowerBound;
    this.upperBound = toCopy.upperBound;
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }

  /**
   * Constructor for Java serialization.
   */
  GenericPartitionFieldSummary() {
  }

  @Override
  public boolean containsNull() {
    return containsNull;
  }

  @Override
  public ByteBuffer lowerBound() {
    return lowerBound;
  }

  @Override
  public ByteBuffer upperBound() {
    return upperBound;
  }

  @Override
  public int size() {
    return PartitionFieldSummary.getType().fields().size();
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }

  @Override
  public void put(int i, Object v) {
    set(i, v);
  }

  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return containsNull;
      case 1:
        return lowerBound;
      case 2:
        return upperBound;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }

  @Override
  @SuppressWarnings("unchecked")
  public <T> void set(int i, T value) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        this.containsNull = (Boolean) value;
        return;
      case 1:
        this.lowerBound = (ByteBuffer) value;
        return;
      case 2:
        this.upperBound = (ByteBuffer) value;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public PartitionFieldSummary copy() {
    return new GenericPartitionFieldSummary(this);
  }

  @Override
  public Schema getSchema() {
    return avroSchema;
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("contains_null", containsNull)
        .add("lower_bound", lowerBound)
        .add("upper_bound", upperBound)
        .toString();
  }
}
| 6,325 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SystemProperties.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
/**
 * Configuration properties that are controlled by Java system properties.
 */
public class SystemProperties {
  // utility class holding only static constants and helpers; not meant to be instantiated
  private SystemProperties() {
  }

  /**
   * Sets the size of the planner pool. The planner pool limits the number of concurrent planning
   * operations in the base table implementation.
   */
  public static final String PLANNER_THREAD_POOL_SIZE_PROP = "iceberg.planner.num-threads";

  /**
   * Sets the size of the worker pool. The worker pool limits the number of tasks concurrently
   * processing manifests in the base table implementation across all concurrent planning or commit
   * operations.
   */
  public static final String WORKER_THREAD_POOL_SIZE_PROP = "iceberg.worker.num-threads";

  /**
   * Whether to use the shared worker pool when planning table scans.
   */
  public static final String SCAN_THREAD_POOL_ENABLED = "iceberg.scan.plan-in-worker-pool";

  /**
   * Returns the boolean value of a system property, or the default when the property is unset.
   * Any set value other than "true" (case-insensitive) parses as false, per
   * {@link Boolean#parseBoolean(String)}.
   */
  static boolean getBoolean(String systemProperty, boolean defaultValue) {
    String value = System.getProperty(systemProperty);
    if (value != null) {
      return Boolean.parseBoolean(value);
    }
    return defaultValue;
  }
}
| 6,326 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableMetadataParser.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.util.JsonUtil;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.commons.compress.compressors.gzip.GzipCompressorOutputStream;
import org.apache.hadoop.conf.Configuration;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.io.StringWriter;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
/**
 * JSON serializer/deserializer for {@link TableMetadata}.
 */
public class TableMetadataParser {
  // utility class: static methods only
  private TableMetadataParser() {
  }

  // visible for testing
  static final String FORMAT_VERSION = "format-version";
  static final String LOCATION = "location";
  static final String LAST_UPDATED_MILLIS = "last-updated-ms";
  static final String LAST_COLUMN_ID = "last-column-id";
  static final String SCHEMA = "schema";
  static final String PARTITION_SPEC = "partition-spec";
  static final String PARTITION_SPECS = "partition-specs";
  static final String DEFAULT_SPEC_ID = "default-spec-id";
  static final String PROPERTIES = "properties";
  static final String CURRENT_SNAPSHOT_ID = "current-snapshot-id";
  static final String SNAPSHOTS = "snapshots";
  static final String SNAPSHOT_ID = "snapshot-id";
  static final String TIMESTAMP_MS = "timestamp-ms";
  static final String SNAPSHOT_LOG = "snapshot-log";

  /**
   * Serializes table metadata to a JSON string.
   */
  public static String toJson(TableMetadata metadata) {
    StringWriter writer = new StringWriter();
    try {
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      toJson(metadata, generator);
      generator.flush();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json for: %s", metadata);
    }
    return writer.toString();
  }

  /**
   * Writes table metadata as pretty-printed JSON, gzip-compressed when the output location
   * ends with ".gz".
   */
  public static void write(TableMetadata metadata, OutputFile outputFile) {
    try (OutputStreamWriter writer = new OutputStreamWriter(
        outputFile.location().endsWith(".gz") ?
            new GzipCompressorOutputStream(outputFile.create()) :
            outputFile.create())) {
      JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
      generator.useDefaultPrettyPrinter();
      toJson(metadata, generator);
      generator.flush();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to write json to file: %s", outputFile);
    }
  }

  public static String getFileExtension(Configuration configuration) {
    return ConfigProperties.shouldCompress(configuration) ? ".metadata.json.gz" : ".metadata.json";
  }

  private static void toJson(TableMetadata metadata, JsonGenerator generator) throws IOException {
    generator.writeStartObject();

    generator.writeNumberField(FORMAT_VERSION, TableMetadata.TABLE_FORMAT_VERSION);
    generator.writeStringField(LOCATION, metadata.location());
    generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
    generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());

    generator.writeFieldName(SCHEMA);
    SchemaParser.toJson(metadata.schema(), generator);

    // for older readers, continue writing the default spec as "partition-spec"
    generator.writeFieldName(PARTITION_SPEC);
    PartitionSpecParser.toJsonFields(metadata.spec(), generator);

    // write the default spec ID and spec list
    generator.writeNumberField(DEFAULT_SPEC_ID, metadata.defaultSpecId());
    generator.writeArrayFieldStart(PARTITION_SPECS);
    for (PartitionSpec spec : metadata.specs()) {
      PartitionSpecParser.toJson(spec, generator);
    }
    generator.writeEndArray();

    generator.writeObjectFieldStart(PROPERTIES);
    for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
      generator.writeStringField(keyValue.getKey(), keyValue.getValue());
    }
    generator.writeEndObject();

    // -1 marks a table that has no current snapshot
    generator.writeNumberField(CURRENT_SNAPSHOT_ID,
        metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);

    generator.writeArrayFieldStart(SNAPSHOTS);
    for (Snapshot snapshot : metadata.snapshots()) {
      SnapshotParser.toJson(snapshot, generator);
    }
    generator.writeEndArray();

    generator.writeArrayFieldStart(SNAPSHOT_LOG);
    for (SnapshotLogEntry logEntry : metadata.snapshotLog()) {
      generator.writeStartObject();
      generator.writeNumberField(TIMESTAMP_MS, logEntry.timestampMillis());
      generator.writeNumberField(SNAPSHOT_ID, logEntry.snapshotId());
      generator.writeEndObject();
    }
    generator.writeEndArray();

    generator.writeEndObject();
  }

  /**
   * Reads table metadata from a JSON file, decompressing when the location ends with ".gz".
   */
  public static TableMetadata read(TableOperations ops, InputFile file) {
    // try-with-resources closes the stream (the original leaked it); the suffix check is
    // ".gz" to match the suffix used by write(), where the original matched any "gz" ending
    try (InputStream is = file.location().endsWith(".gz") ?
        new GzipCompressorInputStream(file.newStream()) :
        file.newStream()) {
      return fromJson(ops, file, JsonUtil.mapper().readValue(is, JsonNode.class));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to read file: %s", file);
    }
  }

  static TableMetadata fromJson(TableOperations ops, InputFile file, JsonNode node) {
    Preconditions.checkArgument(node.isObject(),
        "Cannot parse metadata from a non-object: %s", node);

    int formatVersion = JsonUtil.getInt(FORMAT_VERSION, node);
    // Guava Preconditions message templates support only %s placeholders; %d is not substituted
    Preconditions.checkArgument(formatVersion == TableMetadata.TABLE_FORMAT_VERSION,
        "Cannot read unsupported version %s", formatVersion);

    String location = JsonUtil.getString(LOCATION, node);
    int lastAssignedColumnId = JsonUtil.getInt(LAST_COLUMN_ID, node);
    Schema schema = SchemaParser.fromJson(node.get(SCHEMA));

    JsonNode specArray = node.get(PARTITION_SPECS);
    List<PartitionSpec> specs;
    int defaultSpecId;
    if (specArray != null) {
      Preconditions.checkArgument(specArray.isArray(),
          "Cannot parse partition specs from non-array: %s", specArray);
      // default spec ID is required when the spec array is present
      defaultSpecId = JsonUtil.getInt(DEFAULT_SPEC_ID, node);

      // parse the spec array
      ImmutableList.Builder<PartitionSpec> builder = ImmutableList.builder();
      for (JsonNode spec : specArray) {
        builder.add(PartitionSpecParser.fromJson(schema, spec));
      }
      specs = builder.build();

    } else {
      // partition spec is required for older readers, but is always set to the default if the spec
      // array is set. it is only used to default the spec map is missing, indicating that the
      // table metadata was written by an older writer.
      defaultSpecId = TableMetadata.INITIAL_SPEC_ID;
      specs = ImmutableList.of(PartitionSpecParser.fromJsonFields(
          schema, TableMetadata.INITIAL_SPEC_ID, node.get(PARTITION_SPEC)));
    }

    Map<String, String> properties = JsonUtil.getStringMap(PROPERTIES, node);
    long currentVersionId = JsonUtil.getLong(CURRENT_SNAPSHOT_ID, node);
    long lastUpdatedMillis = JsonUtil.getLong(LAST_UPDATED_MILLIS, node);

    JsonNode snapshotArray = node.get(SNAPSHOTS);
    Preconditions.checkArgument(snapshotArray.isArray(),
        "Cannot parse snapshots from non-array: %s", snapshotArray);

    List<Snapshot> snapshots = Lists.newArrayListWithExpectedSize(snapshotArray.size());
    Iterator<JsonNode> iterator = snapshotArray.elements();
    while (iterator.hasNext()) {
      snapshots.add(SnapshotParser.fromJson(ops, iterator.next()));
    }

    // keep the snapshot log ordered by timestamp
    SortedSet<SnapshotLogEntry> entries =
        Sets.newTreeSet(Comparator.comparingLong(SnapshotLogEntry::timestampMillis));
    if (node.has(SNAPSHOT_LOG)) {
      Iterator<JsonNode> logIterator = node.get(SNAPSHOT_LOG).elements();
      while (logIterator.hasNext()) {
        JsonNode entryNode = logIterator.next();
        entries.add(new SnapshotLogEntry(
            JsonUtil.getLong(TIMESTAMP_MS, entryNode), JsonUtil.getLong(SNAPSHOT_ID, entryNode)));
      }
    }

    return new TableMetadata(ops, file, location,
        lastUpdatedMillis, lastAssignedColumnId, schema, defaultSpecId, specs, properties,
        currentVersionId, snapshots, ImmutableList.copyOf(entries.iterator()));
  }
}
| 6,327 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PropertiesUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.util.Tasks;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * Implementation of {@link UpdateProperties} that buffers property sets and removals
 * and commits them to the table with configurable retry.
 * <p>
 * A key may be either updated or removed in one pending change set, never both.
 */
class PropertiesUpdate implements UpdateProperties {
  private final TableOperations ops;
  private final Map<String, String> updates = Maps.newHashMap();
  private final Set<String> removals = Sets.newHashSet();
  private TableMetadata base;

  PropertiesUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
  }

  @Override
  public UpdateProperties set(String key, String value) {
    Preconditions.checkNotNull(key, "Key cannot be null");
    // BUG FIX: the original checked `key` twice and never validated `value`
    Preconditions.checkNotNull(value, "Value cannot be null");
    Preconditions.checkArgument(!removals.contains(key),
        "Cannot remove and update the same key: %s", key);
    updates.put(key, value);
    return this;
  }

  @Override
  public UpdateProperties remove(String key) {
    Preconditions.checkNotNull(key, "Key cannot be null");
    Preconditions.checkArgument(!updates.containsKey(key),
        "Cannot remove and update the same key: %s", key);
    removals.add(key);
    return this;
  }

  @Override
  public UpdateProperties defaultFormat(FileFormat format) {
    set(TableProperties.DEFAULT_FILE_FORMAT, format.name());
    return this;
  }

  /**
   * Refreshes the base metadata and returns the property map that would result from
   * applying the pending removals and updates. Does not commit.
   */
  @Override
  public Map<String, String> apply() {
    this.base = ops.refresh();

    Map<String, String> newProperties = Maps.newHashMap();
    // copy current properties, skipping removed keys
    for (Map.Entry<String, String> entry : base.properties().entrySet()) {
      if (!removals.contains(entry.getKey())) {
        newProperties.put(entry.getKey(), entry.getValue());
      }
    }
    // updates win over existing values
    newProperties.putAll(updates);

    return newProperties;
  }

  @Override
  public void commit() {
    // retry behavior is driven by the table's own commit.* properties
    Tasks.foreach(ops)
        .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
        .exponentialBackoff(
            base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
            base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
            2.0 /* exponential */ )
        .onlyRetryOn(CommitFailedException.class)
        .run(taskOps -> {
          // apply() re-reads the latest metadata so each retry is based on a fresh state
          Map<String, String> newProperties = apply();
          TableMetadata updated = base.replaceProperties(newProperties);
          taskOps.commit(base, updated);
        });
  }
}
| 6,328 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseTable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import java.util.Map;
/**
* Base {@link Table} implementation.
* <p>
* This can be extended by providing a {@link TableOperations} to the constructor.
*/
public class BaseTable implements Table, HasTableOperations {
  private final TableOperations ops;
  private final String name; // used only for toString/logging

  public BaseTable(TableOperations ops, String name) {
    this.ops = ops;
    this.name = name;
  }

  @Override
  public TableOperations operations() {
    return ops;
  }

  @Override
  public void refresh() {
    // delegates to the operations; callers see the refreshed state via the accessors below
    ops.refresh();
  }

  @Override
  public TableScan newScan() {
    return new BaseTableScan(ops, this);
  }

  // --- read-only accessors: all read from the currently loaded metadata ---

  @Override
  public Schema schema() {
    return ops.current().schema();
  }

  @Override
  public PartitionSpec spec() {
    return ops.current().spec();
  }

  @Override
  public Map<String, String> properties() {
    return ops.current().properties();
  }

  @Override
  public String location() {
    return ops.current().location();
  }

  @Override
  public Snapshot currentSnapshot() {
    return ops.current().currentSnapshot();
  }

  @Override
  public Iterable<Snapshot> snapshots() {
    return ops.current().snapshots();
  }

  // --- update factories: each returns a new pending-change builder over ops ---

  @Override
  public UpdateSchema updateSchema() {
    return new SchemaUpdate(ops);
  }

  @Override
  public UpdateProperties updateProperties() {
    return new PropertiesUpdate(ops);
  }

  @Override
  public AppendFiles newAppend() {
    return new MergeAppend(ops);
  }

  @Override
  public AppendFiles newFastAppend() {
    // appends without merging manifests; faster commits, more manifest files
    return new FastAppend(ops);
  }

  @Override
  public RewriteFiles newRewrite() {
    return new ReplaceFiles(ops);
  }

  @Override
  public OverwriteFiles newOverwrite() {
    return new OverwriteData(ops);
  }

  @Override
  public ReplacePartitions newReplacePartitions() {
    return new ReplacePartitionsOperation(ops);
  }

  @Override
  public DeleteFiles newDelete() {
    return new StreamingDelete(ops);
  }

  @Override
  public ExpireSnapshots expireSnapshots() {
    return new RemoveSnapshots(ops);
  }

  @Override
  public Rollback rollback() {
    return new RollbackToSnapshot(ops);
  }

  @Override
  public Transaction newTransaction() {
    return BaseTransaction.newTransaction(ops);
  }

  @Override
  public String toString() {
    return name;
  }
}
| 6,329 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseFileScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ResidualEvaluator;
/**
 * A {@link FileScanTask} covering one entire data file.
 * <p>
 * The schema and partition spec are carried as JSON strings so the task is cheap to
 * serialize; the {@link PartitionSpec} is rebuilt lazily on first access.
 */
class BaseFileScanTask implements FileScanTask {
  private final DataFile dataFile;
  private final String schemaJson;
  private final String specJson;
  private final ResidualEvaluator residualEvaluator;
  // rebuilt from specJson after deserialization, hence transient
  private transient PartitionSpec lazySpec = null;

  BaseFileScanTask(DataFile file, String schemaString, String specString, ResidualEvaluator residuals) {
    this.dataFile = file;
    this.schemaJson = schemaString;
    this.specJson = specString;
    this.residualEvaluator = residuals;
  }

  @Override
  public DataFile file() {
    return dataFile;
  }

  @Override
  public PartitionSpec spec() {
    PartitionSpec spec = lazySpec;
    if (spec == null) {
      spec = PartitionSpecParser.fromJson(SchemaParser.fromJson(schemaJson), specJson);
      this.lazySpec = spec;
    }
    return spec;
  }

  @Override
  public long start() {
    // whole-file task: always starts at the beginning of the file
    return 0;
  }

  @Override
  public long length() {
    return dataFile.fileSizeInBytes();
  }

  @Override
  public Expression residual() {
    return residualEvaluator.residualFor(dataFile.partition());
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("file", dataFile.path())
        .add("partition_data", dataFile.partition())
        .add("residual", residual())
        .toString();
  }
}
| 6,330 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/PartitionSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.netflix.iceberg.ManifestFile.PartitionFieldSummary;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Conversions;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
/**
 * Accumulates per-field statistics (contains-null flag, lower and upper bounds) for
 * the partition tuples seen while writing a manifest.
 */
class PartitionSummary {
  private final PartitionFieldStats<?>[] stats;
  private final Class<?>[] fieldClasses;

  PartitionSummary(PartitionSpec spec) {
    this.fieldClasses = spec.javaClasses();
    this.stats = new PartitionFieldStats[fieldClasses.length];
    List<Types.NestedField> partitionFields = spec.partitionType().fields();
    for (int pos = 0; pos < stats.length; pos += 1) {
      this.stats[pos] = new PartitionFieldStats<>(partitionFields.get(pos).type());
    }
  }

  List<PartitionFieldSummary> summaries() {
    return Lists.transform(Arrays.asList(stats), PartitionFieldStats::toSummary);
  }

  public void update(StructLike partitionKey) {
    updateFields(partitionKey);
  }

  @SuppressWarnings("unchecked")
  private <T> void updateFields(StructLike key) {
    for (int pos = 0; pos < fieldClasses.length; pos += 1) {
      PartitionFieldStats<T> fieldStats = (PartitionFieldStats<T>) stats[pos];
      Class<T> javaClass = (Class<T>) fieldClasses[pos];
      fieldStats.update(key.get(pos, javaClass));
    }
  }

  /** Tracks null presence and min/max bounds for a single partition field. */
  private static class PartitionFieldStats<T> {
    private final Type type;
    private final Comparator<T> comparator;
    private boolean containsNull = false;
    private T min = null;
    private T max = null;

    private PartitionFieldStats(Type type) {
      this.type = type;
      this.comparator = Comparators.forType(type.asPrimitiveType());
    }

    public PartitionFieldSummary toSummary() {
      // bounds stay null if no non-null value was ever seen
      return new GenericPartitionFieldSummary(containsNull,
          min != null ? Conversions.toByteBuffer(type, min) : null,
          max != null ? Conversions.toByteBuffer(type, max) : null);
    }

    void update(T value) {
      if (value == null) {
        this.containsNull = true;
        return;
      }

      if (min == null) {
        // first non-null value initializes both bounds
        this.min = value;
        this.max = value;
        return;
      }

      if (comparator.compare(value, min) < 0) {
        this.min = value;
      }
      if (comparator.compare(max, value) < 0) {
        this.max = value;
      }
    }
  }
}
| 6,331 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/GenericManifestFile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Types;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData.SchemaConstructable;
import java.io.Serializable;
import java.util.List;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Iterables.transform;
/**
 * Generic {@link ManifestFile} implementation that is readable via Avro reflection
 * (through {@link SchemaConstructable}) and Java-serializable for shipping to tasks.
 */
public class GenericManifestFile
    implements ManifestFile, StructLike, IndexedRecord, SchemaConstructable, Serializable {
  private static final Schema AVRO_SCHEMA = AvroSchemaUtil.convert(
      ManifestFile.schema(), "manifest_file");

  private transient Schema avroSchema; // not final for Java serialization
  // maps ordinals of a projected read schema to ordinals in the full ManifestFile
  // schema; null when this instance was created with the full schema
  private int[] fromProjectionPos;

  // data fields
  private InputFile file = null;
  private String manifestPath = null;
  private Long length = null;
  private int specId = -1;
  private Long snapshotId = null;
  private Integer addedFilesCount = null;
  private Integer existingFilesCount = null;
  private Integer deletedFilesCount = null;
  private List<PartitionFieldSummary> partitions = null;

  /**
   * Used by Avro reflection to instantiate this class when reading manifest files.
   */
  public GenericManifestFile(org.apache.avro.Schema avroSchema) {
    this.avroSchema = avroSchema;

    List<Types.NestedField> fields = AvroSchemaUtil.convert(avroSchema)
        .asNestedType()
        .asStructType()
        .fields();
    List<Types.NestedField> allFields = ManifestFile.schema().asStruct().fields();

    // match projected fields to full-schema positions by field ID
    this.fromProjectionPos = new int[fields.size()];
    for (int i = 0; i < fromProjectionPos.length; i += 1) {
      boolean found = false;
      for (int j = 0; j < allFields.size(); j += 1) {
        if (fields.get(i).fieldId() == allFields.get(j).fieldId()) {
          found = true;
          fromProjectionPos[i] = j;
        }
      }

      if (!found) {
        throw new IllegalArgumentException("Cannot find projected field: " + fields.get(i));
      }
    }
  }

  // creates a manifest for a file whose stats are unknown; length is loaded lazily
  GenericManifestFile(InputFile file, int specId) {
    this.avroSchema = AVRO_SCHEMA;
    this.file = file;
    this.manifestPath = file.location();
    this.length = null; // lazily loaded from file
    this.specId = specId;
    this.snapshotId = null;
    this.addedFilesCount = null;
    this.existingFilesCount = null;
    this.deletedFilesCount = null;
    this.partitions = null;
    this.fromProjectionPos = null;
  }

  public GenericManifestFile(String path, long length, int specId, long snapshotId,
                             int addedFilesCount, int existingFilesCount, int deletedFilesCount,
                             List<PartitionFieldSummary> partitions) {
    this.avroSchema = AVRO_SCHEMA;
    this.manifestPath = path;
    this.length = length;
    this.specId = specId;
    this.snapshotId = snapshotId;
    this.addedFilesCount = addedFilesCount;
    this.existingFilesCount = existingFilesCount;
    this.deletedFilesCount = deletedFilesCount;
    this.partitions = partitions;
    this.fromProjectionPos = null;
  }

  /**
   * Copy constructor.
   *
   * @param toCopy a generic manifest file to copy.
   */
  private GenericManifestFile(GenericManifestFile toCopy) {
    this.avroSchema = toCopy.avroSchema;
    this.manifestPath = toCopy.manifestPath;
    this.length = toCopy.length;
    this.specId = toCopy.specId;
    this.snapshotId = toCopy.snapshotId;
    this.addedFilesCount = toCopy.addedFilesCount;
    this.existingFilesCount = toCopy.existingFilesCount;
    this.deletedFilesCount = toCopy.deletedFilesCount;
    // deep-copy summaries so the copy is independent of the original
    this.partitions = copyOf(transform(toCopy.partitions, PartitionFieldSummary::copy));
    this.fromProjectionPos = toCopy.fromProjectionPos;
  }

  /**
   * Constructor for Java serialization.
   */
  GenericManifestFile() {
  }

  @Override
  public String path() {
    return manifestPath;
  }

  // returns the length, reading it from the backing file on first access when this
  // instance was built from an InputFile; returns null if length was never projected
  public Long lazyLength() {
    if (length == null) {
      if (file != null) {
        // this was created from an input file and length is lazily loaded
        this.length = file.getLength();
      } else {
        // this was loaded from a file without projecting length, throw an exception
        return null;
      }
    }
    return length;
  }

  @Override
  public long length() {
    return lazyLength();
  }

  @Override
  public int partitionSpecId() {
    return specId;
  }

  @Override
  public Long snapshotId() {
    return snapshotId;
  }

  @Override
  public Integer addedFilesCount() {
    return addedFilesCount;
  }

  @Override
  public Integer existingFilesCount() {
    return existingFilesCount;
  }

  @Override
  public Integer deletedFilesCount() {
    return deletedFilesCount;
  }

  @Override
  public List<PartitionFieldSummary> partitions() {
    return partitions;
  }

  @Override
  public int size() {
    return ManifestFile.schema().columns().size();
  }

  @Override
  public <T> T get(int pos, Class<T> javaClass) {
    return javaClass.cast(get(pos));
  }

  @Override
  public void put(int i, Object v) {
    set(i, v);
  }

  // positional getter used by Avro (IndexedRecord) and StructLike access
  @Override
  public Object get(int i) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        return manifestPath;
      case 1:
        return lazyLength();
      case 2:
        return specId;
      case 3:
        return snapshotId;
      case 4:
        return addedFilesCount;
      case 5:
        return existingFilesCount;
      case 6:
        return deletedFilesCount;
      case 7:
        return partitions;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + pos);
    }
  }

  // positional setter used by Avro when materializing records
  @Override
  @SuppressWarnings("unchecked")
  public <T> void set(int i, T value) {
    int pos = i;
    // if the schema was projected, map the incoming ordinal to the expected one
    if (fromProjectionPos != null) {
      pos = fromProjectionPos[i];
    }
    switch (pos) {
      case 0:
        // always coerce to String for Serializable
        this.manifestPath = value.toString();
        return;
      case 1:
        this.length = (Long) value;
        return;
      case 2:
        this.specId = (Integer) value;
        return;
      case 3:
        this.snapshotId = (Long) value;
        return;
      case 4:
        this.addedFilesCount = (Integer) value;
        return;
      case 5:
        this.existingFilesCount = (Integer) value;
        return;
      case 6:
        this.deletedFilesCount = (Integer) value;
        return;
      case 7:
        this.partitions = (List<PartitionFieldSummary>) value;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public ManifestFile copy() {
    return new GenericManifestFile(this);
  }

  @Override
  public Schema getSchema() {
    return avroSchema;
  }

  // equality is by manifest path only; two instances for the same path are equal
  // even when their cached stats differ
  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    GenericManifestFile that = (GenericManifestFile) other;
    return Objects.equal(manifestPath, that.manifestPath);
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(manifestPath);
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("path", manifestPath)
        .add("length", length)
        .add("partition_spec_id", specId)
        .add("added_snapshot_id", snapshotId)
        .add("added_data_files_count", addedFilesCount)
        .add("existing_data_files_count", existingFilesCount)
        .add("deleted_data_files_count", deletedFilesCount)
        .add("partitions", partitions)
        .toString();
  }
}
| 6,332 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.avro.AvroIterable;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Projections;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.types.Types;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
import static com.netflix.iceberg.expressions.Expressions.alwaysTrue;
/**
* Reader for manifest files.
* <p>
* Readers are created using the builder from {@link #read(InputFile)}.
*/
public class ManifestReader extends CloseableGroup implements Filterable<FilteredManifest> {
  private static final Logger LOG = LoggerFactory.getLogger(ManifestReader.class);

  private static final List<String> ALL_COLUMNS = Lists.newArrayList("*");
  // minimal projection needed by cacheChanges() (typo fixed: COLUNNS -> COLUMNS)
  private static final List<String> CHANGE_COLUMNS = Lists.newArrayList(
      "file_path", "file_format", "partition", "record_count", "file_size_in_bytes");

  /**
   * Returns a new {@link ManifestReader} for an {@link InputFile}.
   *
   * @param file an InputFile
   * @return a manifest reader
   */
  public static ManifestReader read(InputFile file) {
    return new ManifestReader(file);
  }

  /**
   * Returns a new {@link ManifestReader} for an in-memory list of {@link ManifestEntry}.
   *
   * @param spec a partition spec for the entries
   * @param entries an in-memory list of entries for this manifest
   * @return a manifest reader
   */
  public static ManifestReader inMemory(PartitionSpec spec, Iterable<ManifestEntry> entries) {
    return new ManifestReader(spec, entries);
  }

  private final InputFile file;
  private final Iterable<ManifestEntry> entries;
  private final Map<String, String> metadata;
  private final PartitionSpec spec;
  private final Schema schema;

  // lazily initialized by cacheChanges()
  private List<ManifestEntry> adds = null;
  private List<ManifestEntry> deletes = null;

  private ManifestReader(InputFile file) {
    this.file = file;

    try {
      // open once with a minimal projection just to read the file's key-value metadata
      try (AvroIterable<ManifestEntry> headerReader = Avro.read(file)
          .project(ManifestEntry.getSchema(Types.StructType.of()).select("status"))
          .build()) {
        this.metadata = headerReader.getMetadata();
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }

    this.schema = SchemaParser.fromJson(metadata.get("schema"));
    // older manifests may not carry a spec ID; fall back to the initial spec
    int specId = TableMetadata.INITIAL_SPEC_ID;
    String specProperty = metadata.get("partition-spec-id");
    if (specProperty != null) {
      specId = Integer.parseInt(specProperty);
    }
    this.spec = PartitionSpecParser.fromJsonFields(schema, specId, metadata.get("partition-spec"));
    this.entries = null;
  }

  private ManifestReader(PartitionSpec spec, Iterable<ManifestEntry> entries) {
    this.file = null;
    this.metadata = ImmutableMap.of();
    this.spec = spec;
    this.schema = spec.schema();
    this.entries = entries;
  }

  /** Returns the backing file, or null for an in-memory reader. */
  public InputFile file() {
    return file;
  }

  public Schema schema() {
    return schema;
  }

  public PartitionSpec spec() {
    return spec;
  }

  @Override
  public Iterator<DataFile> iterator() {
    return iterator(alwaysTrue(), ALL_COLUMNS);
  }

  @Override
  public FilteredManifest select(Collection<String> columns) {
    return new FilteredManifest(this, alwaysTrue(), alwaysTrue(), Lists.newArrayList(columns));
  }

  @Override
  public FilteredManifest filterPartitions(Expression expr) {
    return new FilteredManifest(this, expr, alwaysTrue(), ALL_COLUMNS);
  }

  @Override
  public FilteredManifest filterRows(Expression expr) {
    // derive the partition filter from the row filter by inclusive projection
    return new FilteredManifest(this, Projections.inclusive(spec).project(expr), expr, ALL_COLUMNS);
  }

  /** Returns entries with ADDED status, reading and caching them on first call. */
  public List<ManifestEntry> addedFiles() {
    if (adds == null) {
      cacheChanges();
    }
    return adds;
  }

  /** Returns entries with DELETED status, reading and caching them on first call. */
  public List<ManifestEntry> deletedFiles() {
    if (deletes == null) {
      cacheChanges();
    }
    return deletes;
  }

  private void cacheChanges() {
    List<ManifestEntry> addedFiles = Lists.newArrayList();
    List<ManifestEntry> deletedFiles = Lists.newArrayList();

    // entries are reused by the reader, so copy the ones that are kept
    for (ManifestEntry entry : entries(CHANGE_COLUMNS)) {
      switch (entry.status()) {
        case ADDED:
          addedFiles.add(entry.copy());
          break;
        case DELETED:
          deletedFiles.add(entry.copy());
          break;
        default:
      }
    }

    this.adds = addedFiles;
    this.deletes = deletedFiles;
  }

  CloseableIterable<ManifestEntry> entries() {
    return entries(ALL_COLUMNS);
  }

  CloseableIterable<ManifestEntry> entries(Collection<String> columns) {
    if (entries != null) {
      // if this reader is an in-memory list or if the entries have been cached, return the list.
      return CloseableIterable.withNoopClose(entries);
    }

    FileFormat format = FileFormat.fromFileName(file.location());
    // use a format string so the message is only built on failure
    Preconditions.checkArgument(format != null, "Unable to determine format of manifest: %s", file);

    // renamed from `schema` to avoid shadowing the field of the same name
    Schema projection = ManifestEntry.projectSchema(spec.partitionType(), columns);
    switch (format) {
      case AVRO:
        AvroIterable<ManifestEntry> reader = Avro.read(file)
            .project(projection)
            .rename("manifest_entry", ManifestEntry.class.getName())
            .rename("partition", PartitionData.class.getName())
            .rename("r102", PartitionData.class.getName())
            .rename("data_file", GenericDataFile.class.getName())
            .rename("r2", GenericDataFile.class.getName())
            .reuseContainers()
            .build();
        addCloseable(reader);
        return reader;

      default:
        throw new UnsupportedOperationException("Invalid format for manifest file: " + format);
    }
  }

  // visible for use by PartialManifest
  // NOTE(review): partFilter is currently unused here; partition filtering appears to be
  // applied by FilteredManifest before this is called -- confirm before relying on it
  Iterator<DataFile> iterator(Expression partFilter, Collection<String> columns) {
    return Iterables.transform(Iterables.filter(
        entries(columns),
        entry -> entry.status() != DELETED),
        ManifestEntry::file).iterator();
  }
}
| 6,333 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/TableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.OutputFile;
/**
* SPI interface to abstract table metadata access and updates.
*/
public interface TableOperations {

  /**
   * Return the currently loaded table metadata, without checking for updates.
   *
   * @return table metadata
   */
  TableMetadata current();

  /**
   * Return the current table metadata after checking for updates.
   *
   * @return table metadata
   */
  TableMetadata refresh();

  /**
   * Replace the base table metadata with a new version.
   * <p>
   * This method should implement and document atomicity guarantees.
   * <p>
   * Implementations must check that the base metadata is current to avoid overwriting updates.
   * Once the atomic commit operation succeeds, implementations must not perform any operations that
   * may fail because failure in this method cannot be distinguished from commit failure.
   *
   * @param base table metadata on which changes were based
   * @param metadata new table metadata with updates
   */
  void commit(TableMetadata base, TableMetadata metadata);

  /**
   * @return a {@link com.netflix.iceberg.FileIO} to read and write table data and metadata files
   */
  FileIO io();

  /**
   * Given the name of a metadata file, obtain the full path of that file using an appropriate base
   * location of the implementation's choosing.
   * <p>
   * The file may not exist yet, in which case the path should be returned as if it were to be created
   * by e.g. {@link FileIO#newOutputFile(String)}.
   *
   * @param fileName a metadata file name, without any base location
   * @return the fully-qualified location for the metadata file
   */
  String metadataFileLocation(String fileName);

  /**
   * Create a new ID for a Snapshot
   *
   * @return a long snapshot ID
   */
  long newSnapshotId();
}
| 6,334 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestGroup.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveManifestEvaluator;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.types.Types;
import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
/**
 * Plans reads over a set of manifests, applying manifest-level pruning with the data
 * filter, row-level file filtering, optional deleted-entry skipping, and column
 * projection. Instances are immutable; each filter/select method returns a new group.
 */
class ManifestGroup {
  private static final Types.StructType EMPTY_STRUCT = Types.StructType.of();

  private final TableOperations ops;
  private final Set<ManifestFile> manifests;
  private final Expression dataFilter;
  private final Expression fileFilter;
  private final boolean ignoreDeleted;
  private final List<String> columns;

  // one evaluator per partition spec ID; an evaluator depends only on the spec and the
  // data filter, so it can be shared across manifests with the same spec
  private final LoadingCache<Integer, InclusiveManifestEvaluator> evalCache = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<Integer, InclusiveManifestEvaluator>() {
        @Override
        public InclusiveManifestEvaluator load(Integer specId) {
          PartitionSpec spec = ops.current().spec(specId);
          return new InclusiveManifestEvaluator(spec, dataFilter);
        }
      });

  ManifestGroup(TableOperations ops, Iterable<ManifestFile> manifests) {
    this(ops, Sets.newHashSet(manifests), Expressions.alwaysTrue(), Expressions.alwaysTrue(),
        false, ImmutableList.of("*"));
  }

  private ManifestGroup(TableOperations ops, Set<ManifestFile> manifests,
                        Expression dataFilter, Expression fileFilter, boolean ignoreDeleted,
                        List<String> columns) {
    this.ops = ops;
    this.manifests = manifests;
    this.dataFilter = dataFilter;
    this.fileFilter = fileFilter;
    this.ignoreDeleted = ignoreDeleted;
    this.columns = columns;
  }

  /** Returns a new group that also filters rows (and manifests) by the expression. */
  public ManifestGroup filterData(Expression expr) {
    return new ManifestGroup(
        ops, manifests, Expressions.and(dataFilter, expr), fileFilter, ignoreDeleted, columns);
  }

  /** Returns a new group that also filters data file metadata rows by the expression. */
  public ManifestGroup filterFiles(Expression expr) {
    return new ManifestGroup(
        ops, manifests, dataFilter, Expressions.and(fileFilter, expr), ignoreDeleted, columns);
  }

  /** Returns a new group that skips DELETED manifest entries. */
  public ManifestGroup ignoreDeleted() {
    return new ManifestGroup(ops, manifests, dataFilter, fileFilter, true, columns);
  }

  public ManifestGroup select(List<String> columns) {
    return new ManifestGroup(
        ops, manifests, dataFilter, fileFilter, ignoreDeleted, Lists.newArrayList(columns));
  }

  public ManifestGroup select(String... columns) {
    return select(Arrays.asList(columns));
  }

  /**
   * Returns an iterable for manifest entries in the set of manifests.
   * <p>
   * Entries are not copied and it is the caller's responsibility to make defensive copies if
   * adding these entries to a collection.
   *
   * @return a CloseableIterable of manifest entries.
   */
  public CloseableIterable<ManifestEntry> entries() {
    Evaluator evaluator = new Evaluator(DataFile.getType(EMPTY_STRUCT), fileFilter);
    List<Closeable> toClose = Lists.newArrayList();

    Iterable<ManifestFile> matchingManifests = Iterables.filter(manifests,
        manifest -> evalCache.getUnchecked(manifest.partitionSpecId()).eval(manifest));

    if (ignoreDeleted) {
      // remove any manifests that don't have any existing or added files. if either the added or
      // existing files count is missing, the manifest must be scanned.
      // BUG FIX: filter the already-pruned matchingManifests; the original filtered `manifests`,
      // silently discarding the partition-based pruning computed above.
      matchingManifests = Iterables.filter(matchingManifests, manifest ->
          manifest.addedFilesCount() == null || manifest.existingFilesCount() == null ||
              manifest.addedFilesCount() + manifest.existingFilesCount() > 0);
    }

    Iterable<Iterable<ManifestEntry>> readers = Iterables.transform(
        matchingManifests,
        manifest -> {
          ManifestReader reader = ManifestReader.read(ops.io().newInputFile(manifest.path()));
          FilteredManifest filtered = reader.filterRows(dataFilter).select(columns);
          // readers are closed when the combined iterable is closed
          toClose.add(reader);
          return Iterables.filter(
              ignoreDeleted ? filtered.liveEntries() : filtered.allEntries(),
              entry -> evaluator.eval((GenericDataFile) entry.file()));
        });

    return CloseableIterable.combine(Iterables.concat(readers), toClose);
  }
}
| 6,335 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ReplaceFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import java.util.Set;
/**
 * Implements {@link RewriteFiles}: replaces a non-empty set of data files with another
 * non-empty set in a single snapshot update.
 */
class ReplaceFiles extends MergingSnapshotUpdate implements RewriteFiles {
  ReplaceFiles(TableOperations ops) {
    super(ops);
    // replace files must fail if any of the deleted paths is missing and cannot be deleted
    failMissingDeletePaths();
  }

  /**
   * Queues deletes for all of {@code filesToDelete} and adds all of {@code filesToAdd}.
   *
   * @throws IllegalArgumentException if either set is null or empty
   */
  @Override
  public RewriteFiles rewriteFiles(Set<DataFile> filesToDelete, Set<DataFile> filesToAdd) {
    Preconditions.checkArgument(filesToDelete != null && !filesToDelete.isEmpty(),
        "Files to delete cannot be null or empty");
    // message wording fixed for consistency with the check above ("cannot", not "can not")
    Preconditions.checkArgument(filesToAdd != null && !filesToAdd.isEmpty(),
        "Files to add cannot be null or empty");

    for (DataFile toDelete : filesToDelete) {
      delete(toDelete.path());
    }

    for (DataFile toAdd : filesToAdd) {
      add(toAdd);
    }

    return this;
  }
}
| 6,336 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestListWriter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.avro.Avro;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
/**
 * Writes a snapshot's manifest list: an Avro file of {@link ManifestFile} entries, tagged
 * with the snapshot id and parent snapshot id in the file metadata.
 */
class ManifestListWriter implements FileAppender<ManifestFile> {
  private final FileAppender<ManifestFile> delegate;

  ManifestListWriter(OutputFile snapshotFile, long snapshotId, Long parentSnapshotId) {
    // String.valueOf renders a null parent id as "null", which keeps the null-hostile
    // ImmutableMap happy for snapshots without a parent
    Map<String, String> metadata = ImmutableMap.of(
        "snapshot-id", String.valueOf(snapshotId),
        "parent-snapshot-id", String.valueOf(parentSnapshotId));
    this.delegate = newAppender(snapshotFile, metadata);
  }

  @Override
  public void add(ManifestFile file) {
    delegate.add(file);
  }

  @Override
  public void addAll(Iterator<ManifestFile> values) {
    delegate.addAll(values);
  }

  @Override
  public void addAll(Iterable<ManifestFile> values) {
    delegate.addAll(values);
  }

  @Override
  public Metrics metrics() {
    return delegate.metrics();
  }

  @Override
  public void close() throws IOException {
    delegate.close();
  }

  // builds the underlying Avro appender using the manifest-file schema
  private static FileAppender<ManifestFile> newAppender(OutputFile file, Map<String, String> meta) {
    try {
      return Avro.write(file)
          .schema(ManifestFile.schema())
          .named("manifest_file")
          .meta(meta)
          .build();
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create snapshot list writer for path: " + file);
    }
  }
}
| 6,337 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ReplacePartitionsOperation.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.expressions.Expressions;
import java.util.List;
/**
 * Implements {@link ReplacePartitions}: every partition that receives at least one new data
 * file has its existing files dropped and replaced by the new ones.
 */
public class ReplacePartitionsOperation extends MergingSnapshotUpdate implements ReplacePartitions {
  ReplacePartitionsOperation(TableOperations ops) {
    super(ops);
  }

  @Override
  public ReplacePartitions addFile(DataFile file) {
    // drop all existing files in this file's partition before adding the new file
    dropPartition(file.partition());
    add(file);
    return this;
  }

  @Override
  public ReplacePartitions validateAppendOnly() {
    // configure the operation to fail if applying it would delete any existing file
    failAnyDelete();
    return this;
  }

  @Override
  public List<ManifestFile> apply(TableMetadata base) {
    // idiom: isEmpty() instead of size() <= 0
    if (writeSpec().fields().isEmpty()) {
      // replace all data in an unpartitioned table
      deleteByRowFilter(Expressions.alwaysTrue());
    }

    try {
      return super.apply(base);
    } catch (DeleteException e) {
      // surface conflicting deletes (see validateAppendOnly) as a validation failure
      throw new ValidationException(
          "Cannot commit file that conflicts with existing partition: %s", e.partition());
    }
  }
}
| 6,338 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.util.Tasks;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES;
import static com.netflix.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS;
import static com.netflix.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT;
/**
 * A {@link Transaction} implementation that accumulates table updates and commits them with a
 * single metadata swap.
 * <p>
 * Pending updates run against an internal {@link TransactionTableOperations}, so each
 * operation's commit only replaces the in-memory {@code current} metadata; nothing is written
 * to the real table until {@link #commitTransaction()}.
 */
class BaseTransaction implements Transaction {
  // how the transaction was started; selects the commit strategy in commitTransaction()
  private enum TransactionType {
    CREATE_TABLE,
    REPLACE_TABLE,
    SIMPLE
  }

  static Transaction replaceTableTransaction(TableOperations ops, TableMetadata start) {
    return new BaseTransaction(ops, start);
  }

  static Transaction createTableTransaction(TableOperations ops, TableMetadata start) {
    Preconditions.checkArgument(ops.current() == null,
        "Cannot start create table transaction: table already exists");
    return new BaseTransaction(ops, start);
  }

  static Transaction newTransaction(TableOperations ops) {
    return new BaseTransaction(ops, ops.refresh());
  }

  // exposed for testing
  final TableOperations ops;
  private final TransactionTable transactionTable;
  private final TableOperations transactionOps;
  // updates in commit order, so they can be re-applied after a concurrent refresh
  private final List<PendingUpdate> updates;
  // snapshot ids that were current at some point during the transaction but were replaced
  // before the final commit; these are removed from the snapshot log at commit time
  private final Set<Long> intermediateSnapshotIds;
  private TransactionType type;
  private TableMetadata base;     // the real table metadata this transaction is based on
  private TableMetadata lastBase; // value of current when the last operation was handed out
  private TableMetadata current;  // metadata including all operations committed so far

  private BaseTransaction(TableOperations ops, TableMetadata start) {
    this.ops = ops;
    this.transactionTable = new TransactionTable();
    this.transactionOps = new TransactionTableOperations();
    this.updates = Lists.newArrayList();
    this.intermediateSnapshotIds = Sets.newHashSet();
    this.base = ops.current();
    // classify from the relationship between the real metadata and start: no real metadata
    // but a starting metadata -> create table; differing metadata -> replace table;
    // otherwise a simple transaction on the current metadata
    if (base == null && start != null) {
      this.type = TransactionType.CREATE_TABLE;
    } else if (base != null && start != base) {
      this.type = TransactionType.REPLACE_TABLE;
    } else {
      this.type = TransactionType.SIMPLE;
    }
    this.lastBase = null;
    this.current = start;
  }

  @Override
  public Table table() {
    return transactionTable;
  }

  // lastBase == current means the operation handed out last never changed current,
  // i.e. it was never committed; creating another operation would silently drop it
  private void checkLastOperationCommitted(String operation) {
    Preconditions.checkState(lastBase != current,
        "Cannot create new %s: last operation has not committed", operation);
    this.lastBase = current;
  }

  @Override
  public UpdateProperties updateProperties() {
    checkLastOperationCommitted("UpdateProperties");
    UpdateProperties props = new PropertiesUpdate(transactionOps);
    updates.add(props);
    return props;
  }

  @Override
  public AppendFiles newAppend() {
    checkLastOperationCommitted("AppendFiles");
    AppendFiles append = new MergeAppend(transactionOps);
    updates.add(append);
    return append;
  }

  @Override
  public RewriteFiles newRewrite() {
    checkLastOperationCommitted("RewriteFiles");
    RewriteFiles rewrite = new ReplaceFiles(transactionOps);
    updates.add(rewrite);
    return rewrite;
  }

  @Override
  public OverwriteFiles newOverwrite() {
    checkLastOperationCommitted("OverwriteFiles");
    OverwriteFiles overwrite = new OverwriteData(transactionOps);
    updates.add(overwrite);
    return overwrite;
  }

  @Override
  public ReplacePartitions newReplacePartitions() {
    checkLastOperationCommitted("ReplacePartitions");
    ReplacePartitionsOperation replacePartitions = new ReplacePartitionsOperation(transactionOps);
    updates.add(replacePartitions);
    return replacePartitions;
  }

  @Override
  public DeleteFiles newDelete() {
    checkLastOperationCommitted("DeleteFiles");
    DeleteFiles delete = new StreamingDelete(transactionOps);
    updates.add(delete);
    return delete;
  }

  @Override
  public ExpireSnapshots expireSnapshots() {
    checkLastOperationCommitted("ExpireSnapshots");
    ExpireSnapshots expire = new RemoveSnapshots(transactionOps);
    updates.add(expire);
    return expire;
  }

  /**
   * Applies the accumulated changes to the real table.
   * <p>
   * CREATE_TABLE commits once with no retry (a failure means another process created the
   * table). REPLACE_TABLE retries by unconditionally re-committing the replacement metadata.
   * SIMPLE retries by re-applying every pending update on top of refreshed metadata.
   */
  @Override
  public void commitTransaction() {
    Preconditions.checkState(lastBase != current,
        "Cannot commit transaction: last operation has not committed");

    switch (type) {
      case CREATE_TABLE:
        // fix up the snapshot log, which should not contain intermediate snapshots
        TableMetadata createMetadata = current.removeSnapshotLogEntries(intermediateSnapshotIds);

        // this operation creates the table. if the commit fails, this cannot retry because another
        // process has created the same table.
        ops.commit(null, createMetadata);
        break;

      case REPLACE_TABLE:
        // fix up the snapshot log, which should not contain intermediate snapshots
        TableMetadata replaceMetadata = current.removeSnapshotLogEntries(intermediateSnapshotIds);

        Tasks.foreach(ops)
            .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
            .exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */)
            .onlyRetryOn(CommitFailedException.class)
            .run(ops -> {
              // because this is a replace table, it will always completely replace the table
              // metadata. even if it was just updated.
              if (base != ops.refresh()) {
                this.base = ops.current(); // just refreshed
              }
              ops.commit(base, replaceMetadata);
            });
        break;

      case SIMPLE:
        // if there were no changes, don't try to commit
        if (base == current) {
          return;
        }

        Tasks.foreach(ops)
            .retry(base.propertyAsInt(COMMIT_NUM_RETRIES, COMMIT_NUM_RETRIES_DEFAULT))
            .exponentialBackoff(
                base.propertyAsInt(COMMIT_MIN_RETRY_WAIT_MS, COMMIT_MIN_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_MAX_RETRY_WAIT_MS, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT),
                base.propertyAsInt(COMMIT_TOTAL_RETRY_TIME_MS, COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT),
                2.0 /* exponential */)
            .onlyRetryOn(CommitFailedException.class)
            .run(ops -> {
              // note: the lambda parameter ops shadows the field; both refer to the same object
              if (base != ops.refresh()) {
                // the table changed concurrently: rebase on the refreshed metadata and
                // re-apply the whole chain of pending updates to rebuild current
                this.base = ops.current(); // just refreshed
                this.current = base;
                for (PendingUpdate update : updates) {
                  // re-commit each update in the chain to apply it and update current
                  update.commit();
                }
              }

              // fix up the snapshot log, which should not contain intermediate snapshots
              ops.commit(base, current.removeSnapshotLogEntries(intermediateSnapshotIds));
            });
        break;
    }
  }

  // returns the current snapshot id of the given metadata, or null if there is none
  private static Long currentId(TableMetadata meta) {
    if (meta != null) {
      if (meta.currentSnapshot() != null) {
        return meta.currentSnapshot().snapshotId();
      }
    }
    return null;
  }

  /**
   * TableOperations view used by the pending updates: commits only swap the in-memory
   * current metadata, and require the caller's base to match it (otherwise the update
   * retries against the refreshed transaction state).
   */
  public class TransactionTableOperations implements TableOperations {
    @Override
    public TableMetadata current() {
      return current;
    }

    @Override
    public TableMetadata refresh() {
      return current;
    }

    @Override
    public void commit(TableMetadata base, TableMetadata metadata) {
      if (base != current) {
        // trigger a refresh and retry
        throw new CommitFailedException("Table metadata refresh is required");
      }
      // track the intermediate snapshot ids for rewriting the snapshot log
      // an id is intermediate if it isn't the base snapshot id and it is replaced by a new current
      Long oldId = currentId(current);
      if (oldId != null && !oldId.equals(currentId(metadata)) && !oldId.equals(currentId(base))) {
        intermediateSnapshotIds.add(oldId);
      }
      BaseTransaction.this.current = metadata;
    }

    @Override
    public FileIO io() {
      return ops.io();
    }

    @Override
    public String metadataFileLocation(String fileName) {
      return ops.metadataFileLocation(fileName);
    }

    @Override
    public long newSnapshotId() {
      return ops.newSnapshotId();
    }
  }

  /**
   * Read-only Table view of the transaction's uncommitted state. Scans, schema updates, and
   * rollback are unsupported; update factory methods delegate to the enclosing transaction.
   */
  public class TransactionTable implements Table {
    @Override
    public void refresh() {
    }

    @Override
    public TableScan newScan() {
      throw new UnsupportedOperationException("Transaction tables do not support scans");
    }

    @Override
    public Schema schema() {
      return current.schema();
    }

    @Override
    public PartitionSpec spec() {
      return current.spec();
    }

    @Override
    public Map<String, String> properties() {
      return current.properties();
    }

    @Override
    public String location() {
      return current.location();
    }

    @Override
    public Snapshot currentSnapshot() {
      return current.currentSnapshot();
    }

    @Override
    public Iterable<Snapshot> snapshots() {
      return current.snapshots();
    }

    @Override
    public UpdateSchema updateSchema() {
      throw new UnsupportedOperationException("Transaction tables do not support schema updates");
    }

    @Override
    public UpdateProperties updateProperties() {
      return BaseTransaction.this.updateProperties();
    }

    @Override
    public AppendFiles newAppend() {
      return BaseTransaction.this.newAppend();
    }

    @Override
    public RewriteFiles newRewrite() {
      return BaseTransaction.this.newRewrite();
    }

    @Override
    public OverwriteFiles newOverwrite() {
      return BaseTransaction.this.newOverwrite();
    }

    @Override
    public ReplacePartitions newReplacePartitions() {
      return BaseTransaction.this.newReplacePartitions();
    }

    @Override
    public DeleteFiles newDelete() {
      return BaseTransaction.this.newDelete();
    }

    @Override
    public ExpireSnapshots expireSnapshots() {
      return BaseTransaction.this.expireSnapshots();
    }

    @Override
    public Rollback rollback() {
      throw new UnsupportedOperationException("Transaction tables do not support rollback");
    }

    @Override
    public Transaction newTransaction() {
      throw new UnsupportedOperationException("Cannot create a transaction within a transaction");
    }
  }
}
| 6,339 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SerializableByteBufferMap.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Maps;
import java.io.ObjectStreamException;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
/**
 * A {@code Map<Integer, ByteBuffer>} wrapper that supports Java serialization by writing a
 * proxy of {@code int[]} keys and {@code byte[][]} values (ByteBuffer is not Serializable).
 * <p>
 * All map operations delegate to the wrapped map.
 */
class SerializableByteBufferMap implements Map<Integer, ByteBuffer>, Serializable {
  private final Map<Integer, ByteBuffer> wrapped;

  static Map<Integer, ByteBuffer> wrap(Map<Integer, ByteBuffer> map) {
    if (map == null) {
      return null;
    }

    if (map instanceof SerializableByteBufferMap) {
      return map;
    }

    return new SerializableByteBufferMap(map);
  }

  public SerializableByteBufferMap() {
    this.wrapped = new LinkedHashMap<>();
  }

  private SerializableByteBufferMap(Map<Integer, ByteBuffer> wrapped) {
    this.wrapped = wrapped;
  }

  /**
   * Serialization proxy: flat arrays of keys and value bytes. readResolve rebuilds the map
   * with the original iteration order preserved by the LinkedHashMap.
   */
  private static class MapSerializationProxy implements Serializable {
    private int[] keys = null;
    private byte[][] values = null;

    /**
     * Constructor for Java serialization.
     */
    MapSerializationProxy() {
    }

    public MapSerializationProxy(int[] keys, byte[][] values) {
      this.keys = keys;
      this.values = values;
    }

    Object readResolve() throws ObjectStreamException {
      Map<Integer, ByteBuffer> map = new LinkedHashMap<>();
      for (int i = 0; i < keys.length; i += 1) {
        map.put(keys[i], ByteBuffer.wrap(values[i]));
      }
      return SerializableByteBufferMap.wrap(map);
    }
  }

  Object writeReplace() throws ObjectStreamException {
    Collection<Map.Entry<Integer, ByteBuffer>> entries = wrapped.entrySet();
    int[] keys = new int[entries.size()];
    byte[][] values = new byte[keys.length][];

    int i = 0;
    for (Map.Entry<Integer, ByteBuffer> entry : entries) {
      keys[i] = entry.getKey();
      values[i] = copy(entry.getValue());
      i += 1;
    }

    return new MapSerializationProxy(keys, values);
  }

  // returns the buffer's remaining bytes as an array without disturbing the buffer
  private byte[] copy(ByteBuffer buffer) {
    if (buffer.hasArray()) {
      byte[] array = buffer.array();
      if (buffer.arrayOffset() == 0 && buffer.position() == 0 && array.length == buffer.remaining()) {
        // the buffer is backed by exactly its remaining bytes; share the array
        return array;
      } else {
        int start = buffer.arrayOffset() + buffer.position();
        int end = start + buffer.remaining();
        return Arrays.copyOfRange(array, start, end);
      }
    } else {
      // BUG FIX: read through a duplicate so the original buffer's position is not
      // consumed as a side effect of serializing the map
      ByteBuffer copy = buffer.duplicate();
      byte[] bytes = new byte[copy.remaining()];
      copy.get(bytes);
      return bytes;
    }
  }

  @Override
  public int size() {
    return wrapped.size();
  }

  @Override
  public boolean isEmpty() {
    return wrapped.isEmpty();
  }

  @Override
  public boolean containsKey(Object key) {
    return wrapped.containsKey(key);
  }

  @Override
  public boolean containsValue(Object value) {
    return wrapped.containsValue(value);
  }

  @Override
  public ByteBuffer get(Object key) {
    return wrapped.get(key);
  }

  @Override
  public ByteBuffer put(Integer key, ByteBuffer value) {
    return wrapped.put(key, value);
  }

  @Override
  public ByteBuffer remove(Object key) {
    return wrapped.remove(key);
  }

  @Override
  public void putAll(Map<? extends Integer, ? extends ByteBuffer> m) {
    wrapped.putAll(m);
  }

  @Override
  public void clear() {
    wrapped.clear();
  }

  @Override
  public Set<Integer> keySet() {
    return wrapped.keySet();
  }

  @Override
  public Collection<ByteBuffer> values() {
    return wrapped.values();
  }

  @Override
  public Set<Entry<Integer, ByteBuffer>> entrySet() {
    return wrapped.entrySet();
  }

  @Override
  public boolean equals(Object o) {
    return wrapped.equals(o);
  }

  @Override
  public int hashCode() {
    return wrapped.hashCode();
  }
}
| 6,340 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseCombinedScanTask.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import java.util.Collection;
import java.util.List;
/**
 * A {@link CombinedScanTask} over a fixed, immutable group of file scan tasks.
 */
public class BaseCombinedScanTask implements CombinedScanTask {
  private final List<FileScanTask> fileTasks;

  public BaseCombinedScanTask(FileScanTask... tasks) {
    // defensive immutable copy of the caller's array
    this.fileTasks = ImmutableList.copyOf(tasks);
  }

  public BaseCombinedScanTask(List<FileScanTask> tasks) {
    // defensive immutable copy of the caller's list
    this.fileTasks = ImmutableList.copyOf(tasks);
  }

  @Override
  public Collection<FileScanTask> files() {
    return fileTasks;
  }
}
| 6,341 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/CharSequenceWrapper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.netflix.iceberg.types.Comparators;
/**
 * Adapts a CharSequence for use in maps and sets: equality and hashing are based on character
 * content, so different CharSequence implementations with equal content compare equal.
 */
public class CharSequenceWrapper {
  public static CharSequenceWrapper wrap(CharSequence seq) {
    return new CharSequenceWrapper(seq);
  }

  private CharSequence seq;

  private CharSequenceWrapper(CharSequence seq) {
    this.seq = seq;
  }

  /** Replaces the wrapped sequence and returns this wrapper. */
  public CharSequenceWrapper set(CharSequence seq) {
    this.seq = seq;
    return this;
  }

  public CharSequence get() {
    return seq;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    } else if (other == null || getClass() != other.getClass()) {
      return false;
    }

    CharSequenceWrapper that = (CharSequenceWrapper) other;
    // compare by character content
    return Comparators.charSequences().compare(seq, that.seq) == 0;
  }

  @Override
  public int hashCode() {
    // same recurrence as String.hashCode (h = 31*h + c) but seeded with 177
    int hash = 177;
    int i = 0;
    while (i < seq.length()) {
      hash = 31 * hash + (int) seq.charAt(i);
      i += 1;
    }
    return hash;
  }
}
| 6,342 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ParallelIterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
 * An Iterable that consumes a collection of iterables in parallel.
 * <p>
 * Each call to {@link #iterator()} submits a background task to {@code trackingPool} that
 * fans per-iterable reads out to {@code workerPool}; produced items are buffered in an
 * unbounded queue, so ordering across the source iterables is not preserved.
 */
public class ParallelIterable<T> implements Iterable<T> {
  private final Iterable<Iterable<T>> iterables;
  private final ExecutorService trackingPool;
  private final ExecutorService workerPool;

  public ParallelIterable(Iterable<Iterable<T>> iterables,
                          ExecutorService trackingPool,
                          ExecutorService workerPool) {
    this.iterables = iterables;
    this.trackingPool = trackingPool;
    this.workerPool = workerPool;
  }

  @Override
  public Iterator<T> iterator() {
    return new ParallelIterator<>(iterables, trackingPool, workerPool);
  }

  private static class ParallelIterator<T> implements Iterator<T> {
    // buffer between the producer tasks and the consuming thread
    private final ConcurrentLinkedQueue<T> queue = new ConcurrentLinkedQueue<>();
    // completes when every source iterable has been drained into the queue
    private final Future<?> taskFuture;

    public ParallelIterator(Iterable<Iterable<T>> iterables,
                            ExecutorService trackingPool,
                            ExecutorService workerPool) {
      this.taskFuture = trackingPool.submit(() -> {
        // read all iterables on the worker pool; a failure stops the remaining reads and
        // is surfaced through the future
        Tasks.foreach(iterables)
            .noRetry().stopOnFailure().throwFailureWhenFinished()
            .executeWith(workerPool)
            .run(iterable -> {
              for (T item : iterable) {
                queue.add(item);
              }
            });
        return true;
      });
    }

    @Override
    public synchronized boolean hasNext() {
      // this cannot conclude that there are no more records until tasks have finished. while some
      // are running, return true when there is at least one item to return.
      while (!taskFuture.isDone()) {
        if (!queue.isEmpty()) {
          return true;
        }

        try {
          // wait briefly for completion; a timeout loops back to re-check the queue
          taskFuture.get(10, TimeUnit.MILLISECONDS);
          break;

        } catch (InterruptedException e) {
          // restore the interrupt flag for callers above this frame
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);

        } catch (ExecutionException e) {
          // rethrow the producer's failure on the consuming thread
          ExceptionUtil.castAndThrow(e.getCause(), RuntimeException.class);

        } catch (TimeoutException e) {
          // continue looping to check the queue size and wait again
        }
      }

      // when tasks are no longer running, return whether the queue has items
      return !queue.isEmpty();
    }

    @Override
    public synchronized T next() {
      // use hasNext to block until there is an available record
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return queue.poll();
    }
  }
}
| 6,343 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/JsonUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
 * Helpers for reading required, typed properties from Jackson JSON trees.
 * <p>
 * Each getter validates that the property exists and has the expected JSON type, throwing
 * IllegalArgumentException (via Preconditions) otherwise.
 */
public class JsonUtil {
  private static final JsonFactory FACTORY = new JsonFactory();
  private static final ObjectMapper MAPPER = new ObjectMapper(FACTORY);

  public static JsonFactory factory() {
    return FACTORY;
  }

  public static ObjectMapper mapper() {
    return MAPPER;
  }

  /**
   * Returns the int value of a required numeric property.
   */
  public static int getInt(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing int %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isNumber(),
        "Cannot parse %s from non-numeric value: %s", property, pNode);
    return pNode.asInt();
  }

  /**
   * Returns the long value of a required numeric property.
   */
  public static long getLong(String property, JsonNode node) {
    // fixed copy-paste in the message: this reads a long, not an int
    Preconditions.checkArgument(node.has(property), "Cannot parse missing long %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isNumber(),
        "Cannot parse %s from non-numeric value: %s", property, pNode);
    return pNode.asLong();
  }

  /**
   * Returns the value of a required boolean property.
   */
  public static boolean getBool(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing boolean %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isBoolean(),
        "Cannot parse %s from non-boolean value: %s", property, pNode);
    return pNode.asBoolean();
  }

  /**
   * Returns the value of a required string property.
   */
  public static String getString(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing string %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isTextual(),
        "Cannot parse %s from non-string value: %s", property, pNode);
    return pNode.asText();
  }

  /**
   * Returns a required object property as an immutable string-to-string map.
   * All values of the object must be textual.
   */
  public static Map<String, String> getStringMap(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing map %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isObject(),
        "Cannot parse %s from non-object value: %s", property, pNode);

    ImmutableMap.Builder<String, String> builder = ImmutableMap.builder();
    Iterator<String> fields = pNode.fieldNames();
    while (fields.hasNext()) {
      String field = fields.next();
      // getString re-validates that each value is textual
      builder.put(field, getString(field, pNode));
    }
    return builder.build();
  }

  /**
   * Returns a required array property as an immutable list of strings.
   * All elements of the array must be textual.
   */
  public static List<String> getStringList(String property, JsonNode node) {
    Preconditions.checkArgument(node.has(property), "Cannot parse missing list %s", property);
    JsonNode pNode = node.get(property);
    Preconditions.checkArgument(pNode != null && !pNode.isNull() && pNode.isArray(),
        "Cannot parse %s from non-array value: %s", property, pNode);

    ImmutableList.Builder<String> builder = ImmutableList.builder();
    Iterator<JsonNode> elements = pNode.elements();
    while (elements.hasNext()) {
      JsonNode element = elements.next();
      Preconditions.checkArgument(element.isTextual(),
          "Cannot parse string from non-text value: %s", element);
      builder.add(element.asText());
    }
    return builder.build();
  }
}
| 6,344 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/StructLikeWrapper.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.netflix.iceberg.StructLike;
/**
 * Wrapper to adapt StructLike for use in maps and sets by implementing equals and hashCode
 * over the struct's field values.
 * <p>
 * Null field values are handled: they compare equal to each other and hash as 0 (the
 * original implementation threw NullPointerException on null fields).
 */
public class StructLikeWrapper {
  public static StructLikeWrapper wrap(StructLike struct) {
    return new StructLikeWrapper(struct);
  }

  private StructLike struct;

  private StructLikeWrapper(StructLike struct) {
    this.struct = struct;
  }

  /** Replaces the wrapped struct and returns this wrapper. */
  public StructLikeWrapper set(StructLike struct) {
    this.struct = struct;
    return this;
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    if (other == null || getClass() != other.getClass()) {
      return false;
    }

    StructLikeWrapper that = (StructLikeWrapper) other;

    if (this.struct == null || that.struct == null) {
      // equal only when both wrapped structs are null
      return this.struct == that.struct;
    }

    int len = struct.size();
    if (len != that.struct.size()) {
      return false;
    }

    for (int i = 0; i < len; i += 1) {
      Object mine = struct.get(i, Object.class);
      Object theirs = that.struct.get(i, Object.class);
      // null-safe field comparison
      if (mine != theirs && (mine == null || !mine.equals(theirs))) {
        return false;
      }
    }

    return true;
  }

  @Override
  public int hashCode() {
    int result = 97;
    int len = struct.size();
    result = 41 * result + len;
    for (int i = 0; i < len; i += 1) {
      Object value = struct.get(i, Object.class);
      // null fields hash as 0 instead of throwing NullPointerException
      result = 41 * result + (value == null ? 0 : value.hashCode());
    }
    return result;
  }
}
| 6,345 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ExceptionUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
/**
 * Helper for rethrowing an arbitrary {@link Throwable} as a specific checked exception type.
 */
public class ExceptionUtil {
  @SuppressWarnings("unchecked")
  static <E extends Exception> void castAndThrow(
      Throwable e, Class<E> exceptionClass) throws E {
    // unchecked throwables propagate unchanged
    if (e instanceof RuntimeException) {
      throw (RuntimeException) e;
    }
    if (e instanceof Error) {
      throw (Error) e;
    }
    // rethrow directly when the throwable matches the allowed checked type
    if (exceptionClass.isInstance(e)) {
      throw (E) e;
    }
    // anything else is wrapped so callers never see an undeclared checked exception
    throw new RuntimeException(e);
  }
}
| 6,346 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Tasks.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.collect.Lists;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
 * Utility for applying a task to a collection of items with optional parallelism,
 * per-item retries with exponential backoff, failure callbacks, and revert/abort
 * handling for partially-completed runs.
 * <p>
 * Entry points are {@link #foreach(Iterable)}, {@link #foreach(Object[])} and
 * {@link #range(int)}, each of which returns a {@link Builder}.
 */
public class Tasks {
  private static final Logger LOG = LoggerFactory.getLogger(Tasks.class);

  /**
   * Signals a failure that must never be retried, even when retries are configured.
   */
  public static class UnrecoverableException extends RuntimeException {
    public UnrecoverableException(String message) {
      super(message);
    }
    public UnrecoverableException(String message, Throwable cause) {
      super(message, cause);
    }
    public UnrecoverableException(Throwable cause) {
      super(cause);
    }
  }

  /**
   * Callback invoked with a failed item and the exception that caused its failure.
   */
  public interface FailureTask<I, E extends Exception> {
    void run(I item, Exception exception) throws E;
  }

  /**
   * The unit of work applied to each item.
   */
  public interface Task<I, E extends Exception> {
    void run(I item) throws E;
  }

  /**
   * Fluent configuration for a task run over a fixed set of items.
   * <p>
   * Not thread-safe: configure and invoke {@code run} from a single thread. The run
   * itself executes in parallel when {@link #executeWith(ExecutorService)} is set.
   */
  public static class Builder<I> {
    private final Iterable<I> items;
    private ExecutorService service = null;           // null means run single-threaded
    private FailureTask<I, ?> onFailure = null;       // invoked once per failed item
    private boolean stopOnFailure = false;            // stop starting new items after a failure
    private boolean throwFailureWhenFinished = true;  // rethrow collected failures at the end
    private Task<I, ?> revertTask = null;             // undo applied to items that succeeded
    private boolean stopRevertsOnFailure = false;
    private Task<I, ?> abortTask = null;              // applied to items that were never attempted
    private boolean stopAbortsOnFailure = false;
    // retry settings
    @SuppressWarnings("unchecked")
    private List<Class<? extends Exception>> stopRetryExceptions = Lists.newArrayList(
        UnrecoverableException.class);
    private List<Class<? extends Exception>> onlyRetryExceptions = null;
    private int maxAttempts = 1; // not all operations can be retried
    private long minSleepTimeMs = 1000; // 1 second
    private long maxSleepTimeMs = 600000; // 10 minutes
    private long maxDurationMs = 600000; // 10 minutes
    private double scaleFactor = 2.0; // exponential

    public Builder(Iterable<I> items) {
      this.items = items;
    }

    /** Runs items in parallel on the given executor instead of single-threaded. */
    public Builder<I> executeWith(ExecutorService service) {
      this.service = service;
      return this;
    }

    public Builder<I> onFailure(FailureTask<I, ?> task) {
      this.onFailure = task;
      return this;
    }

    public Builder<I> stopOnFailure() {
      this.stopOnFailure = true;
      return this;
    }

    public Builder<I> throwFailureWhenFinished() {
      this.throwFailureWhenFinished = true;
      return this;
    }

    public Builder<I> suppressFailureWhenFinished() {
      this.throwFailureWhenFinished = false;
      return this;
    }

    public Builder<I> throwFailureWhenFinished(boolean throwWhenFinished) {
      this.throwFailureWhenFinished = throwWhenFinished;
      return this;
    }

    /** Sets the task used to undo successfully-processed items after a failure. */
    public Builder<I> revertWith(Task<I, ?> task) {
      this.revertTask = task;
      return this;
    }

    public Builder<I> stopRevertsOnFailure() {
      this.stopRevertsOnFailure = true;
      return this;
    }

    /** Sets the task applied to items that were never attempted after a failure. */
    public Builder<I> abortWith(Task<I, ?> task) {
      this.abortTask = task;
      return this;
    }

    public Builder<I> stopAbortsOnFailure() {
      this.stopAbortsOnFailure = true;
      return this;
    }

    /** Adds exception types that disable further retries when thrown. */
    public Builder<I> stopRetryOn(Class<? extends Exception>... exceptions) {
      stopRetryExceptions.addAll(Arrays.asList(exceptions));
      return this;
    }

    public Builder<I> noRetry() {
      this.maxAttempts = 1;
      return this;
    }

    /** Retries each item up to {@code nTimes} additional attempts after the first. */
    public Builder<I> retry(int nTimes) {
      this.maxAttempts = nTimes + 1;
      return this;
    }

    /** Restricts retries to the given exception type; all others fail immediately. */
    public Builder<I> onlyRetryOn(Class<? extends Exception> exception) {
      this.onlyRetryExceptions = Collections.singletonList(exception);
      return this;
    }

    public Builder<I> onlyRetryOn(Class<? extends Exception>... exceptions) {
      this.onlyRetryExceptions = Lists.newArrayList(exceptions);
      return this;
    }

    /**
     * Configures retry backoff: sleep grows from {@code minSleepTimeMs} by
     * {@code scaleFactor} per attempt, capped at {@code maxSleepTimeMs}; retrying
     * stops entirely once {@code maxRetryTimeMs} has elapsed.
     */
    public Builder<I> exponentialBackoff(long minSleepTimeMs,
                                         long maxSleepTimeMs,
                                         long maxRetryTimeMs,
                                         double scaleFactor) {
      this.minSleepTimeMs = minSleepTimeMs;
      this.maxSleepTimeMs = maxSleepTimeMs;
      this.maxDurationMs = maxRetryTimeMs;
      this.scaleFactor = scaleFactor;
      return this;
    }

    /** Runs a task that throws only unchecked exceptions. Returns true when all items succeeded. */
    public boolean run(Task<I, RuntimeException> task) {
      return run(task, RuntimeException.class);
    }

    /**
     * Runs the task over all items.
     *
     * @param task the work to apply to each item
     * @param exceptionClass the checked exception type callers expect to catch
     * @return true when every item succeeded, false otherwise
     * @throws E the first collected failure, when throwFailureWhenFinished is set
     */
    public <E extends Exception> boolean run(Task<I, E> task,
                                             Class<E> exceptionClass) throws E {
      if (service != null) {
        return runParallel(task, exceptionClass);
      } else {
        return runSingleThreaded(task, exceptionClass);
      }
    }

    // Sequential implementation: iterates items in order, reverting successes and
    // aborting unprocessed items when anything fails.
    private <E extends Exception> boolean runSingleThreaded(
        Task<I, E> task, Class<E> exceptionClass) throws E {
      List<I> succeeded = Lists.newArrayList();
      List<Throwable> exceptions = Lists.newArrayList();
      Iterator<I> iterator = items.iterator();
      boolean threw = true;
      try {
        while (iterator.hasNext()) {
          I item = iterator.next();
          try {
            runTaskWithRetry(task, item);
            succeeded.add(item);
          } catch (Exception e) {
            exceptions.add(e);
            if (onFailure != null) {
              try {
                onFailure.run(item, e);
              } catch (Exception failException) {
                e.addSuppressed(failException);
                LOG.error("Failed to clean up on failure", e);
                // keep going
              }
            }
            if (stopOnFailure) {
              break;
            }
          }
        }
        threw = false;
      } finally {
        // threw handles exceptions that were *not* caught by the catch block,
        // and exceptions that were caught and possibly handled by onFailure
        // are kept in exceptions.
        if (threw || !exceptions.isEmpty()) {
          if (revertTask != null) {
            boolean failed = false;
            for (I item : succeeded) {
              try {
                revertTask.run(item);
              } catch (Exception e) {
                failed = true;
                LOG.error("Failed to revert task", e);
                // keep going
              }
              if (stopRevertsOnFailure && failed) {
                break;
              }
            }
          }
          if (abortTask != null) {
            boolean failed = false;
            // the iterator is shared with the main loop, so it resumes at the first
            // item that was never attempted
            while (iterator.hasNext()) {
              try {
                abortTask.run(iterator.next());
              } catch (Exception e) {
                failed = true;
                LOG.error("Failed to abort task", e);
                // keep going
              }
              if (stopAbortsOnFailure && failed) {
                break;
              }
            }
          }
        }
      }
      if (throwFailureWhenFinished && !exceptions.isEmpty()) {
        Tasks.throwOne(exceptions, exceptionClass);
      } else if (throwFailureWhenFinished && threw) {
        throw new RuntimeException(
            "Task set failed with an uncaught throwable");
      }
      return !threw;
    }

    // Parallel implementation: submits one runnable per item; runnables that start
    // after a failure (with stopOnFailure) run the abort task instead of the task.
    private <E extends Exception> boolean runParallel(final Task<I, E> task,
                                                      Class<E> exceptionClass)
        throws E {
      final Queue<I> succeeded = new ConcurrentLinkedQueue<>();
      final Queue<Throwable> exceptions = new ConcurrentLinkedQueue<>();
      final AtomicBoolean taskFailed = new AtomicBoolean(false);
      final AtomicBoolean abortFailed = new AtomicBoolean(false);
      final AtomicBoolean revertFailed = new AtomicBoolean(false);
      List<Future<?>> futures = Lists.newArrayList();
      for (final I item : items) {
        // submit a task for each item that will either run or abort the task
        futures.add(service.submit(new Runnable() {
          @Override
          public void run() {
            if (!(stopOnFailure && taskFailed.get())) {
              // run the task with retries
              boolean threw = true;
              try {
                runTaskWithRetry(task, item);
                succeeded.add(item);
                threw = false;
              } catch (Exception e) {
                taskFailed.set(true);
                exceptions.add(e);
                if (onFailure != null) {
                  try {
                    onFailure.run(item, e);
                  } catch (Exception failException) {
                    e.addSuppressed(failException);
                    LOG.error("Failed to clean up on failure", e);
                    // swallow the exception
                  }
                }
              } finally {
                if (threw) {
                  taskFailed.set(true);
                }
              }
            } else if (abortTask != null) {
              // abort the task instead of running it
              if (stopAbortsOnFailure && abortFailed.get()) {
                return;
              }
              boolean failed = true;
              try {
                abortTask.run(item);
                failed = false;
              } catch (Exception e) {
                LOG.error("Failed to abort task", e);
                // swallow the exception
              } finally {
                if (failed) {
                  abortFailed.set(true);
                }
              }
            }
          }
        }));
      }
      // let the above tasks complete (or abort)
      exceptions.addAll(waitFor(futures));
      futures.clear();
      if (taskFailed.get() && revertTask != null) {
        // at least one task failed, revert any that succeeded
        for (final I item : succeeded) {
          futures.add(service.submit(new Runnable() {
            @Override
            public void run() {
              if (stopRevertsOnFailure && revertFailed.get()) {
                return;
              }
              boolean failed = true;
              try {
                revertTask.run(item);
                failed = false;
              } catch (Exception e) {
                LOG.error("Failed to revert task", e);
                // swallow the exception
              } finally {
                if (failed) {
                  revertFailed.set(true);
                }
              }
            }
          }));
        }
        // let the revert tasks complete
        exceptions.addAll(waitFor(futures));
      }
      if (throwFailureWhenFinished && !exceptions.isEmpty()) {
        Tasks.throwOne(exceptions, exceptionClass);
      } else if (throwFailureWhenFinished && taskFailed.get()) {
        throw new RuntimeException(
            "Task set failed with an uncaught throwable");
      }
      return !taskFailed.get();
    }

    // Runs the task for one item, retrying per the builder's retry configuration
    // with exponential backoff plus up to 10% random jitter between attempts.
    private <E extends Exception> void runTaskWithRetry(Task<I, E> task, I item)
        throws E {
      long start = System.currentTimeMillis();
      int attempt = 0;
      while (true) {
        attempt += 1;
        try {
          task.run(item);
          break;
        } catch (Exception e) {
          long durationMs = System.currentTimeMillis() - start;
          if (attempt >= maxAttempts || durationMs > maxDurationMs) {
            throw e;
          }
          if (onlyRetryExceptions != null) {
            // if onlyRetryExceptions are present, then this retries if one is found
            boolean matchedRetryException = false;
            for (Class<? extends Exception> exClass : onlyRetryExceptions) {
              if (exClass.isInstance(e)) {
                matchedRetryException = true;
              }
            }
            if (!matchedRetryException) {
              throw e;
            }
          } else {
            // otherwise, always retry unless one of the stop exceptions is found
            for (Class<? extends Exception> exClass : stopRetryExceptions) {
              if (exClass.isInstance(e)) {
                throw e;
              }
            }
          }
          int delayMs = (int) Math.min(
              minSleepTimeMs * Math.pow(scaleFactor, attempt - 1),
              maxSleepTimeMs);
          int jitter = ThreadLocalRandom.current()
              .nextInt(Math.max(1, (int) (delayMs * 0.1)));
          LOG.warn("Retrying task after failure: " + e.getMessage(), e);
          try {
            TimeUnit.MILLISECONDS.sleep(delayMs + jitter);
          } catch (InterruptedException ie) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(ie);
          }
        }
      }
    }
  }

  // Polls until every future is done, then collects uncaught failures. Errors are
  // rethrown immediately; cancellations are ignored; interruption cancels all futures.
  private static Collection<Throwable> waitFor(Collection<Future<?>> futures)
      throws Error {
    while (true) {
      int numFinished = 0;
      for (Future<?> future : futures) {
        if (future.isDone()) {
          numFinished += 1;
        }
      }
      if (numFinished == futures.size()) {
        List<Throwable> uncaught = new ArrayList<>();
        // all of the futures are done, get any uncaught exceptions
        for (Future<?> future : futures) {
          try {
            future.get();
          } catch (InterruptedException e) {
            LOG.warn("Interrupted while getting future results", e);
            for (Throwable t : uncaught) {
              e.addSuppressed(t);
            }
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
          } catch (CancellationException e) {
            // ignore cancellations
          } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (Error.class.isInstance(cause)) {
              // propagate Errors right away, carrying earlier failures as suppressed
              for (Throwable t : uncaught) {
                cause.addSuppressed(t);
              }
              throw (Error) cause;
            }
            if (cause != null) {
              uncaught.add(e);
            }
            LOG.warn("Task threw uncaught exception", cause);
          }
        }
        return uncaught;
      } else {
        try {
          Thread.sleep(10);
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while waiting for tasks to finish", e);
          for (Future<?> future : futures) {
            future.cancel(true);
          }
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      }
    }
  }

  /**
   * A range, [ 0, size )
   */
  private static class Range implements Iterable<Integer> {
    private int size;
    Range(int size) {
      this.size = size;
    }
    @Override
    public Iterator<Integer> iterator() {
      return new Iterator<Integer>() {
        private int current = 0;
        @Override
        public boolean hasNext() {
          return current < size;
        }
        // NOTE(review): does not throw NoSuchElementException past the end;
        // callers are expected to guard with hasNext()
        @Override
        public Integer next() {
          int ret = current;
          current += 1;
          return ret;
        }
      };
    }
  }

  /** Builder over the integers 0 (inclusive) to {@code upTo} (exclusive). */
  public static Builder<Integer> range(int upTo) {
    return new Builder<>(new Range(upTo));
  }

  public static <I> Builder<I> foreach(Iterable<I> items) {
    return new Builder<>(items);
  }

  public static <I> Builder<I> foreach(I... items) {
    return new Builder<>(Arrays.asList(items));
  }

  // Throws the first collected exception (via ExceptionUtil.castAndThrow), attaching
  // failures of other types as suppressed exceptions.
  @SuppressWarnings("unchecked")
  private static <E extends Exception> void throwOne(
      Collection<Throwable> exceptions, Class<E> allowedException) throws E {
    Iterator<Throwable> iter = exceptions.iterator();
    Throwable e = iter.next();
    Class<? extends Throwable> exceptionClass = e.getClass();
    while (iter.hasNext()) {
      Throwable other = iter.next();
      if (!exceptionClass.isInstance(other)) {
        e.addSuppressed(other);
      }
    }
    ExceptionUtil.<E>castAndThrow(e, allowedException);
  }
}
| 6,347 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Pair.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.base.Objects;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.reflect.ReflectData;
import org.apache.avro.specific.SpecificData;
import java.io.Serializable;
/**
 * A simple serializable pair that can also be written by Avro as a two-field record.
 *
 * @param <X> type of the first value
 * @param <Y> type of the second value
 */
public class Pair<X, Y> implements IndexedRecord, SpecificData.SchemaConstructable, Serializable {
  public static <X, Y> Pair<X, Y> of(X x, Y y) {
    return new Pair<>(x, y);
  }

  // caches the generated Avro schema per (X class, Y class) combination
  private static final LoadingCache<Pair<Class<?>, Class<?>>, Schema> SCHEMA_CACHE = CacheBuilder
      .newBuilder()
      .build(new CacheLoader<Pair<Class<?>, Class<?>>, Schema>() {
        @Override
        @SuppressWarnings("deprecation")
        public Schema load(Pair<Class<?>, Class<?>> key) {
          Schema xSchema = ReflectData.get().getSchema(key.x);
          Schema ySchema = ReflectData.get().getSchema(key.y);
          return Schema.createRecord("pair", null, null, false, Lists.newArrayList(
              new Schema.Field("x", xSchema, null, null),
              new Schema.Field("y", ySchema, null, null)
          ));
        }
      });

  private Schema schema = null;
  private X x;
  private Y y;

  /**
   * Constructor used by Avro
   */
  private Pair(Schema schema) {
    this.schema = schema;
  }

  private Pair(X x, Y y) {
    this.x = x;
    this.y = y;
  }

  @Override
  @SuppressWarnings("unchecked")
  public void put(int i, Object v) {
    if (i == 0) {
      this.x = (X) v;
      return;
    } else if (i == 1) {
      this.y = (Y) v;
      return;
    }
    throw new IllegalArgumentException("Cannot set value " + i + " (not 0 or 1): " + v);
  }

  @Override
  public Object get(int i) {
    if (i == 0) {
      return x;
    } else if (i == 1) {
      return y;
    }
    throw new IllegalArgumentException("Cannot get value " + i + " (not 0 or 1)");
  }

  @Override
  public Schema getSchema() {
    // NOTE(review): deriving the schema requires non-null x and y — confirm callers
    // never ask for the schema of a pair holding nulls
    if (schema == null) {
      this.schema = SCHEMA_CACHE.getUnchecked(Pair.of(x.getClass(), y.getClass()));
    }
    return schema;
  }

  public X first() {
    return x;
  }

  public Y second() {
    return y;
  }

  @Override
  public String toString() {
    return "(" + String.valueOf(x) + ", " + String.valueOf(y) + ")";
  }

  @Override
  public int hashCode() {
    return Objects.hashCode(x, y);
  }

  @Override
  public boolean equals(Object other) {
    if (this == other) {
      return true;
    }
    // null check added: equals(null) must return false per the Object.equals
    // contract; the original threw NullPointerException
    if (other == null || getClass() != other.getClass()) {
      return false;
    }
    Pair<?, ?> otherPair = (Pair<?, ?>) other;
    return Objects.equal(x, otherPair.x) && Objects.equal(y, otherPair.y);
  }
}
| 6,348 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/BinPacking.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.function.Function;
/**
 * First-fit bin packing with a bounded number of open bins ("lookback").
 * <p>
 * Items are placed into the first open bin with room; when none fits, a new bin is
 * opened, and once more than {@code lookback} bins are open the oldest is emitted.
 */
public class BinPacking {
  /**
   * Eagerly packs a collection of items into weight-bounded lists.
   */
  public static class ListPacker<T> {
    private final long targetWeight;
    private final int lookback;

    public ListPacker(long targetWeight, int lookback) {
      this.targetWeight = targetWeight;
      this.lookback = lookback;
    }

    /**
     * Packs scanning from the end of the list; both the bins and the items within
     * each bin are reversed back so results follow the original ordering.
     */
    public List<List<T>> packEnd(List<T> items, Function<T, Long> weightFunc) {
      return Lists.reverse(ImmutableList.copyOf(Iterables.transform(
          new PackingIterable<>(Lists.reverse(items), targetWeight, lookback, weightFunc),
          Lists::reverse)));
    }

    /** Packs scanning from the start of the iterable. */
    public List<List<T>> pack(Iterable<T> items, Function<T, Long> weightFunc) {
      return ImmutableList.copyOf(new PackingIterable<>(items, targetWeight, lookback, weightFunc));
    }
  }

  /**
   * Lazy bin-packing view over an iterable; each call to iterator() re-packs.
   */
  public static class PackingIterable<T> implements Iterable<List<T>> {
    private final Iterable<T> iterable;
    private final long targetWeight;
    private final int lookback;
    private final Function<T, Long> weightFunc;

    public PackingIterable(Iterable<T> iterable, long targetWeight, int lookback,
                           Function<T, Long> weightFunc) {
      Preconditions.checkArgument(lookback > 0,
          "Bin look-back size must be greater than 0: %s", lookback);
      this.iterable = iterable;
      this.targetWeight = targetWeight;
      this.lookback = lookback;
      this.weightFunc = weightFunc;
    }

    @Override
    public Iterator<List<T>> iterator() {
      return new PackingIterator<>(iterable.iterator(), targetWeight, lookback, weightFunc);
    }
  }

  private static class PackingIterator<T> implements Iterator<List<T>> {
    // open bins, oldest first; bounded at `lookback` entries between next() calls
    private final LinkedList<Bin> bins = Lists.newLinkedList();
    private final Iterator<T> items;
    private final long targetWeight;
    private final int lookback;
    private final Function<T, Long> weightFunc;

    private PackingIterator(Iterator<T> items, long targetWeight, int lookback,
                            Function<T, Long> weightFunc) {
      this.items = items;
      this.targetWeight = targetWeight;
      this.lookback = lookback;
      this.weightFunc = weightFunc;
    }

    public boolean hasNext() {
      return items.hasNext() || !bins.isEmpty();
    }

    /**
     * Consumes items until the open-bin limit would be exceeded, then emits the
     * oldest bin; once input is exhausted, drains the remaining bins in order.
     */
    public List<T> next() {
      while (items.hasNext()) {
        T item = items.next();
        long weight = weightFunc.apply(item);
        Bin bin = find(bins, weight);
        if (bin != null) {
          bin.add(item, weight);
        } else {
          bin = new Bin();
          bin.add(item, weight);
          bins.addLast(bin);
          if (bins.size() > lookback) {
            return ImmutableList.copyOf(bins.removeFirst().items());
          }
        }
      }
      if (bins.isEmpty()) {
        throw new NoSuchElementException();
      }
      return ImmutableList.copyOf(bins.removeFirst().items());
    }

    // first-fit: returns the oldest open bin with room for `weight`, or null
    private Bin find(List<Bin> bins, long weight) {
      for (Bin bin : bins) {
        if (bin.canAdd(weight)) {
          return bin;
        }
      }
      return null;
    }

    private class Bin {
      private long binWeight = 0L;
      private List<T> items = Lists.newArrayList();

      public List<T> items() {
        return items;
      }

      public boolean canAdd(long weight) {
        return (binWeight + weight <= targetWeight);
      }

      public void add(T item, long weight) {
        this.binWeight += weight;
        items.add(item);
      }
    }
  }
}
| 6,349 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/Exceptions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
/**
 * Helpers for running cleanup code without losing an exception that is already in flight.
 */
public class Exceptions {
  private Exceptions() {
  }

  /**
   * Runs {@code cleanup}, attaching any exception it throws as suppressed to
   * {@code primary} instead of propagating it.
   *
   * @param primary the exception that is already being thrown
   * @param cleanup follow-up work to attempt
   * @return {@code primary}, possibly with a new suppressed exception attached
   */
  public static <E extends Exception> E suppressExceptions(E primary, Runnable cleanup) {
    try {
      cleanup.run();
    } catch (Exception suppressed) {
      primary.addSuppressed(suppressed);
    }
    return primary;
  }

  /**
   * Runs {@code cleanup} and then throws {@code primary}, with any cleanup failure
   * attached as a suppressed exception.
   */
  public static <E extends Exception> void suppressAndThrow(E primary, Runnable cleanup) throws E {
    throw suppressExceptions(primary, cleanup);
  }
}
| 6,350 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/util/ThreadPools.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.util.concurrent.MoreExecutors;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import com.netflix.iceberg.SystemProperties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
/**
 * Holder for the shared planner and worker thread pools.
 * <p>
 * Both pools are created once at class-load time, use daemon threads, and are wrapped
 * with {@code MoreExecutors.getExitingExecutorService} so they do not block JVM exit.
 */
public class ThreadPools {
  public static final String PLANNER_THREAD_POOL_SIZE_PROP =
      SystemProperties.PLANNER_THREAD_POOL_SIZE_PROP;
  public static final String WORKER_THREAD_POOL_SIZE_PROP =
      SystemProperties.WORKER_THREAD_POOL_SIZE_PROP;

  // singletons: declared final (was mutable) so they cannot be reassigned after init
  private static final ExecutorService PLANNER_POOL = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newFixedThreadPool(
          getPoolSize(PLANNER_THREAD_POOL_SIZE_PROP, 4),
          new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat("iceberg-planner-pool-%d")
              .build()));

  private static final ExecutorService WORKER_POOL = MoreExecutors.getExitingExecutorService(
      (ThreadPoolExecutor) Executors.newFixedThreadPool(
          getPoolSize(WORKER_THREAD_POOL_SIZE_PROP, Runtime.getRuntime().availableProcessors()),
          new ThreadFactoryBuilder()
              .setDaemon(true)
              .setNameFormat("iceberg-worker-pool-%d")
              .build()));

  /**
   * Return an {@link ExecutorService} that uses the "planner" thread-pool.
   * <p>
   * The size of the planner pool limits the number of concurrent planning operations in the base
   * table implementation.
   * <p>
   * The size of this thread-pool is controlled by the Java system property
   * {@code iceberg.planner.num-threads}.
   *
   * @return an {@link ExecutorService} that uses the planner pool
   */
  public static ExecutorService getPlannerPool() {
    return PLANNER_POOL;
  }

  /**
   * Return an {@link ExecutorService} that uses the "worker" thread-pool.
   * <p>
   * The size of the worker pool limits the number of tasks concurrently reading manifests in the
   * base table implementation across all concurrent planning operations.
   * <p>
   * The size of this thread-pool is controlled by the Java system property
   * {@code iceberg.worker.num-threads}.
   *
   * @return an {@link ExecutorService} that uses the worker pool
   */
  public static ExecutorService getWorkerPool() {
    return WORKER_POOL;
  }

  /**
   * Reads a pool size from a system property, falling back to {@code defaultSize}
   * when the property is unset or not a valid non-negative integer.
   */
  private static int getPoolSize(String systemProperty, int defaultSize) {
    String value = System.getProperty(systemProperty);
    if (value != null) {
      try {
        return Integer.parseUnsignedInt(value);
      } catch (NumberFormatException e) {
        // malformed value: will return the default
      }
    }
    return defaultSize;
  }
}
| 6,351 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroFileAppender.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.Metrics;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.OutputFile;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.io.DatumWriter;
import java.io.IOException;
import java.util.Map;
import java.util.function.Function;
/**
 * A {@link FileAppender} that writes records to an Avro data file, tracking the
 * number of records appended for {@link #metrics()}.
 */
class AvroFileAppender<D> implements FileAppender<D> {
  private DataFileWriter<D> writer = null;
  private long numRecords = 0L;

  AvroFileAppender(Schema schema, OutputFile file,
                   Function<Schema, DatumWriter<?>> createWriterFunc,
                   CodecFactory codec, Map<String, String> metadata) throws IOException {
    this.writer = newAvroWriter(schema, file, createWriterFunc, codec, metadata);
  }

  @Override
  public void add(D datum) {
    // count before appending, so the total reflects attempted appends as well
    numRecords += 1L;
    try {
      writer.append(datum);
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  @Override
  public Metrics metrics() {
    // only the record count is tracked; column-level stats are not collected here
    return new Metrics(numRecords, null, null, null);
  }

  @Override
  public void close() throws IOException {
    if (writer == null) {
      return; // already closed
    }
    writer.close();
    this.writer = null;
  }

  /**
   * Builds and opens a DataFileWriter with the given codec and file metadata.
   */
  @SuppressWarnings("unchecked")
  private static <D> DataFileWriter<D> newAvroWriter(
      Schema schema, OutputFile file, Function<Schema, DatumWriter<?>> createWriterFunc,
      CodecFactory codec, Map<String, String> metadata) throws IOException {
    DatumWriter<D> datumWriter = (DatumWriter<D>) createWriterFunc.apply(schema);
    DataFileWriter<D> fileWriter = new DataFileWriter<>(datumWriter);
    fileWriter.setCodec(codec);
    metadata.forEach(fileWriter::setMeta);
    // TODO: support overwrite
    return fileWriter.create(schema, file.create());
  }
}
| 6,352 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueReaders.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.common.DynConstructors;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.ResolvingDecoder;
import org.apache.avro.util.Utf8;
import java.io.IOException;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import static java.util.Collections.emptyIterator;
public class ValueReaders {
  // private constructor: ValueReaders is a static-factory utility class
  private ValueReaders() {
  }
  // ---- factories for primitive value readers (shared stateless singletons) ----
  public static ValueReader<Object> nulls() {
    return NullReader.INSTANCE;
  }
  public static ValueReader<Boolean> booleans() {
    return BooleanReader.INSTANCE;
  }
  public static ValueReader<Integer> ints() {
    return IntegerReader.INSTANCE;
  }
  public static ValueReader<Long> longs() {
    return LongReader.INSTANCE;
  }
  public static ValueReader<Float> floats() {
    return FloatReader.INSTANCE;
  }
  public static ValueReader<Double> doubles() {
    return DoubleReader.INSTANCE;
  }
  public static ValueReader<String> strings() {
    return StringReader.INSTANCE;
  }
  public static ValueReader<Utf8> utf8s() {
    return Utf8Reader.INSTANCE;
  }
  public static ValueReader<UUID> uuids() {
    return UUIDReader.INSTANCE;
  }
  // ---- factories for fixed/bytes/decimal readers ----
  // reads an Avro fixed of exactly `length` bytes into a byte[]
  public static ValueReader<byte[]> fixed(int length) {
    return new FixedReader(length);
  }
  public static ValueReader<GenericData.Fixed> fixed(Schema schema) {
    return new GenericFixedReader(schema);
  }
  public static ValueReader<byte[]> bytes() {
    return BytesReader.INSTANCE;
  }
  public static ValueReader<ByteBuffer> byteBuffers() {
    return ByteBufferReader.INSTANCE;
  }
  // combines a reader for the unscaled value with a fixed scale
  public static ValueReader<BigDecimal> decimal(ValueReader<byte[]> unscaledReader, int scale) {
    return new DecimalReader(unscaledReader, scale);
  }
  // ---- factories for composite readers (unions, arrays, maps, records) ----
  public static ValueReader<Object> union(List<ValueReader<?>> readers) {
    return new UnionReader(readers);
  }
  public static <T> ValueReader<Collection<T>> array(ValueReader<T> elementReader) {
    return new ArrayReader<>(elementReader);
  }
  // reads an Avro array of key/value pairs as a Map
  public static <K, V> ValueReader<Map<K, V>> arrayMap(ValueReader<K> keyReader, ValueReader<V> valueReader) {
    return new ArrayMapReader<>(keyReader, valueReader);
  }
  public static <K, V> ValueReader<Map<K, V>> map(ValueReader<K> keyReader, ValueReader<V> valueReader) {
    return new MapReader<>(keyReader, valueReader);
  }
  // reads a record into an Avro GenericData.Record with the given schema
  public static ValueReader<GenericData.Record> record(List<ValueReader<?>> readers, Schema recordSchema) {
    return new RecordReader(readers, recordSchema);
  }
  // reads a record into a caller-supplied IndexedRecord implementation
  public static <R extends IndexedRecord> ValueReader<R> record(List<ValueReader<?>> readers, Class<R> recordClass, Schema recordSchema) {
    return new IndexedRecordReader<>(readers, recordClass, recordSchema);
  }
  // Stateless singleton readers for Avro primitive types: each simply delegates to
  // the corresponding Decoder read method and keeps no per-read state, so a single
  // INSTANCE can be shared.
  // NOTE(review): the INSTANCE fields are not declared final — consider making them
  // final since they are never reassigned.
  private static class NullReader implements ValueReader<Object> {
    private static NullReader INSTANCE = new NullReader();
    private NullReader() {
    }
    @Override
    public Object read(Decoder decoder, Object ignored) throws IOException {
      // must consume the null marker from the stream even though nothing is returned
      decoder.readNull();
      return null;
    }
  }
  private static class BooleanReader implements ValueReader<Boolean> {
    private static BooleanReader INSTANCE = new BooleanReader();
    private BooleanReader() {
    }
    @Override
    public Boolean read(Decoder decoder, Object ignored) throws IOException {
      return decoder.readBoolean();
    }
  }
  private static class IntegerReader implements ValueReader<Integer> {
    private static IntegerReader INSTANCE = new IntegerReader();
    private IntegerReader() {
    }
    @Override
    public Integer read(Decoder decoder, Object ignored) throws IOException {
      return decoder.readInt();
    }
  }
  private static class LongReader implements ValueReader<Long> {
    private static LongReader INSTANCE = new LongReader();
    private LongReader() {
    }
    @Override
    public Long read(Decoder decoder, Object ignored) throws IOException {
      return decoder.readLong();
    }
  }
  private static class FloatReader implements ValueReader<Float> {
    private static FloatReader INSTANCE = new FloatReader();
    private FloatReader() {
    }
    @Override
    public Float read(Decoder decoder, Object ignored) throws IOException {
      return decoder.readFloat();
    }
  }
  private static class DoubleReader implements ValueReader<Double> {
    private static DoubleReader INSTANCE = new DoubleReader();
    private DoubleReader() {
    }
    @Override
    public Double read(Decoder decoder, Object ignored) throws IOException {
      return decoder.readDouble();
    }
  }
/**
 * Reads an Avro string as a {@link String}.
 * <p>
 * A per-thread {@link Utf8} scratch value is reused across calls to avoid allocating a
 * new Utf8 per value; the decoder may return a different Utf8 instance, so the slot is
 * refreshed after each read.
 */
private static class StringReader implements ValueReader<String> {
  // final guarantees safe publication of the singleton
  private static final StringReader INSTANCE = new StringReader();

  // thread-local so concurrent readers never share the mutable Utf8 buffer
  private final ThreadLocal<Utf8> reusedTempUtf8 = ThreadLocal.withInitial(Utf8::new);

  private StringReader() {
  }

  @Override
  public String read(Decoder decoder, Object ignored) throws IOException {
    // use the decoder's readString(Utf8) method because it may be a resolving decoder
    this.reusedTempUtf8.set(decoder.readString(reusedTempUtf8.get()));
    return reusedTempUtf8.get().toString();
  }
}
/**
 * Reads an Avro string as an Avro {@link Utf8}, reusing the caller-provided Utf8 when
 * one is passed in.
 */
private static class Utf8Reader implements ValueReader<Utf8> {
  // final guarantees safe publication of the singleton
  private static final Utf8Reader INSTANCE = new Utf8Reader();

  private Utf8Reader() {
  }

  @Override
  public Utf8 read(Decoder decoder, Object reuse) throws IOException {
    // use the decoder's readString(Utf8) method because it may be a resolving decoder
    if (reuse instanceof Utf8) {
      return decoder.readString((Utf8) reuse);
    } else {
      return decoder.readString(null);
    }
  }
}
/**
 * Reads a 16-byte fixed value as a {@link UUID}.
 * <p>
 * Bytes are read big-endian: the first 8 bytes are the most-significant bits and the
 * last 8 the least-significant bits. A per-thread scratch buffer avoids allocating a
 * 16-byte buffer for every value.
 */
private static class UUIDReader implements ValueReader<UUID> {
  private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
    ByteBuffer buffer = ByteBuffer.allocate(16);
    buffer.order(ByteOrder.BIG_ENDIAN);
    return buffer;
  });

  // final guarantees safe publication of the singleton
  private static final UUIDReader INSTANCE = new UUIDReader();

  private UUIDReader() {
  }

  @Override
  public UUID read(Decoder decoder, Object ignored) throws IOException {
    ByteBuffer buffer = BUFFER.get();
    // rewind resets the position; readFixed fills the backing array directly, so the
    // subsequent getLong calls read from position 0
    buffer.rewind();
    decoder.readFixed(buffer.array(), 0, 16);
    long mostSigBits = buffer.getLong();
    long leastSigBits = buffer.getLong();
    return new UUID(mostSigBits, leastSigBits);
  }
}
/**
 * Reads a fixed-width value into a byte array of the configured length, reusing the
 * caller's array when it is exactly the right size.
 */
private static class FixedReader implements ValueReader<byte[]> {
  private final int length;

  private FixedReader(int length) {
    this.length = length;
  }

  @Override
  public byte[] read(Decoder decoder, Object reuse) throws IOException {
    // reuse is only safe when the previous array matches the fixed width exactly
    byte[] target;
    if (reuse instanceof byte[] && ((byte[]) reuse).length == length) {
      target = (byte[]) reuse;
    } else {
      target = new byte[length];
    }
    decoder.readFixed(target, 0, length);
    return target;
  }
}
/**
 * Reads a fixed-width value as a {@link GenericData.Fixed} for the given schema,
 * reusing the caller's Fixed when its backing array has the right width.
 */
private static class GenericFixedReader implements ValueReader<GenericData.Fixed> {
  private final Schema schema;
  private final int length;

  private GenericFixedReader(Schema schema) {
    this.schema = schema;
    this.length = schema.getFixedSize();
  }

  @Override
  public GenericData.Fixed read(Decoder decoder, Object reuse) throws IOException {
    // reuse only when the previous value is a Fixed of the same width
    if (reuse instanceof GenericData.Fixed
        && ((GenericData.Fixed) reuse).bytes().length == length) {
      GenericData.Fixed fixed = (GenericData.Fixed) reuse;
      decoder.readFixed(fixed.bytes(), 0, length);
      return fixed;
    }
    byte[] buffer = new byte[length];
    decoder.readFixed(buffer, 0, length);
    return new GenericData.Fixed(schema, buffer);
  }
}
/** Reads Avro bytes as a freshly allocated byte array. */
private static class BytesReader implements ValueReader<byte[]> {
  // final guarantees safe publication of the singleton
  private static final BytesReader INSTANCE = new BytesReader();

  private BytesReader() {
  }

  @Override
  public byte[] read(Decoder decoder, Object reuse) throws IOException {
    // use the decoder's readBytes method because it may be a resolving decoder
    // the only time the previous value could be reused is when its length matches the next array,
    // but there is no way to know this with the readBytes call, which uses a ByteBuffer. it is
    // possible to wrap the reused array in a ByteBuffer, but this may still result in allocating
    // a new buffer. since the usual case requires an allocation anyway to get the size right,
    // just allocate every time.
    return decoder.readBytes(null).array();
  }
}
/** Reads Avro bytes as a {@link ByteBuffer}, reusing the caller's buffer when possible. */
private static class ByteBufferReader implements ValueReader<ByteBuffer> {
  // final guarantees safe publication of the singleton
  private static final ByteBufferReader INSTANCE = new ByteBufferReader();

  private ByteBufferReader() {
  }

  @Override
  public ByteBuffer read(Decoder decoder, Object reuse) throws IOException {
    // use the decoder's readBytes method because it may be a resolving decoder
    if (reuse instanceof ByteBuffer) {
      return decoder.readBytes((ByteBuffer) reuse);
    } else {
      return decoder.readBytes(null);
    }
  }
}
/**
 * Reads a decimal by decoding unscaled bytes with the wrapped reader and applying a
 * fixed scale.
 */
private static class DecimalReader implements ValueReader<BigDecimal> {
  private final ValueReader<byte[]> bytesReader;
  private final int scale;

  private DecimalReader(ValueReader<byte[]> bytesReader, int scale) {
    this.bytesReader = bytesReader;
    this.scale = scale;
  }

  @Override
  public BigDecimal read(Decoder decoder, Object ignored) throws IOException {
    // BigInteger copies its input, so the unscaled bytes cannot be reused; read fresh each time
    byte[] unscaled = bytesReader.read(decoder, null);
    return new BigDecimal(new BigInteger(unscaled), scale);
  }
}
/**
 * Reads a union value by reading the branch index and delegating to the reader for
 * that branch.
 */
private static class UnionReader implements ValueReader<Object> {
  private final ValueReader[] readers;

  private UnionReader(List<ValueReader<?>> readers) {
    // copy into an array to avoid per-read list indexing; toArray replaces the manual copy loop
    this.readers = readers.toArray(new ValueReader[0]);
  }

  @Override
  public Object read(Decoder decoder, Object reuse) throws IOException {
    // readIndex returns the union branch that was written for this value
    int index = decoder.readIndex();
    return readers[index].read(decoder, reuse);
  }
}
/** Reads an Avro enum and returns the symbol name for the decoded ordinal. */
private static class EnumReader implements ValueReader<String> {
  private final String[] symbols;

  private EnumReader(List<String> symbols) {
    // toArray replaces the manual element-by-element copy loop
    this.symbols = symbols.toArray(new String[0]);
  }

  @Override
  public String read(Decoder decoder, Object ignored) throws IOException {
    int index = decoder.readEnum();
    return symbols[index];
  }
}
/**
 * Reads an Avro array into a {@link LinkedList}, chunk by chunk.
 * <p>
 * Container reuse is double-buffered: the list this reader returned on the previous
 * call (lastList) is cleared and refilled as this call's result, while the caller's
 * reused list is only iterated to supply per-element reuse candidates. Note that if the
 * caller passes back the exact list returned last time, that list is cleared before it
 * is iterated, so element-level reuse only takes effect when two containers alternate.
 */
private static class ArrayReader<T> implements ValueReader<Collection<T>> {
private final ValueReader<T> elementReader;
// the container returned by the previous call; recycled as the next result container
private LinkedList<?> lastList = null;
private ArrayReader(ValueReader<T> elementReader) {
this.elementReader = elementReader;
}
@Override
@SuppressWarnings("unchecked")
public Collection<T> read(Decoder decoder, Object reused) throws IOException {
// recycle the previously returned list as this call's result container
LinkedList<T> resultList;
if (lastList != null) {
lastList.clear();
resultList = (LinkedList<T>) lastList;
} else {
resultList = Lists.newLinkedList();
}
// remember the caller-supplied list; its old elements seed per-element reuse below
if (reused instanceof LinkedList) {
this.lastList = (LinkedList<?>) reused;
} else {
this.lastList = null;
}
// Avro arrays arrive in chunks; arrayNext() returns 0 when the array ends
long chunkLength = decoder.readArrayStart();
Iterator<?> elIter = lastList != null ? lastList.iterator() : emptyIterator();
while (chunkLength > 0) {
for (long i = 0; i < chunkLength; i += 1) {
// pass the old element (if any) so the element reader can reuse it
Object lastValue = elIter.hasNext() ? elIter.next() : null;
resultList.addLast(elementReader.read(decoder, lastValue));
}
chunkLength = decoder.arrayNext();
}
return resultList;
}
}
/**
 * Reads a map encoded as an Avro array of key/value pairs (uses readArrayStart and
 * arrayNext, unlike {@link MapReader} which uses the map-specific decoder calls).
 * <p>
 * The caller-supplied map is cleared and refilled in place when one is provided.
 */
private static class ArrayMapReader<K, V> implements ValueReader<Map<K, V>> {
private final ValueReader<K> keyReader;
private final ValueReader<V> valueReader;
// the reusable container from the caller; refilled in place when present
private Map lastMap = null;
private ArrayMapReader(ValueReader<K> keyReader, ValueReader<V> valueReader) {
this.keyReader = keyReader;
this.valueReader = valueReader;
}
@Override
@SuppressWarnings("unchecked")
public Map<K, V> read(Decoder decoder, Object reuse) throws IOException {
if (reuse instanceof Map) {
this.lastMap = (Map<?, ?>) reuse;
} else {
this.lastMap = null;
}
Map<K, V> resultMap;
if (lastMap != null) {
lastMap.clear();
resultMap = (Map<K, V>) lastMap;
} else {
resultMap = Maps.newLinkedHashMap();
}
long chunkLength = decoder.readArrayStart();
// NOTE(review): when lastMap != null it was cleared just above, so this iterator is
// always empty and the per-entry reuse branch below never runs — confirm intent.
Iterator<Map.Entry<?, ?>> kvIter = lastMap != null ?
lastMap.entrySet().iterator() :
emptyIterator();
while (chunkLength > 0) {
for (long i = 0; i < chunkLength; i += 1) {
K key;
V value;
if (kvIter.hasNext()) {
// reuse the previous entry's key and value as decode targets
Map.Entry<?, ?> last = kvIter.next();
key = keyReader.read(decoder, last.getKey());
value = valueReader.read(decoder, last.getValue());
} else {
key = keyReader.read(decoder, null);
value = valueReader.read(decoder, null);
}
resultMap.put(key, value);
}
chunkLength = decoder.arrayNext();
}
return resultMap;
}
}
/**
 * Reads an Avro map (the standard map encoding, via readMapStart and mapNext) into a
 * linked map, preserving encounter order.
 * <p>
 * The caller-supplied map is cleared and refilled in place when one is provided.
 */
private static class MapReader<K, V> implements ValueReader<Map<K, V>> {
private final ValueReader<K> keyReader;
private final ValueReader<V> valueReader;
// the reusable container from the caller; refilled in place when present
private Map lastMap = null;
private MapReader(ValueReader<K> keyReader, ValueReader<V> valueReader) {
this.keyReader = keyReader;
this.valueReader = valueReader;
}
@Override
@SuppressWarnings("unchecked")
public Map<K, V> read(Decoder decoder, Object reuse) throws IOException {
if (reuse instanceof Map) {
this.lastMap = (Map<?, ?>) reuse;
} else {
this.lastMap = null;
}
Map<K, V> resultMap;
if (lastMap != null) {
lastMap.clear();
resultMap = (Map<K, V>) lastMap;
} else {
resultMap = Maps.newLinkedHashMap();
}
long chunkLength = decoder.readMapStart();
// NOTE(review): when lastMap != null it was cleared just above, so this iterator is
// always empty and the per-entry reuse branch below never runs — confirm intent.
Iterator<Map.Entry<?, ?>> kvIter = lastMap != null ?
lastMap.entrySet().iterator() :
emptyIterator();
while (chunkLength > 0) {
for (long i = 0; i < chunkLength; i += 1) {
K key;
V value;
if (kvIter.hasNext()) {
// reuse the previous entry's key and value as decode targets
Map.Entry<?, ?> last = kvIter.next();
key = keyReader.read(decoder, last.getKey());
value = valueReader.read(decoder, last.getValue());
} else {
key = keyReader.read(decoder, null);
value = valueReader.read(decoder, null);
}
resultMap.put(key, value);
}
chunkLength = decoder.mapNext();
}
return resultMap;
}
}
/**
 * Base reader for struct (record) types.
 * <p>
 * Subclasses supply the concrete container via {@link #reuseOrCreate(Object)} and
 * position-based accessors. When the decoder is a {@link ResolvingDecoder}, fields are
 * read in the writer-resolved order and fields absent from the written data are left as
 * whatever {@code reuseOrCreate} produced (nulls by default).
 *
 * @param <S> the struct type produced by this reader
 */
public abstract static class StructReader<S> implements ValueReader<S> {
  private final ValueReader<?>[] readers;

  protected StructReader(List<ValueReader<?>> readers) {
    // toArray replaces the manual element-by-element copy loop
    this.readers = readers.toArray(new ValueReader<?>[0]);
  }

  /** Returns the container to populate: the reusable object when usable, otherwise a new one. */
  protected abstract S reuseOrCreate(Object reuse);

  /** Returns the current value at {@code pos}, used as the reuse candidate for that field. */
  protected abstract Object get(S struct, int pos);

  /** Stores {@code value} at {@code pos} in the container. */
  protected abstract void set(S struct, int pos, Object value);

  /** Returns the field reader for position {@code pos}. */
  public ValueReader<?> reader(int pos) {
    return readers[pos];
  }

  @Override
  public S read(Decoder decoder, Object reuse) throws IOException {
    S struct = reuseOrCreate(reuse);
    if (decoder instanceof ResolvingDecoder) {
      // this may not set all of the fields. nulls are set by default.
      for (org.apache.avro.Schema.Field field : ((ResolvingDecoder) decoder).readFieldOrder()) {
        Object reusedValue = get(struct, field.pos());
        set(struct, field.pos(), readers[field.pos()].read(decoder, reusedValue));
      }
    } else {
      for (int i = 0; i < readers.length; i += 1) {
        Object reusedValue = get(struct, i);
        set(struct, i, readers[i].read(decoder, reusedValue));
      }
    }
    return struct;
  }
}
/** Struct reader that materializes records as {@link GenericData.Record}. */
private static class RecordReader extends StructReader<GenericData.Record> {
  private final Schema recordSchema;

  private RecordReader(List<ValueReader<?>> readers, Schema recordSchema) {
    super(readers);
    this.recordSchema = recordSchema;
  }

  @Override
  protected GenericData.Record reuseOrCreate(Object reuse) {
    // reuse the previous record when possible; otherwise allocate one for the schema
    return reuse instanceof GenericData.Record
        ? (GenericData.Record) reuse
        : new GenericData.Record(recordSchema);
  }

  @Override
  protected Object get(GenericData.Record struct, int pos) {
    return struct.get(pos);
  }

  @Override
  protected void set(GenericData.Record struct, int pos, Object value) {
    struct.put(pos, value);
  }
}
/**
 * Struct reader that materializes records as instances of a specific
 * {@link IndexedRecord} class, constructed reflectively via {@link DynConstructors}.
 */
static class IndexedRecordReader<R extends IndexedRecord> extends StructReader<R> {
private final Class<R> recordClass;
private final DynConstructors.Ctor<R> ctor;
private final Schema schema;
IndexedRecordReader(List<ValueReader<?>> readers, Class<R> recordClass, Schema schema) {
super(readers);
this.recordClass = recordClass;
// prefer a hidden (Schema) constructor; fall back to a hidden no-arg constructor
this.ctor = DynConstructors.builder(IndexedRecord.class)
.hiddenImpl(recordClass, Schema.class)
.hiddenImpl(recordClass)
.build();
this.schema = schema;
}
@Override
protected R reuseOrCreate(Object reuse) {
if (recordClass.isInstance(reuse)) {
return recordClass.cast(reuse);
} else {
// schema is passed even when the no-arg constructor was selected — presumably
// DynConstructors drops extra arguments in that case; TODO confirm
return ctor.newInstance(schema);
}
}
@Override
protected Object get(R struct, int pos) {
return struct.get(pos);
}
@Override
protected void set(R struct, int pos, Object value) {
struct.put(pos, value);
}
}
}
| 6,353 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroIO.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.common.DynClasses;
import com.netflix.iceberg.common.DynConstructors;
import com.netflix.iceberg.io.DelegatingInputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.avro.file.SeekableInput;
import java.io.IOException;
import java.io.InputStream;
class AvroIO {
private AvroIO() {
}
private static final Class<?> fsDataInputStreamClass = DynClasses.builder()
.impl("org.apache.hadoop.fs.FSDataInputStream")
.orNull()
.build();
private static final boolean relocated =
"org.apache.avro.file.SeekableInput".equals(SeekableInput.class.getName());
private static final DynConstructors.Ctor<SeekableInput> avroFsInputCtor =
!relocated && fsDataInputStreamClass != null ?
DynConstructors.builder(SeekableInput.class)
.impl("org.apache.hadoop.fs.AvroFSInput", fsDataInputStreamClass, Long.TYPE)
.build() :
null;
static SeekableInput stream(SeekableInputStream stream, long length) {
if (stream instanceof DelegatingInputStream) {
InputStream wrapped = ((DelegatingInputStream) stream).getDelegate();
if (avroFsInputCtor != null && fsDataInputStreamClass != null &&
fsDataInputStreamClass.isInstance(wrapped)) {
return avroFsInputCtor.newInstance(wrapped, length);
}
}
return new AvroInputStreamAdapter(stream, length);
}
private static class AvroInputStreamAdapter extends SeekableInputStream implements SeekableInput {
private final SeekableInputStream stream;
private final long length;
public AvroInputStreamAdapter(SeekableInputStream stream, long length) {
this.stream = stream;
this.length = length;
}
@Override
public void close() throws IOException {
stream.close();
}
@Override
public long getPos() throws IOException {
return stream.getPos();
}
@Override
public void seek(long newPos) throws IOException {
stream.seek(newPos);
}
@Override
public long tell() throws IOException {
return getPos();
}
@Override
public long length() throws IOException {
return length;
}
@Override
public int read() throws IOException {
return stream.read();
}
@Override
public int read(byte[] b) throws IOException {
return stream.read(b);
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return stream.read(b, off, len);
}
@Override
public long skip(long n) throws IOException {
return stream.skip(n);
}
@Override
public int available() throws IOException {
return stream.available();
}
@Override
public synchronized void mark(int readlimit) {
stream.mark(readlimit);
}
@Override
public synchronized void reset() throws IOException {
stream.reset();
}
@Override
public boolean markSupported() {
return stream.markSupported();
}
}
}
| 6,354 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueWriter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.io.Encoder;
import java.io.IOException;
public interface ValueWriter<D> {
void write(D datum, Encoder encoder) throws IOException;
}
| 6,355 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroIterable.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.CloseableGroup;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import org.apache.avro.Schema;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.file.FileReader;
import org.apache.avro.io.DatumReader;
import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
public class AvroIterable<D> extends CloseableGroup implements CloseableIterable<D> {
private final InputFile file;
private final DatumReader<D> reader;
private final Long start;
private final Long end;
private final boolean reuseContainers;
private Map<String, String> metadata = null;
AvroIterable(InputFile file, DatumReader<D> reader,
Long start, Long length, boolean reuseContainers) {
this.file = file;
this.reader = reader;
this.start = start;
this.end = start != null ? start + length : null;
this.reuseContainers = reuseContainers;
}
private DataFileReader<D> initMetadata(DataFileReader<D> reader) {
if (metadata == null) {
this.metadata = Maps.newHashMap();
for (String key : reader.getMetaKeys()) {
metadata.put(key, reader.getMetaString(key));
}
}
return reader;
}
public Map<String, String> getMetadata() {
if (metadata == null) {
try (DataFileReader<D> reader = newFileReader()) {
initMetadata(reader);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read metadata for file: %s", file);
}
}
return metadata;
}
@Override
public Iterator<D> iterator() {
FileReader<D> reader = initMetadata(newFileReader());
if (start != null) {
reader = new AvroRangeIterator<>(reader, start, end);
}
if (reuseContainers) {
return new AvroReuseIterator<>(reader);
}
addCloseable(reader);
return reader;
}
private DataFileReader<D> newFileReader() {
try {
return (DataFileReader<D>) DataFileReader.openReader(
AvroIO.stream(file.newStream(), file.getLength()), reader);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to open file: %s", file);
}
}
private static class AvroRangeIterator<D> implements FileReader<D> {
private final FileReader<D> reader;
private final long end;
AvroRangeIterator(FileReader<D> reader, long start, long end) {
this.reader = reader;
this.end = end;
try {
reader.sync(start);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to find sync past position %d", start);
}
}
@Override
public Schema getSchema() {
return reader.getSchema();
}
@Override
public boolean hasNext() {
try {
return (reader.hasNext() && !reader.pastSync(end));
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to check range end: %d", end);
}
}
@Override
public D next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
return reader.next();
}
@Override
public D next(D reuse) {
if (!hasNext()) {
throw new NoSuchElementException();
}
try {
return reader.next(reuse);
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read next record");
}
}
@Override
public void sync(long position) throws IOException {
reader.sync(position);
}
@Override
public boolean pastSync(long position) throws IOException {
return reader.pastSync(position);
}
@Override
public long tell() throws IOException {
return reader.tell();
}
@Override
public void close() throws IOException {
reader.close();
}
@Override
public Iterator<D> iterator() {
return this;
}
}
private static class AvroReuseIterator<D> implements Iterator<D>, Closeable {
private final FileReader<D> reader;
private D reused = null;
AvroReuseIterator(FileReader<D> reader) {
this.reader = reader;
}
@Override
public boolean hasNext() {
return reader.hasNext();
}
@Override
public D next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
try {
this.reused = reader.next(reused);
return reused;
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to read next record");
}
}
@Override
public void close() throws IOException {
reader.close();
}
}
}
| 6,356 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroSchemaVisitor.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.LinkedList;
import java.util.List;
public abstract class AvroSchemaVisitor<T> {
public static <T> T visit(Schema schema, AvroSchemaVisitor<T> visitor) {
switch (schema.getType()) {
case RECORD:
// check to make sure this hasn't been visited before
String name = schema.getFullName();
Preconditions.checkState(!visitor.recordLevels.contains(name),
"Cannot process recursive Avro record %s", name);
visitor.recordLevels.push(name);
List<Schema.Field> fields = schema.getFields();
List<String> names = Lists.newArrayListWithExpectedSize(fields.size());
List<T> results = Lists.newArrayListWithExpectedSize(fields.size());
for (Schema.Field field : schema.getFields()) {
names.add(field.name());
results.add(visit(field.schema(), visitor));
}
visitor.recordLevels.pop();
return visitor.record(schema, names, results);
case UNION:
List<Schema> types = schema.getTypes();
List<T> options = Lists.newArrayListWithExpectedSize(types.size());
for (Schema type : types) {
options.add(visit(type, visitor));
}
return visitor.union(schema, options);
case ARRAY:
return visitor.array(schema, visit(schema.getElementType(), visitor));
case MAP:
return visitor.map(schema, visit(schema.getValueType(), visitor));
default:
return visitor.primitive(schema);
}
}
protected LinkedList<String> recordLevels = Lists.newLinkedList();
public T record(Schema record, List<String> names, List<T> fields) {
return null;
}
public T union(Schema union, List<T> options) {
return null;
}
public T array(Schema array, T element) {
return null;
}
public T map(Schema map, T value) {
return null;
}
public T primitive(Schema primitive) {
return null;
}
}
| 6,357 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.io.Decoder;
import java.io.IOException;
public interface ValueReader<T> {
T read(Decoder decoder, Object reuse) throws IOException;
}
| 6,358 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/PruneColumns.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getElementId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getFieldId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getKeyId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getValueId;
class PruneColumns extends AvroSchemaVisitor<Schema> {
private final Set<Integer> selectedIds;
PruneColumns(Set<Integer> selectedIds) {
this.selectedIds = selectedIds;
}
public Schema rootSchema(Schema record) {
Schema result = visit(record, this);
if (result != null) {
return result;
}
return copyRecord(record, ImmutableList.of());
}
@Override
public Schema record(Schema record, List<String> names, List<Schema> fields) {
// Then this should access the record's fields by name
List<Schema.Field> filteredFields = Lists.newArrayListWithExpectedSize(fields.size());
boolean hasChange = false;
for (Schema.Field field : record.getFields()) {
int fieldId = getFieldId(field);
Schema fieldSchema = fields.get(field.pos());
// All primitives are selected by selecting the field, but map and list
// types can be selected by projecting the keys, values, or elements.
// This creates two conditions where the field should be selected: if the
// id is selected or if the result of the field is non-null. The only
// case where the converted field is non-null is when a map or list is
// selected by lower IDs.
if (selectedIds.contains(fieldId)) {
filteredFields.add(copyField(field, field.schema()));
} else if (fieldSchema != null) {
hasChange = true;
filteredFields.add(copyField(field, fieldSchema));
}
}
if (hasChange) {
return copyRecord(record, filteredFields);
} else if (filteredFields.size() == record.getFields().size()) {
return record;
} else if (!filteredFields.isEmpty()) {
return copyRecord(record, filteredFields);
}
return null;
}
@Override
public Schema union(Schema union, List<Schema> options) {
Preconditions.checkState(AvroSchemaUtil.isOptionSchema(union),
"Invalid schema: non-option unions are not supported: {}", union);
// only unions with null are allowed, and a null schema results in null
Schema pruned = null;
if (options.get(0) != null) {
pruned = options.get(0);
} else if (options.get(1) != null) {
pruned = options.get(1);
}
if (pruned != null) {
if (pruned != AvroSchemaUtil.fromOption(union)) {
return AvroSchemaUtil.toOption(pruned);
}
return union;
}
return null;
}
@Override
public Schema array(Schema array, Schema element) {
if (array.getLogicalType() instanceof LogicalMap) {
Schema keyValue = array.getElementType();
int keyId = getFieldId(keyValue.getField("key"));
int valueId = getFieldId(keyValue.getField("value"));
// if either key or value is selected, the whole map must be projected
if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) {
return array;
} else if (element != null) {
if (keyValue.getField("value").schema() != element.getField("value").schema()) {
// the value must be a projection
return AvroSchemaUtil.createMap(
keyId, keyValue.getField("key").schema(),
valueId, element.getField("value").schema());
} else {
return array;
}
}
} else {
int elementId = getElementId(array);
if (selectedIds.contains(elementId)) {
return array;
} else if (element != null) {
if (element != array.getElementType()) {
// the element must be a projection
return Schema.createArray(element);
}
return array;
}
}
return null;
}
@Override
public Schema map(Schema map, Schema value) {
int keyId = getKeyId(map);
int valueId = getValueId(map);
// if either key or value is selected, the whole map must be projected
if (selectedIds.contains(keyId) || selectedIds.contains(valueId)) {
return map;
} else if (value != null) {
if (value != map.getValueType()) {
// the value must be a projection
return Schema.createMap(value);
}
return map;
}
return null;
}
@Override
public Schema primitive(Schema primitive) {
// primitives are not selected directly
return null;
}
private static Schema copyRecord(Schema record, List<Schema.Field> newFields) {
Schema copy = Schema.createRecord(record.getName(),
record.getDoc(), record.getNamespace(), record.isError(), newFields);
for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
copy.addProp(prop.getKey(), prop.getValue());
}
return copy;
}
private static Schema.Field copyField(Schema.Field field, Schema newSchema) {
Schema.Field copy = new Schema.Field(field.name(),
newSchema, field.doc(), field.defaultVal(), field.order());
for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
copy.addProp(prop.getKey(), prop.getValue());
}
return copy;
}
}
| 6,359 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/AvroSchemaUtil.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.apache.avro.Schema.Type.ARRAY;
import static org.apache.avro.Schema.Type.MAP;
import static org.apache.avro.Schema.Type.RECORD;
import static org.apache.avro.Schema.Type.UNION;
/**
 * Utilities for converting between Iceberg and Avro schemas and for reading the
 * Iceberg field-id properties that conversion attaches to Avro schemas.
 */
public class AvroSchemaUtil {
  // Avro schema property names used to carry Iceberg field ids
  public static final String FIELD_ID_PROP = "field-id";
  public static final String KEY_ID_PROP = "key-id";
  public static final String VALUE_ID_PROP = "value-id";
  public static final String ELEMENT_ID_PROP = "element-id";
  // marks timestamp-micros schemas that represent UTC-adjusted (timestamptz) values
  public static final String ADJUST_TO_UTC_PROP = "adjust-to-utc";

  private static final Schema NULL = Schema.create(Schema.Type.NULL);

  /** Converts an Iceberg schema to an Avro schema whose root record is named tableName. */
  public static Schema convert(com.netflix.iceberg.Schema schema,
                               String tableName) {
    return convert(schema, ImmutableMap.of(schema.asStruct(), tableName));
  }

  /** Converts an Iceberg schema to an Avro schema, naming struct types from the given map. */
  public static Schema convert(com.netflix.iceberg.Schema schema,
                               Map<Types.StructType, String> names) {
    return TypeUtil.visit(schema, new TypeToSchema(names));
  }

  /** Converts an Iceberg type to an Avro schema with generated record names. */
  public static Schema convert(Type type) {
    return convert(type, ImmutableMap.of());
  }

  /** Converts an Iceberg struct to an Avro record schema with the given record name. */
  public static Schema convert(Types.StructType type, String name) {
    return convert(type, ImmutableMap.of(type, name));
  }

  /** Converts an Iceberg type to an Avro schema, naming struct types from the given map. */
  public static Schema convert(Type type, Map<Types.StructType, String> names) {
    return TypeUtil.visit(type, new TypeToSchema(names));
  }

  /** Converts an Avro schema to an Iceberg type. */
  public static Type convert(Schema schema) {
    return AvroSchemaVisitor.visit(schema, new SchemaToType(schema));
  }

  /**
   * Converts a struct and all of its nested types, returning a map from each visited
   * Iceberg type to the Avro schema it was converted to.
   */
  public static Map<Type, Schema> convertTypes(Types.StructType type, String name) {
    TypeToSchema converter = new TypeToSchema(ImmutableMap.of(type, name));
    TypeUtil.visit(type, converter);
    return ImmutableMap.copyOf(converter.getConversionMap());
  }

  /** Returns a copy of the schema containing only the fields with selected ids. */
  public static Schema pruneColumns(Schema schema, Set<Integer> selectedIds) {
    return new PruneColumns(selectedIds).rootSchema(schema);
  }

  /** Builds a projection of the Avro schema that matches the expected Iceberg schema. */
  public static Schema buildAvroProjection(Schema schema, com.netflix.iceberg.Schema expected,
                                           Map<String, String> renames) {
    return AvroCustomOrderSchemaVisitor.visit(schema, new BuildAvroProjection(expected, renames));
  }

  /**
   * Returns whether the schema represents an Iceberg timestamptz: a timestamp-micros
   * logical type whose adjust-to-utc property is true (as a Boolean or a String).
   */
  public static boolean isTimestamptz(Schema schema) {
    // instanceof is null-safe, so the previous explicit null check was redundant
    if (schema.getLogicalType() instanceof LogicalTypes.TimestampMicros) {
      // timestamptz is adjusted to UTC
      Object value = schema.getObjectProp(ADJUST_TO_UTC_PROP);
      if (value instanceof Boolean) {
        return (Boolean) value;
      } else if (value instanceof String) {
        return Boolean.parseBoolean((String) value);
      }
    }
    return false;
  }

  /** Returns whether the schema is a 2-branch union where one branch is null. */
  static boolean isOptionSchema(Schema schema) {
    if (schema.getType() == UNION && schema.getTypes().size() == 2) {
      if (schema.getTypes().get(0).getType() == Schema.Type.NULL) {
        return true;
      } else if (schema.getTypes().get(1).getType() == Schema.Type.NULL) {
        return true;
      }
    }
    return false;
  }

  /** Wraps the schema in an option (union with null), validating existing unions. */
  static Schema toOption(Schema schema) {
    if (schema.getType() == UNION) {
      // use Guava's lazy %s template instead of eager string concatenation
      Preconditions.checkArgument(isOptionSchema(schema),
          "Union schemas are not supported: %s", schema);
      return schema;
    } else {
      return Schema.createUnion(NULL, schema);
    }
  }

  /** Returns the non-null branch of an option schema. */
  static Schema fromOption(Schema schema) {
    // fix: Guava Preconditions substitutes %s placeholders, not SLF4J-style {};
    // with {} the schema was never interpolated into the message
    Preconditions.checkArgument(schema.getType() == UNION,
        "Expected union schema but was passed: %s", schema);
    Preconditions.checkArgument(schema.getTypes().size() == 2,
        "Expected optional schema, but was passed: %s", schema);
    if (schema.getTypes().get(0).getType() == Schema.Type.NULL) {
      return schema.getTypes().get(1);
    } else {
      return schema.getTypes().get(0);
    }
  }

  /** Returns the non-null schema from a 2-element list of option branches. */
  static Schema fromOptions(List<Schema> options) {
    // fix: %s instead of {} (see fromOption)
    Preconditions.checkArgument(options.size() == 2,
        "Expected two schemas, but was passed: %s options", options.size());
    if (options.get(0).getType() == Schema.Type.NULL) {
      return options.get(1);
    } else {
      return options.get(0);
    }
  }

  /** Returns whether the schema is a 2-field record, the shape used for map entries. */
  static boolean isKeyValueSchema(Schema schema) {
    return (schema.getType() == RECORD && schema.getFields().size() == 2);
  }

  /**
   * Creates an array-of-record schema annotated as a logical map. Used for maps whose
   * keys are not strings, because Avro's native map type supports only string keys.
   */
  static Schema createMap(int keyId, Schema keySchema,
                          int valueId, Schema valueSchema) {
    String keyValueName = "k" + keyId + "_v" + valueId;
    Schema.Field keyField = new Schema.Field("key", keySchema, null, null);
    keyField.addProp(FIELD_ID_PROP, keyId);
    Schema.Field valueField = new Schema.Field("value", valueSchema, null,
        isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE : null);
    valueField.addProp(FIELD_ID_PROP, valueId);
    return LogicalMap.get().addToSchema(Schema.createArray(Schema.createRecord(
        keyValueName, null, null, false, ImmutableList.of(keyField, valueField))));
  }

  /**
   * Creates a projection of a logical map's key-value record, adding aliases for the
   * original record and field names so Avro schema resolution can still match them.
   */
  static Schema createProjectionMap(String recordName,
                                    int keyId, String keyName, Schema keySchema,
                                    int valueId, String valueName, Schema valueSchema) {
    String keyValueName = "k" + keyId + "_v" + valueId;
    Schema.Field keyField = new Schema.Field("key", keySchema, null, null);
    if (!"key".equals(keyName)) {
      keyField.addAlias(keyName);
    }
    keyField.addProp(FIELD_ID_PROP, keyId);
    Schema.Field valueField = new Schema.Field("value", valueSchema, null,
        isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE : null);
    valueField.addProp(FIELD_ID_PROP, valueId);
    if (!"value".equals(valueName)) {
      valueField.addAlias(valueName);
    }
    Schema keyValueRecord = Schema.createRecord(
        keyValueName, null, null, false, ImmutableList.of(keyField, valueField));
    if (!keyValueName.equals(recordName)) {
      keyValueRecord.addAlias(recordName);
    }
    return LogicalMap.get().addToSchema(Schema.createArray(keyValueRecord));
  }

  // reads an id property from a schema, unwrapping option unions first
  private static int getId(Schema schema, String propertyName) {
    if (schema.getType() == UNION) {
      return getId(fromOption(schema), propertyName);
    }
    Object id = schema.getObjectProp(propertyName);
    Preconditions.checkNotNull(id, "Missing expected '%s' property", propertyName);
    return toInt(id);
  }

  /** Returns the Iceberg key id stored on a map schema. */
  public static int getKeyId(Schema schema) {
    Preconditions.checkArgument(schema.getType() == MAP,
        "Cannot get map key id for non-map schema: %s", schema);
    return getId(schema, KEY_ID_PROP);
  }

  /** Returns the Iceberg value id stored on a map schema. */
  public static int getValueId(Schema schema) {
    Preconditions.checkArgument(schema.getType() == MAP,
        "Cannot get map value id for non-map schema: %s", schema);
    return getId(schema, VALUE_ID_PROP);
  }

  /** Returns the Iceberg element id stored on an array schema. */
  public static int getElementId(Schema schema) {
    Preconditions.checkArgument(schema.getType() == ARRAY,
        "Cannot get array element id for non-array schema: %s", schema);
    return getId(schema, ELEMENT_ID_PROP);
  }

  /** Returns the Iceberg field id stored on a record field. */
  public static int getFieldId(Schema.Field field) {
    Object id = field.getObjectProp(FIELD_ID_PROP);
    Preconditions.checkNotNull(id, "Missing expected '%s' property", FIELD_ID_PROP);
    return toInt(id);
  }

  // id properties may be stored as numbers or as strings depending on the writer
  private static int toInt(Object value) {
    if (value instanceof Number) {
      return ((Number) value).intValue();
    } else if (value instanceof String) {
      return Integer.parseInt((String) value);
    }
    throw new UnsupportedOperationException("Cannot coerce value to int: " + value);
  }

  /**
   * Copies a record schema with new fields and, optionally, a new name. When renamed,
   * an alias for the original name is added so schema resolution still matches.
   */
  static Schema copyRecord(Schema record, List<Schema.Field> newFields, String newName) {
    Schema copy;
    if (newName != null) {
      copy = Schema.createRecord(newName, record.getDoc(), null, record.isError(), newFields);
      // the namespace is defaulted to the record's namespace if it is null, which causes renames
      // without the namespace to fail. using "" instead of null changes this behavior to match the
      // original schema.
      copy.addAlias(record.getName(), record.getNamespace() == null ? "" : record.getNamespace());
    } else {
      copy = Schema.createRecord(record.getName(),
          record.getDoc(), record.getNamespace(), record.isError(), newFields);
    }
    for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
      copy.addProp(prop.getKey(), prop.getValue());
    }
    return copy;
  }

  /** Copies a field with a new schema and name, aliasing the original name if renamed. */
  static Schema.Field copyField(Schema.Field field, Schema newSchema, String newName) {
    Schema.Field copy = new Schema.Field(newName,
        newSchema, field.doc(), field.defaultVal(), field.order());
    for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
      copy.addProp(prop.getKey(), prop.getValue());
    }
    if (!newName.equals(field.name())) {
      copy.addAlias(field.name());
    }
    return copy;
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
import static org.apache.avro.JsonProperties.NULL_VALUE;
/**
 * A TypeUtil.SchemaVisitor that converts an Iceberg schema or type to an Avro Schema.
 *
 * Conversion results are cached per Iceberg type in {@code results} so that a repeated
 * struct, list, or map type maps to the same Avro schema instance; the full mapping can
 * be retrieved afterwards with {@link #getConversionMap()}.
 */
class TypeToSchema extends TypeUtil.SchemaVisitor<Schema> {
// shared schema constants reused for every primitive conversion
private static final Schema BOOLEAN_SCHEMA = Schema.create(Schema.Type.BOOLEAN);
private static final Schema INTEGER_SCHEMA = Schema.create(Schema.Type.INT);
private static final Schema LONG_SCHEMA = Schema.create(Schema.Type.LONG);
private static final Schema FLOAT_SCHEMA = Schema.create(Schema.Type.FLOAT);
private static final Schema DOUBLE_SCHEMA = Schema.create(Schema.Type.DOUBLE);
private static final Schema DATE_SCHEMA = LogicalTypes.date()
.addToSchema(Schema.create(Schema.Type.INT));
private static final Schema TIME_SCHEMA = LogicalTypes.timeMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
// timestamp and timestamptz share the timestamp-micros logical type; they are
// distinguished only by the adjust-to-utc property added in the static block below
private static final Schema TIMESTAMP_SCHEMA = LogicalTypes.timestampMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
private static final Schema TIMESTAMPTZ_SCHEMA = LogicalTypes.timestampMicros()
.addToSchema(Schema.create(Schema.Type.LONG));
private static final Schema STRING_SCHEMA = Schema.create(Schema.Type.STRING);
// UUIDs are stored as 16-byte fixed values
private static final Schema UUID_SCHEMA = LogicalTypes.uuid()
.addToSchema(Schema.createFixed("uuid_fixed", null, null, 16));
private static final Schema BINARY_SCHEMA = Schema.create(Schema.Type.BYTES);
static {
TIMESTAMP_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, false);
TIMESTAMPTZ_SCHEMA.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, true);
}
// cache of Iceberg type -> converted Avro schema
private final Map<Type, Schema> results = Maps.newHashMap();
// preferred record names for struct types; structs missing here get generated names
private final Map<Types.StructType, String> names;
TypeToSchema(Map<Types.StructType, String> names) {
this.names = names;
}
// returns the live cache of all conversions performed so far (not a copy)
Map<Type, Schema> getConversionMap() {
return results;
}
@Override
public Schema schema(com.netflix.iceberg.Schema schema, Schema structSchema) {
// the converted root struct is the schema's record
return structSchema;
}
@Override
public Schema struct(Types.StructType struct, List<Schema> fieldSchemas) {
Schema recordSchema = results.get(struct);
if (recordSchema != null) {
return recordSchema;
}
String recordName = names.get(struct);
if (recordName == null) {
// no configured name: derive one from the current field id
// (fieldIds appears to be the visitor's field-id stack — TODO confirm in TypeUtil)
recordName = "r" + fieldIds.peek();
}
List<Types.NestedField> structFields = struct.fields();
List<Schema.Field> fields = Lists.newArrayListWithExpectedSize(fieldSchemas.size());
for (int i = 0; i < structFields.size(); i += 1) {
Types.NestedField structField = structFields.get(i);
// optional fields get a null default so they can be omitted when writing
Schema.Field field = new Schema.Field(
structField.name(), fieldSchemas.get(i), null,
structField.isOptional() ? NULL_VALUE : null);
field.addProp(AvroSchemaUtil.FIELD_ID_PROP, structField.fieldId());
fields.add(field);
}
recordSchema = Schema.createRecord(recordName, null, null, false, fields);
results.put(struct, recordSchema);
return recordSchema;
}
@Override
public Schema field(Types.NestedField field, Schema fieldSchema) {
// optional fields become a union of null and the field's schema
if (field.isOptional()) {
return toOption(fieldSchema);
} else {
return fieldSchema;
}
}
@Override
public Schema list(Types.ListType list, Schema elementSchema) {
Schema listSchema = results.get(list);
if (listSchema != null) {
return listSchema;
}
// optional elements become a union of null and the element schema
if (list.isElementOptional()) {
listSchema = Schema.createArray(toOption(elementSchema));
} else {
listSchema = Schema.createArray(elementSchema);
}
listSchema.addProp(AvroSchemaUtil.ELEMENT_ID_PROP, list.elementId());
results.put(list, listSchema);
return listSchema;
}
@Override
public Schema map(Types.MapType map, Schema keySchema, Schema valueSchema) {
Schema mapSchema = results.get(map);
if (mapSchema != null) {
return mapSchema;
}
if (keySchema.getType() == Schema.Type.STRING) {
// if the map has string keys, use Avro's map type
mapSchema = Schema.createMap(
map.isValueOptional() ? toOption(valueSchema) : valueSchema);
mapSchema.addProp(AvroSchemaUtil.KEY_ID_PROP, map.keyId());
mapSchema.addProp(AvroSchemaUtil.VALUE_ID_PROP, map.valueId());
} else {
// non-string keys need the array-of-key/value-records representation
mapSchema = AvroSchemaUtil.createMap(map.keyId(), keySchema,
map.valueId(), map.isValueOptional() ? toOption(valueSchema) : valueSchema);
}
results.put(map, mapSchema);
return mapSchema;
}
@Override
public Schema primitive(Type.PrimitiveType primitive) {
Schema primitiveSchema;
switch (primitive.typeId()) {
case BOOLEAN:
primitiveSchema = BOOLEAN_SCHEMA;
break;
case INTEGER:
primitiveSchema = INTEGER_SCHEMA;
break;
case LONG:
primitiveSchema = LONG_SCHEMA;
break;
case FLOAT:
primitiveSchema = FLOAT_SCHEMA;
break;
case DOUBLE:
primitiveSchema = DOUBLE_SCHEMA;
break;
case DATE:
primitiveSchema = DATE_SCHEMA;
break;
case TIME:
primitiveSchema = TIME_SCHEMA;
break;
case TIMESTAMP:
// pick the UTC-adjusted variant for timestamptz
if (((Types.TimestampType) primitive).shouldAdjustToUTC()) {
primitiveSchema = TIMESTAMPTZ_SCHEMA;
} else {
primitiveSchema = TIMESTAMP_SCHEMA;
}
break;
case STRING:
primitiveSchema = STRING_SCHEMA;
break;
case UUID:
primitiveSchema = UUID_SCHEMA;
break;
case FIXED:
Types.FixedType fixed = (Types.FixedType) primitive;
primitiveSchema = Schema.createFixed("fixed_" + fixed.length(), null, null, fixed.length());
break;
case BINARY:
primitiveSchema = BINARY_SCHEMA;
break;
case DECIMAL:
// decimals are stored as fixed, sized to hold the unscaled value at full precision
// (note: 'decimalRequriedBytes' matches the existing TypeUtil method name as spelled)
Types.DecimalType decimal = (Types.DecimalType) primitive;
primitiveSchema = LogicalTypes.decimal(decimal.precision(), decimal.scale())
.addToSchema(Schema.createFixed(
"decimal_" + decimal.precision() + "_" + decimal.scale(),
null, null, TypeUtil.decimalRequriedBytes(decimal.precision())));
break;
default:
throw new UnsupportedOperationException(
"Unsupported type ID: " + primitive.typeId());
}
results.put(primitive, primitiveSchema);
return primitiveSchema;
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.MapMaker;
import com.netflix.iceberg.common.DynClasses;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.ResolvingDecoder;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * An Avro DatumReader that reads generic data through a tree of ValueReaders built from
 * the read schema, resolving file schemas against it with cached ResolvingDecoders.
 *
 * @param <T> the Java type produced by this reader
 */
class GenericAvroReader<T> implements DatumReader<T> {
// per-thread cache: read schema -> (file schema -> resolving decoder); weak keys allow
// schemas to be garbage collected once no longer referenced
private static final ThreadLocal<Map<Schema, Map<Schema, ResolvingDecoder>>> DECODER_CACHES =
ThreadLocal.withInitial(() -> new MapMaker().weakKeys().makeMap());
private final Schema readSchema;
// loader used to look up record classes by full name; defaults to the context loader
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private Schema fileSchema = null;
private ValueReader<T> reader = null;
public GenericAvroReader(Schema readSchema) {
this.readSchema = readSchema;
}
@SuppressWarnings("unchecked")
private void initReader() {
this.reader = (ValueReader<T>) AvroSchemaVisitor.visit(readSchema, new ReadBuilder(loader));
}
@Override
public void setSchema(Schema fileSchema) {
// apply read-schema aliases so resolution can match renamed records and fields
this.fileSchema = Schema.applyAliases(fileSchema, readSchema);
initReader();
}
// NOTE(review): the loader is captured when initReader runs (from setSchema), so this
// only takes effect if called before setSchema — confirm callers obey that order
public void setClassLoader(ClassLoader loader) {
this.loader = loader;
}
@Override
public T read(T reuse, Decoder decoder) throws IOException {
ResolvingDecoder resolver = resolve(decoder);
T value = reader.read(resolver, reuse);
// drain skips any trailing file-schema content the read schema does not use
resolver.drain();
return value;
}
// returns a cached ResolvingDecoder for the current file schema, configured over decoder
private ResolvingDecoder resolve(Decoder decoder) throws IOException {
Map<Schema, Map<Schema, ResolvingDecoder>> cache = DECODER_CACHES.get();
Map<Schema, ResolvingDecoder> fileSchemaToResolver = cache
.computeIfAbsent(readSchema, k -> new HashMap<>());
ResolvingDecoder resolver = fileSchemaToResolver.get(fileSchema);
if (resolver == null) {
resolver = newResolver();
fileSchemaToResolver.put(fileSchema, resolver);
}
resolver.configure(decoder);
return resolver;
}
// wraps the checked IOException from resolver construction in a RuntimeIOException
private ResolvingDecoder newResolver() {
try {
return DecoderFactory.get().resolvingDecoder(fileSchema, readSchema, null);
} catch (IOException e) {
throw new RuntimeIOException(e);
}
}
// builds the ValueReader tree for a schema, choosing concrete record classes when found
private static class ReadBuilder extends AvroSchemaVisitor<ValueReader<?>> {
private final ClassLoader loader;
private ReadBuilder(ClassLoader loader) {
this.loader = loader;
}
@Override
@SuppressWarnings("unchecked")
public ValueReader<?> record(Schema record, List<String> names, List<ValueReader<?>> fields) {
try {
// if the record's full name resolves to a class, prefer it; IndexedRecord classes
// get the typed record reader, anything else falls back to generic records
Class<?> recordClass = DynClasses.builder()
.loader(loader)
.impl(record.getFullName())
.buildChecked();
if (IndexedRecord.class.isAssignableFrom(recordClass)) {
return ValueReaders.record(fields, (Class<? extends IndexedRecord>) recordClass, record);
}
return ValueReaders.record(fields, record);
} catch (ClassNotFoundException e) {
// no concrete class available; deliberately fall back to the generic reader
return ValueReaders.record(fields, record);
}
}
@Override
public ValueReader<?> union(Schema union, List<ValueReader<?>> options) {
return ValueReaders.union(options);
}
@Override
public ValueReader<?> array(Schema array, ValueReader<?> elementReader) {
// a logical map is stored as an array of 2-field key/value records
if (array.getLogicalType() instanceof LogicalMap) {
ValueReaders.StructReader<?> keyValueReader = (ValueReaders.StructReader) elementReader;
ValueReader<?> keyReader = keyValueReader.reader(0);
ValueReader<?> valueReader = keyValueReader.reader(1);
if (keyReader == ValueReaders.utf8s()) {
// use String keys rather than Utf8 so map lookups behave as expected
return ValueReaders.arrayMap(ValueReaders.strings(), valueReader);
}
return ValueReaders.arrayMap(keyReader, valueReader);
}
return ValueReaders.array(elementReader);
}
@Override
public ValueReader<?> map(Schema map, ValueReader<?> valueReader) {
// Avro's native map type always has string keys
return ValueReaders.map(ValueReaders.strings(), valueReader);
}
@Override
public ValueReader<?> primitive(Schema primitive) {
LogicalType logicalType = primitive.getLogicalType();
if (logicalType != null) {
switch (logicalType.getName()) {
case "date":
// Spark uses the same representation
return ValueReaders.ints();
case "timestamp-millis":
// adjust to microseconds
ValueReader<Long> longs = ValueReaders.longs();
return (ValueReader<Long>) (decoder, ignored) -> longs.read(decoder, null) * 1000L;
case "timestamp-micros":
// Spark uses the same representation
return ValueReaders.longs();
case "decimal":
// decimals may be backed by either fixed or bytes
ValueReader<byte[]> inner;
switch (primitive.getType()) {
case FIXED:
inner = ValueReaders.fixed(primitive.getFixedSize());
break;
case BYTES:
inner = ValueReaders.bytes();
break;
default:
throw new IllegalArgumentException(
"Invalid primitive type for decimal: " + primitive.getType());
}
LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
return ValueReaders.decimal(inner, decimal.getScale());
case "uuid":
return ValueReaders.uuids();
default:
throw new IllegalArgumentException("Unknown logical type: " + logicalType);
}
}
switch (primitive.getType()) {
case NULL:
return ValueReaders.nulls();
case BOOLEAN:
return ValueReaders.booleans();
case INT:
return ValueReaders.ints();
case LONG:
return ValueReaders.longs();
case FLOAT:
return ValueReaders.floats();
case DOUBLE:
return ValueReaders.doubles();
case STRING:
return ValueReaders.utf8s();
case FIXED:
return ValueReaders.fixed(primitive);
case BYTES:
return ValueReaders.byteBuffers();
default:
throw new IllegalArgumentException("Unsupported type: " + primitive);
}
}
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.avro.AvroSchemaVisitor.visit;
/**
 * An Avro DatumWriter that serializes generic data through a tree of ValueWriters built
 * from the write schema.
 *
 * @param <T> the Java type of datum accepted by this writer
 */
class GenericAvroWriter<T> implements DatumWriter<T> {
  private ValueWriter<T> writer = null;

  public GenericAvroWriter(Schema schema) {
    setSchema(schema);
  }

  @Override
  @SuppressWarnings("unchecked")
  public void setSchema(Schema schema) {
    // rebuild the writer tree for the new schema
    this.writer = (ValueWriter<T>) visit(schema, new WriteBuilder());
  }

  @Override
  public void write(T datum, Encoder out) throws IOException {
    writer.write(datum, out);
  }

  // builds the ValueWriter tree for a schema
  private static class WriteBuilder extends AvroSchemaVisitor<ValueWriter<?>> {
    private WriteBuilder() {
    }

    @Override
    public ValueWriter<?> record(Schema record, List<String> names, List<ValueWriter<?>> fields) {
      return ValueWriters.record(fields);
    }

    @Override
    public ValueWriter<?> union(Schema union, List<ValueWriter<?>> options) {
      // only 2-branch unions of null and one concrete type are supported; use Guava's
      // lazy %s template so the union is only stringified when a check actually fails
      Preconditions.checkArgument(options.contains(ValueWriters.nulls()),
          "Cannot create writer for non-option union: %s", union);
      Preconditions.checkArgument(options.size() == 2,
          "Cannot create writer for non-option union: %s", union);
      // the option writer needs the index of the non-null branch
      if (union.getTypes().get(0).getType() == Schema.Type.NULL) {
        return ValueWriters.option(0, options.get(1));
      } else {
        return ValueWriters.option(1, options.get(0));
      }
    }

    @Override
    public ValueWriter<?> array(Schema array, ValueWriter<?> elementWriter) {
      // a logical map is stored as an array of 2-field key/value records
      if (array.getLogicalType() instanceof LogicalMap) {
        ValueWriters.StructWriter<?> keyValueWriter = (ValueWriters.StructWriter<?>) elementWriter;
        return ValueWriters.arrayMap(keyValueWriter.writer(0), keyValueWriter.writer(1));
      }
      return ValueWriters.array(elementWriter);
    }

    @Override
    public ValueWriter<?> map(Schema map, ValueWriter<?> valueWriter) {
      // Avro's native map type always has string keys
      return ValueWriters.map(ValueWriters.strings(), valueWriter);
    }

    @Override
    public ValueWriter<?> primitive(Schema primitive) {
      LogicalType logicalType = primitive.getLogicalType();
      if (logicalType != null) {
        switch (logicalType.getName()) {
          case "date":
            return ValueWriters.ints();
          case "timestamp-micros":
            return ValueWriters.longs();
          case "decimal":
            LogicalTypes.Decimal decimal = (LogicalTypes.Decimal) logicalType;
            return ValueWriters.decimal(decimal.getPrecision(), decimal.getScale());
          case "uuid":
            return ValueWriters.uuids();
          default:
            throw new IllegalArgumentException("Unsupported logical type: " + logicalType);
        }
      }

      switch (primitive.getType()) {
        case NULL:
          return ValueWriters.nulls();
        case BOOLEAN:
          return ValueWriters.booleans();
        case INT:
          return ValueWriters.ints();
        case LONG:
          return ValueWriters.longs();
        case FLOAT:
          return ValueWriters.floats();
        case DOUBLE:
          return ValueWriters.doubles();
        case STRING:
          return ValueWriters.strings();
        case FIXED:
          return ValueWriters.genericFixed(primitive.getFixedSize());
        case BYTES:
          return ValueWriters.byteBuffers();
        default:
          throw new IllegalArgumentException("Unsupported type: " + primitive);
      }
    }
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
/**
 * An AvroSchemaVisitor that converts an Avro {@link Schema} to an Iceberg {@link Type}.
 * <p>
 * Field ids are taken from the id properties attached to the Avro schema when present.
 * Schemas without id properties get sequentially allocated ids; for a record root,
 * allocation starts after the root's field count so that the root's own fields can be
 * assigned ids starting from 0 when the root is visited.
 */
class SchemaToType extends AvroSchemaVisitor<Type> {
  private final Schema root;
  // next id to allocate for schemas that carry no id property
  private int nextId = 1;

  SchemaToType(Schema root) {
    this.root = root;
    if (root.getType() == Schema.Type.RECORD) {
      // reserve ids 0..n-1 for the root's fields
      this.nextId = root.getFields().size();
    }
  }

  private int allocateId() {
    int current = nextId;
    nextId += 1;
    return current;
  }

  // each lookup below prefers the id property on the schema, falling back to allocation

  private int getElementId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.ELEMENT_ID_PROP) != null) {
      return AvroSchemaUtil.getElementId(schema);
    } else {
      return allocateId();
    }
  }

  private int getKeyId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.KEY_ID_PROP) != null) {
      return AvroSchemaUtil.getKeyId(schema);
    } else {
      return allocateId();
    }
  }

  private int getValueId(Schema schema) {
    if (schema.getObjectProp(AvroSchemaUtil.VALUE_ID_PROP) != null) {
      return AvroSchemaUtil.getValueId(schema);
    } else {
      return allocateId();
    }
  }

  private int getId(Schema.Field field) {
    if (field.getObjectProp(AvroSchemaUtil.FIELD_ID_PROP) != null) {
      return AvroSchemaUtil.getFieldId(field);
    } else {
      return allocateId();
    }
  }

  @Override
  public Type record(Schema record, List<String> names, List<Type> fieldTypes) {
    List<Schema.Field> fields = record.getFields();
    List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fields.size());
    if (root == record) {
      // the root record is visited after its children, so its fields are assigned
      // ids starting from 0 here
      this.nextId = 0;
    }
    for (int i = 0; i < fields.size(); i += 1) {
      Schema.Field field = fields.get(i);
      Type fieldType = fieldTypes.get(i);
      int fieldId = getId(field);
      if (AvroSchemaUtil.isOptionSchema(field.schema())) {
        newFields.add(Types.NestedField.optional(fieldId, field.name(), fieldType));
      } else {
        newFields.add(Types.NestedField.required(fieldId, field.name(), fieldType));
      }
    }
    return Types.StructType.of(newFields);
  }

  @Override
  public Type union(Schema union, List<Type> options) {
    // fix: Guava Preconditions substitutes %s placeholders, not SLF4J-style {};
    // the original {} was emitted literally in the failure message
    Preconditions.checkArgument(AvroSchemaUtil.isOptionSchema(union),
        "Unsupported type: non-option union: %s", union);
    // records, arrays, and maps will check nullability later
    if (options.get(0) == null) {
      return options.get(1);
    } else {
      return options.get(0);
    }
  }

  @Override
  public Type array(Schema array, Type elementType) {
    if (array.getLogicalType() instanceof LogicalMap) {
      // map stored as an array of key/value records, used when keys are not strings
      Schema keyValueSchema = array.getElementType();
      // fix: %s instead of {} (see union)
      Preconditions.checkArgument(AvroSchemaUtil.isKeyValueSchema(keyValueSchema),
          "Invalid key-value pair schema: %s", keyValueSchema);
      Types.StructType keyValueType = elementType.asStructType();
      Types.NestedField keyField = keyValueType.field("key");
      Types.NestedField valueField = keyValueType.field("value");
      if (valueField.isOptional()) {
        return Types.MapType.ofOptional(
            keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
      } else {
        return Types.MapType.ofRequired(
            keyField.fieldId(), valueField.fieldId(), keyField.type(), valueField.type());
      }
    } else {
      // normal array
      Schema elementSchema = array.getElementType();
      int id = getElementId(array);
      if (AvroSchemaUtil.isOptionSchema(elementSchema)) {
        return Types.ListType.ofOptional(id, elementType);
      } else {
        return Types.ListType.ofRequired(id, elementType);
      }
    }
  }

  @Override
  public Type map(Schema map, Type valueType) {
    // Avro's native map type always has string keys
    Schema valueSchema = map.getValueType();
    int keyId = getKeyId(map);
    int valueId = getValueId(map);
    if (AvroSchemaUtil.isOptionSchema(valueSchema)) {
      return Types.MapType.ofOptional(keyId, valueId, Types.StringType.get(), valueType);
    } else {
      return Types.MapType.ofRequired(keyId, valueId, Types.StringType.get(), valueType);
    }
  }

  @Override
  public Type primitive(Schema primitive) {
    // first check supported logical types
    LogicalType logical = primitive.getLogicalType();
    if (logical != null) {
      String name = logical.getName();
      if (logical instanceof LogicalTypes.Decimal) {
        return Types.DecimalType.of(
            ((LogicalTypes.Decimal) logical).getPrecision(),
            ((LogicalTypes.Decimal) logical).getScale());
      } else if (logical instanceof LogicalTypes.Date) {
        return Types.DateType.get();
      } else if (
          logical instanceof LogicalTypes.TimeMillis ||
          logical instanceof LogicalTypes.TimeMicros) {
        return Types.TimeType.get();
      } else if (
          logical instanceof LogicalTypes.TimestampMillis ||
          logical instanceof LogicalTypes.TimestampMicros) {
        // the adjust-to-utc property distinguishes timestamptz from timestamp
        Object adjustToUTC = primitive.getObjectProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP);
        Preconditions.checkArgument(adjustToUTC instanceof Boolean,
            "Invalid value for adjust-to-utc: %s", adjustToUTC);
        if ((Boolean) adjustToUTC) {
          return Types.TimestampType.withZone();
        } else {
          return Types.TimestampType.withoutZone();
        }
      } else if (LogicalTypes.uuid().getName().equals(name)) {
        return Types.UUIDType.get();
      }
    }

    switch (primitive.getType()) {
      case BOOLEAN:
        return Types.BooleanType.get();
      case INT:
        return Types.IntegerType.get();
      case LONG:
        return Types.LongType.get();
      case FLOAT:
        return Types.FloatType.get();
      case DOUBLE:
        return Types.DoubleType.get();
      case STRING:
      case ENUM:
        return Types.StringType.get();
      case FIXED:
        return Types.FixedType.ofLength(primitive.getFixedSize());
      case BYTES:
        return Types.BinaryType.get();
      case NULL:
        // null branches of unions have no Iceberg type; resolved by union()
        return null;
    }

    throw new UnsupportedOperationException(
        "Unsupported primitive type: " + primitive);
  }
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import org.apache.avro.Schema;
import java.util.LinkedList;
import java.util.List;
import java.util.function.Supplier;
/**
 * A schema visitor that gives subclasses control over evaluation order: child
 * results are delivered as lazy {@link Supplier} values (or lazily-transformed
 * iterables), so a callback decides when — and whether — each child is visited.
 */
abstract class AvroCustomOrderSchemaVisitor<T, F> {
  public static <T, F> T visit(Schema schema, AvroCustomOrderSchemaVisitor<T, F> visitor) {
    switch (schema.getType()) {
      case RECORD: {
        // guard against recursion: a record must not contain itself
        String fullName = schema.getFullName();
        Preconditions.checkState(!visitor.recordLevels.contains(fullName),
            "Cannot process recursive Avro record %s", fullName);

        visitor.recordLevels.push(fullName);

        List<Schema.Field> recordFields = schema.getFields();
        List<String> fieldNames = Lists.newArrayListWithExpectedSize(recordFields.size());
        List<Supplier<F>> fieldFutures = Lists.newArrayListWithExpectedSize(recordFields.size());
        for (Schema.Field field : recordFields) {
          fieldNames.add(field.name());
          fieldFutures.add(new VisitFieldFuture<>(field, visitor));
        }

        visitor.recordLevels.pop();

        // the transform is lazy: fields are visited only when record() consumes them
        return visitor.record(schema, fieldNames, Iterables.transform(fieldFutures, Supplier::get));
      }

      case UNION: {
        List<Schema> branches = schema.getTypes();
        List<Supplier<T>> branchFutures = Lists.newArrayListWithExpectedSize(branches.size());
        for (Schema branch : branches) {
          branchFutures.add(new VisitFuture<>(branch, visitor));
        }
        return visitor.union(schema, Iterables.transform(branchFutures, Supplier::get));
      }

      case ARRAY:
        return visitor.array(schema, new VisitFuture<>(schema.getElementType(), visitor));

      case MAP:
        return visitor.map(schema, new VisitFuture<>(schema.getValueType(), visitor));

      default:
        return visitor.primitive(schema);
    }
  }

  // names of the records currently being visited, used to detect recursion
  protected LinkedList<String> recordLevels = Lists.newLinkedList();

  public T record(Schema record, List<String> names, Iterable<F> fields) {
    return null;
  }

  public F field(Schema.Field field, Supplier<T> fieldResult) {
    return null;
  }

  public T union(Schema union, Iterable<T> options) {
    return null;
  }

  public T array(Schema array, Supplier<T> element) {
    return null;
  }

  public T map(Schema map, Supplier<T> value) {
    return null;
  }

  public T primitive(Schema primitive) {
    return null;
  }

  /** Defers visiting a schema until {@link #get()} is called. */
  private static class VisitFuture<T, F> implements Supplier<T> {
    private final Schema schema;
    private final AvroCustomOrderSchemaVisitor<T, F> visitor;

    private VisitFuture(Schema schema, AvroCustomOrderSchemaVisitor<T, F> visitor) {
      this.schema = schema;
      this.visitor = visitor;
    }

    @Override
    public T get() {
      return visit(schema, visitor);
    }
  }

  /** Defers the field callback (and the field's schema visit) until {@link #get()}. */
  private static class VisitFieldFuture<T, F> implements Supplier<F> {
    private final Schema.Field field;
    private final AvroCustomOrderSchemaVisitor<T, F> visitor;

    private VisitFieldFuture(Schema.Field field, AvroCustomOrderSchemaVisitor<T, F> visitor) {
      this.field = field;
      this.visitor = visitor;
    }

    @Override
    public F get() {
      return visitor.field(field, new VisitFuture<>(field.schema(), visitor));
    }
  }
}
| 6,365 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/Avro.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import com.netflix.iceberg.SchemaParser;
import com.netflix.iceberg.io.FileAppender;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.avro.Conversions;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.file.CodecFactory;
import org.apache.avro.generic.GenericData;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.specific.SpecificData;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;
import static com.netflix.iceberg.TableProperties.AVRO_COMPRESSION;
import static com.netflix.iceberg.TableProperties.AVRO_COMPRESSION_DEFAULT;
/**
 * Entry point for reading and writing Avro data files with Iceberg schemas.
 * <p>
 * Use {@link #write(OutputFile)} to build a {@link FileAppender} and
 * {@link #read(InputFile)} to build an {@link AvroIterable}.
 */
public class Avro {
  private Avro() {
  }

  /**
   * Compression codecs recognized by {@link WriteBuilder}.
   * <p>
   * Entries constructed with a null factory (LZ4, BROTLI, ZSTD) are known codec
   * names without an Avro implementation here; {@link #get()} rejects them.
   */
  private enum CodecName {
    UNCOMPRESSED(CodecFactory.nullCodec()),
    SNAPPY(CodecFactory.snappyCodec()),
    GZIP(CodecFactory.deflateCodec(9)),
    LZ4(null),
    BROTLI(null),
    ZSTD(null);

    private final CodecFactory avroCodec;

    CodecName(CodecFactory avroCodec) {
      this.avroCodec = avroCodec;
    }

    public CodecFactory get() {
      // use the %s template so the message is only built on failure
      Preconditions.checkArgument(avroCodec != null, "Missing implementation for codec %s", this);
      return avroCodec;
    }
  }

  private static final GenericData DEFAULT_MODEL = new SpecificData();

  static {
    LogicalTypes.register(LogicalMap.NAME, schema -> LogicalMap.get());
    DEFAULT_MODEL.addLogicalTypeConversion(new Conversions.DecimalConversion());
    DEFAULT_MODEL.addLogicalTypeConversion(new UUIDConversion());
  }

  /** Returns a builder for writing Avro data to {@code file}. */
  public static WriteBuilder write(OutputFile file) {
    return new WriteBuilder(file);
  }

  /** Configures and creates a {@link FileAppender} that writes Avro data files. */
  public static class WriteBuilder {
    private final OutputFile file;
    private com.netflix.iceberg.Schema schema = null;
    private String name = "table";
    private final Map<String, String> config = Maps.newHashMap();
    private final Map<String, String> metadata = Maps.newLinkedHashMap();
    private Function<Schema, DatumWriter<?>> createWriterFunc = GenericAvroWriter::new;

    private WriteBuilder(OutputFile file) {
      this.file = file;
    }

    /** Sets the Iceberg schema for records written by the appender. Required. */
    public WriteBuilder schema(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
      return this;
    }

    /** Sets the record name used for the generated Avro schema. Defaults to "table". */
    public WriteBuilder named(String name) {
      this.name = name;
      return this;
    }

    /** Sets the function used to create a {@link DatumWriter} from the Avro schema. */
    public WriteBuilder createWriterFunc(Function<Schema, DatumWriter<?>> writerFunction) {
      this.createWriterFunc = writerFunction;
      return this;
    }

    /** Sets a writer configuration property, such as the compression codec. */
    public WriteBuilder set(String property, String value) {
      config.put(property, value);
      return this;
    }

    /** Adds all entries as writer configuration properties. */
    public WriteBuilder setAll(Map<String, String> properties) {
      config.putAll(properties);
      return this;
    }

    /** Adds a key/value pair to the file's metadata. */
    public WriteBuilder meta(String property, String value) {
      metadata.put(property, value);
      return this;
    }

    /** Adds all entries to the file's metadata. */
    public WriteBuilder meta(Map<String, String> properties) {
      metadata.putAll(properties);
      return this;
    }

    private CodecFactory codec() {
      String codec = config.getOrDefault(AVRO_COMPRESSION, AVRO_COMPRESSION_DEFAULT);
      try {
        return CodecName.valueOf(codec.toUpperCase(Locale.ENGLISH)).get();
      } catch (IllegalArgumentException e) {
        // thrown by valueOf (unknown name) or get (known name, no implementation);
        // keep the cause so the two cases are distinguishable
        throw new IllegalArgumentException("Unsupported compression codec: " + codec, e);
      }
    }

    /**
     * Builds the appender.
     *
     * @throws IOException if the underlying file cannot be created
     * @throws NullPointerException if the schema or name was not set
     */
    public <D> FileAppender<D> build() throws IOException {
      Preconditions.checkNotNull(schema, "Schema is required");
      Preconditions.checkNotNull(name, "Table name is required and cannot be null");

      // add the Iceberg schema to keyValueMetadata
      meta("iceberg.schema", SchemaParser.toJson(schema));

      return new AvroFileAppender<>(
          AvroSchemaUtil.convert(schema, name), file, createWriterFunc, codec(), metadata);
    }
  }

  /** Returns a builder for reading Avro data from {@code file}. */
  public static ReadBuilder read(InputFile file) {
    return new ReadBuilder(file);
  }

  /** Configures and creates an {@link AvroIterable} over an Avro data file. */
  public static class ReadBuilder {
    private final ClassLoader defaultLoader = Thread.currentThread().getContextClassLoader();
    private final InputFile file;
    private final Map<String, String> renames = Maps.newLinkedHashMap();
    private boolean reuseContainers = false;
    private com.netflix.iceberg.Schema schema = null;
    private Function<Schema, DatumReader<?>> createReaderFunc = schema -> {
      GenericAvroReader<?> reader = new GenericAvroReader<>(schema);
      reader.setClassLoader(defaultLoader);
      return reader;
    };
    private Long start = null;
    private Long length = null;

    private ReadBuilder(InputFile file) {
      Preconditions.checkNotNull(file, "Input file cannot be null");
      this.file = file;
    }

    /** Sets the function used to create a {@link DatumReader} from the read schema. */
    public ReadBuilder createReaderFunc(Function<Schema, DatumReader<?>> readerFunction) {
      this.createReaderFunc = readerFunction;
      return this;
    }

    /**
     * Restricts the read to the given range: [start, end = start + length).
     *
     * @param start the start position for this read
     * @param length the length of the range this read should scan
     * @return this builder for method chaining
     */
    public ReadBuilder split(long start, long length) {
      this.start = start;
      this.length = length;
      return this;
    }

    /** Sets the expected Iceberg schema used to project the file's records. Required. */
    public ReadBuilder project(com.netflix.iceberg.Schema schema) {
      this.schema = schema;
      return this;
    }

    /** Enables reusing record containers across reads. */
    public ReadBuilder reuseContainers() {
      this.reuseContainers = true;
      return this;
    }

    /** Sets whether record containers are reused across reads. */
    public ReadBuilder reuseContainers(boolean shouldReuse) {
      this.reuseContainers = shouldReuse;
      return this;
    }

    /** Renames the record with the given full name to a new name in the read schema. */
    public ReadBuilder rename(String fullName, String newName) {
      renames.put(fullName, newName);
      return this;
    }

    /**
     * Builds the iterable.
     *
     * @throws NullPointerException if the projection schema was not set
     */
    public <D> AvroIterable<D> build() {
      Preconditions.checkNotNull(schema, "Schema is required");
      return new AvroIterable<>(file,
          new ProjectionDatumReader<>(createReaderFunc, schema, renames),
          start, length, reuseContainers);
    }
  }
}
| 6,366 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ProjectionDatumReader.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.Schema;
import org.apache.avro.io.DatumReader;
import org.apache.avro.io.Decoder;
import java.io.IOException;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import static com.netflix.iceberg.types.TypeUtil.getProjectedIds;
/**
 * A {@link DatumReader} that projects a file's Avro schema onto an expected
 * Iceberg schema, then delegates reading to a wrapped reader built for the
 * projected schema.
 */
public class ProjectionDatumReader<D> implements DatumReader<D> {
  private final Function<Schema, DatumReader<?>> getReader;
  private final com.netflix.iceberg.Schema expectedSchema;
  private final Map<String, String> renames;
  private Schema readSchema = null;
  private Schema fileSchema = null;
  private DatumReader<D> wrapped = null;

  public ProjectionDatumReader(Function<Schema, DatumReader<?>> getReader,
                               com.netflix.iceberg.Schema expectedSchema,
                               Map<String, String> renames) {
    this.getReader = getReader;
    this.expectedSchema = expectedSchema;
    this.renames = renames;
  }

  @Override
  public void setSchema(Schema newFileSchema) {
    this.fileSchema = newFileSchema;
    // prune the file schema down to the projected field ids, then build the
    // read schema that matches the expected schema and applies renames
    Set<Integer> projectedIds = getProjectedIds(expectedSchema);
    Schema pruned = AvroSchemaUtil.pruneColumns(newFileSchema, projectedIds);
    this.readSchema = AvroSchemaUtil.buildAvroProjection(pruned, expectedSchema, renames);
    this.wrapped = newDatumReader();
  }

  @Override
  public D read(D reuse, Decoder in) throws IOException {
    return wrapped.read(reuse, in);
  }

  @SuppressWarnings("unchecked")
  private DatumReader<D> newDatumReader() {
    DatumReader<D> delegate = (DatumReader<D>) getReader.apply(readSchema);
    delegate.setSchema(fileSchema);
    return delegate;
  }
}
| 6,367 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/BuildAvroProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static com.netflix.iceberg.avro.AvroSchemaUtil.convert;
import static com.netflix.iceberg.avro.AvroSchemaUtil.copyField;
import static com.netflix.iceberg.avro.AvroSchemaUtil.copyRecord;
import static com.netflix.iceberg.avro.AvroSchemaUtil.fromOption;
import static com.netflix.iceberg.avro.AvroSchemaUtil.fromOptions;
import static com.netflix.iceberg.avro.AvroSchemaUtil.getFieldId;
import static com.netflix.iceberg.avro.AvroSchemaUtil.isKeyValueSchema;
import static com.netflix.iceberg.avro.AvroSchemaUtil.isOptionSchema;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
/**
* Renames and aliases fields in an Avro schema based on the current table schema.
* <p>
* This class creates a read schema based on an Avro file's schema that will correctly translate
* from the file's field names to the current table schema.
* <p>
* This will also rename records in the file's Avro schema to support custom read classes.
*/
/**
 * Renames and aliases fields in an Avro schema based on the current table schema.
 * <p>
 * This class creates a read schema based on an Avro file's schema that will correctly translate
 * from the file's field names to the current table schema.
 * <p>
 * This will also rename records in the file's Avro schema to support custom read classes.
 * <p>
 * The visitor tracks its position in the expected Iceberg schema through the
 * mutable {@code current} field, which each callback sets before descending and
 * restores (in a finally block) before returning.
 */
class BuildAvroProjection extends AvroCustomOrderSchemaVisitor<Schema, Schema.Field> {
  private final Map<String, String> renames;
  // the expected Iceberg type for the schema currently being visited
  private Type current = null;

  BuildAvroProjection(com.netflix.iceberg.Schema expectedSchema, Map<String, String> renames) {
    this.renames = renames;
    this.current = expectedSchema.asStruct();
  }

  @Override
  public Schema record(Schema record, List<String> names, Iterable<Schema.Field> schemaIterable) {
    Preconditions.checkArgument(
        current.isNestedType() && current.asNestedType().isStructType(),
        "Cannot project non-struct: %s", current);

    Types.StructType struct = current.asNestedType().asStructType();

    boolean hasChange = false;
    List<Schema.Field> fields = record.getFields();
    // materializing the iterable visits each field (see field() below)
    List<Schema.Field> fieldResults = Lists.newArrayList(schemaIterable);

    Map<String, Schema.Field> updateMap = Maps.newHashMap();
    for (int i = 0; i < fields.size(); i += 1) {
      Schema.Field field = fields.get(i);
      Schema.Field updatedField = fieldResults.get(i);

      if (updatedField != null) {
        updateMap.put(updatedField.name(), updatedField);

        if (!updatedField.schema().equals(field.schema()) ||
            !updatedField.name().equals(field.name())) {
          hasChange = true;
        }
      } else {
        hasChange = true; // column was not projected
      }
    }

    // construct the schema using the expected order
    List<Schema.Field> updatedFields = Lists.newArrayListWithExpectedSize(struct.fields().size());
    List<Types.NestedField> expectedFields = struct.fields();
    for (int i = 0; i < expectedFields.size(); i += 1) {
      Types.NestedField field = expectedFields.get(i);

      // detect reordering
      if (i < fields.size() && !field.name().equals(fields.get(i).name())) {
        hasChange = true;
      }

      Schema.Field avroField = updateMap.get(field.name());

      if (avroField != null) {
        updatedFields.add(avroField);

      } else {
        Preconditions.checkArgument(field.isOptional(), "Missing required field: %s", field.name());
        // create a field that will be defaulted to null
        Schema.Field newField = new Schema.Field(
            field.name(), toOption(convert(field.type())), null, JsonProperties.NULL_VALUE);
        newField.addProp(AvroSchemaUtil.FIELD_ID_PROP, field.fieldId());
        updatedFields.add(newField);
        hasChange = true;
      }
    }

    if (hasChange || renames.containsKey(record.getFullName())) {
      return copyRecord(record, updatedFields, renames.get(record.getFullName()));
    }

    return record;
  }

  @Override
  public Schema.Field field(Schema.Field field, Supplier<Schema> fieldResult) {
    Types.StructType struct = current.asNestedType().asStructType();
    int fieldId = AvroSchemaUtil.getFieldId(field);
    Types.NestedField expectedField = struct.field(fieldId); // TODO: what if there are no ids?

    // if the field isn't present, it was not selected
    if (expectedField == null) {
      return null;
    }

    String expectedName = expectedField.name();

    this.current = expectedField.type();
    try {
      Schema schema = fieldResult.get();

      if (schema != field.schema() || !expectedName.equals(field.name())) {
        // add an alias for the field
        return copyField(field, schema, expectedName);
      } else {
        // always copy because fields can't be reused
        return copyField(field, field.schema(), field.name());
      }

    } finally {
      this.current = struct;
    }
  }

  @Override
  public Schema union(Schema union, Iterable<Schema> options) {
    // fix: the message template previously used an SLF4J-style "{}" placeholder,
    // but Guava Preconditions substitutes "%s" placeholders
    Preconditions.checkState(isOptionSchema(union),
        "Invalid schema: non-option unions are not supported: %s", union);

    Schema nonNullOriginal = fromOption(union);
    Schema nonNullResult = fromOptions(Lists.newArrayList(options));

    if (nonNullOriginal != nonNullResult) {
      return toOption(nonNullResult);
    }

    return union;
  }

  @Override
  public Schema array(Schema array, Supplier<Schema> element) {
    // an Avro array may represent an Iceberg map encoded as key/value records
    if (array.getLogicalType() instanceof LogicalMap ||
        (current.isMapType() && isKeyValueSchema(array.getElementType()))) {
      Preconditions.checkArgument(current.isMapType(), "Incompatible projected type: %s", current);
      Types.MapType m = current.asNestedType().asMapType();
      this.current = Types.StructType.of(m.fields()); // create a struct to correspond to element
      try {
        Schema keyValueSchema = array.getElementType();
        Schema.Field keyField = keyValueSchema.getFields().get(0);
        Schema.Field valueField = keyValueSchema.getFields().get(1);
        Schema.Field valueProjection = element.get().getField("value");

        // element was changed, create a new array
        if (valueProjection.schema() != valueField.schema()) {
          return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(),
              getFieldId(keyField), keyField.name(), keyField.schema(),
              getFieldId(valueField), valueField.name(), valueProjection.schema());
        } else if (!(array.getLogicalType() instanceof LogicalMap)) {
          return AvroSchemaUtil.createProjectionMap(keyValueSchema.getFullName(),
              getFieldId(keyField), keyField.name(), keyField.schema(),
              getFieldId(valueField), valueField.name(), valueField.schema());
        }

        return array;

      } finally {
        this.current = m;
      }

    } else {
      Preconditions.checkArgument(current.isListType(),
          "Incompatible projected type: %s", current);
      Types.ListType list = current.asNestedType().asListType();
      this.current = list.elementType();
      try {
        Schema elementSchema = element.get();

        // element was changed, create a new array
        if (elementSchema != array.getElementType()) {
          return Schema.createArray(elementSchema);
        }

        return array;

      } finally {
        this.current = list;
      }
    }
  }

  @Override
  public Schema map(Schema map, Supplier<Schema> value) {
    Preconditions.checkArgument(current.isNestedType() && current.asNestedType().isMapType(),
        "Incompatible projected type: %s", current);
    Types.MapType m = current.asNestedType().asMapType();
    // Avro maps always have string keys, so a string-keyed Iceberg map is required
    Preconditions.checkArgument(m.keyType() == Types.StringType.get(),
        "Incompatible projected type: key type %s is not string", m.keyType());
    this.current = m.valueType();
    try {
      Schema valueSchema = value.get();

      // element was changed, create a new map
      if (valueSchema != map.getValueType()) {
        return Schema.createMap(valueSchema);
      }

      return map;

    } finally {
      this.current = m;
    }
  }

  @Override
  public Schema primitive(Schema primitive) {
    // check for type promotion
    switch (primitive.getType()) {
      case INT:
        if (current.typeId() == Type.TypeID.LONG) {
          return Schema.create(Schema.Type.LONG);
        }
        return primitive;
      case FLOAT:
        if (current.typeId() == Type.TypeID.DOUBLE) {
          return Schema.create(Schema.Type.DOUBLE);
        }
        return primitive;
      default:
        return primitive;
    }
  }
}
| 6,368 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/ValueWriters.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.netflix.iceberg.types.TypeUtil;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.io.Encoder;
import org.apache.avro.util.Utf8;
import java.io.IOException;
import java.lang.reflect.Array;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Static factories for {@link ValueWriter} implementations that encode values
 * with an Avro {@link Encoder}.
 * <p>
 * Stateless writers are shared singletons; parameterized writers (fixed,
 * decimal, option, collection, map, record) are created per call.
 */
public class ValueWriters {
  private ValueWriters() {
  }

  public static ValueWriter<Void> nulls() {
    return NullWriter.INSTANCE;
  }

  public static ValueWriter<Boolean> booleans() {
    return BooleanWriter.INSTANCE;
  }

  public static ValueWriter<Integer> ints() {
    return IntegerWriter.INSTANCE;
  }

  public static ValueWriter<Long> longs() {
    return LongWriter.INSTANCE;
  }

  public static ValueWriter<Float> floats() {
    return FloatWriter.INSTANCE;
  }

  public static ValueWriter<Double> doubles() {
    return DoubleWriter.INSTANCE;
  }

  public static ValueWriter<Object> strings() {
    return StringWriter.INSTANCE;
  }

  public static ValueWriter<Utf8> utf8s() {
    return Utf8Writer.INSTANCE;
  }

  public static ValueWriter<UUID> uuids() {
    return UUIDWriter.INSTANCE;
  }

  public static ValueWriter<byte[]> fixed(int length) {
    return new FixedWriter(length);
  }

  public static ValueWriter<GenericData.Fixed> genericFixed(int length) {
    return new GenericFixedWriter(length);
  }

  public static ValueWriter<byte[]> bytes() {
    return BytesWriter.INSTANCE;
  }

  public static ValueWriter<ByteBuffer> byteBuffers() {
    return ByteBufferWriter.INSTANCE;
  }

  public static ValueWriter<BigDecimal> decimal(int precision, int scale) {
    return new DecimalWriter(precision, scale);
  }

  /** Returns a writer for an optional value encoded as a 2-branch union. */
  public static <T> ValueWriter<T> option(int nullIndex, ValueWriter<T> writer) {
    return new OptionWriter<>(nullIndex, writer);
  }

  public static <T> ValueWriter<Collection<T>> array(ValueWriter<T> elementWriter) {
    return new CollectionWriter<>(elementWriter);
  }

  /** Returns a writer for a map encoded as an Avro array of key/value records. */
  public static <K, V> ValueWriter<Map<K, V>> arrayMap(ValueWriter<K> keyWriter,
                                                       ValueWriter<V> valueWriter) {
    return new ArrayMapWriter<>(keyWriter, valueWriter);
  }

  /** Returns a writer for a map encoded as a native Avro map. */
  public static <K, V> ValueWriter<Map<K, V>> map(ValueWriter<K> keyWriter,
                                                  ValueWriter<V> valueWriter) {
    return new MapWriter<>(keyWriter, valueWriter);
  }

  public static ValueWriter<IndexedRecord> record(List<ValueWriter<?>> writers) {
    return new RecordWriter(writers);
  }

  private static class NullWriter implements ValueWriter<Void> {
    private static final NullWriter INSTANCE = new NullWriter();

    private NullWriter() {
    }

    @Override
    public void write(Void ignored, Encoder encoder) throws IOException {
      encoder.writeNull();
    }
  }

  private static class BooleanWriter implements ValueWriter<Boolean> {
    private static final BooleanWriter INSTANCE = new BooleanWriter();

    private BooleanWriter() {
    }

    @Override
    public void write(Boolean bool, Encoder encoder) throws IOException {
      encoder.writeBoolean(bool);
    }
  }

  private static class IntegerWriter implements ValueWriter<Integer> {
    private static final IntegerWriter INSTANCE = new IntegerWriter();

    private IntegerWriter() {
    }

    @Override
    public void write(Integer i, Encoder encoder) throws IOException {
      encoder.writeInt(i);
    }
  }

  private static class LongWriter implements ValueWriter<Long> {
    private static final LongWriter INSTANCE = new LongWriter();

    private LongWriter() {
    }

    @Override
    public void write(Long l, Encoder encoder) throws IOException {
      encoder.writeLong(l);
    }
  }

  private static class FloatWriter implements ValueWriter<Float> {
    private static final FloatWriter INSTANCE = new FloatWriter();

    private FloatWriter() {
    }

    @Override
    public void write(Float f, Encoder encoder) throws IOException {
      encoder.writeFloat(f);
    }
  }

  private static class DoubleWriter implements ValueWriter<Double> {
    private static final DoubleWriter INSTANCE = new DoubleWriter();

    private DoubleWriter() {
    }

    @Override
    public void write(Double d, Encoder encoder) throws IOException {
      encoder.writeDouble(d);
    }
  }

  private static class StringWriter implements ValueWriter<Object> {
    private static final StringWriter INSTANCE = new StringWriter();

    private StringWriter() {
    }

    @Override
    public void write(Object s, Encoder encoder) throws IOException {
      // use getBytes because it may return the backing byte array if available.
      // otherwise, it copies to a new byte array, which is still cheaper than Avro
      // calling toString, which incurs encoding costs
      if (s instanceof Utf8) {
        encoder.writeString((Utf8) s);
      } else if (s instanceof String) {
        encoder.writeString(new Utf8((String) s));
      } else if (s == null) {
        throw new IllegalArgumentException("Cannot write null to required string column");
      } else {
        throw new IllegalArgumentException(
            "Cannot write unknown string type: " + s.getClass().getName() + ": " + s.toString());
      }
    }
  }

  private static class Utf8Writer implements ValueWriter<Utf8> {
    private static final Utf8Writer INSTANCE = new Utf8Writer();

    private Utf8Writer() {
    }

    @Override
    public void write(Utf8 s, Encoder encoder) throws IOException {
      encoder.writeString(s);
    }
  }

  private static class UUIDWriter implements ValueWriter<UUID> {
    // reuse a per-thread 16-byte buffer for the fixed[16] UUID encoding
    private static final ThreadLocal<ByteBuffer> BUFFER = ThreadLocal.withInitial(() -> {
      ByteBuffer buffer = ByteBuffer.allocate(16);
      buffer.order(ByteOrder.BIG_ENDIAN);
      return buffer;
    });

    private static final UUIDWriter INSTANCE = new UUIDWriter();

    private UUIDWriter() {
    }

    @Override
    public void write(UUID uuid, Encoder encoder) throws IOException {
      // TODO: direct conversion from string to byte buffer
      ByteBuffer buffer = BUFFER.get();
      buffer.rewind();
      buffer.putLong(uuid.getMostSignificantBits());
      buffer.putLong(uuid.getLeastSignificantBits());
      encoder.writeFixed(buffer.array());
    }
  }

  private static class FixedWriter implements ValueWriter<byte[]> {
    private final int length;

    private FixedWriter(int length) {
      this.length = length;
    }

    @Override
    public void write(byte[] bytes, Encoder encoder) throws IOException {
      Preconditions.checkArgument(bytes.length == length,
          "Cannot write byte array of length %s as fixed[%s]", bytes.length, length);
      encoder.writeFixed(bytes);
    }
  }

  private static class GenericFixedWriter implements ValueWriter<GenericData.Fixed> {
    private final int length;

    private GenericFixedWriter(int length) {
      this.length = length;
    }

    @Override
    public void write(GenericData.Fixed datum, Encoder encoder) throws IOException {
      Preconditions.checkArgument(datum.bytes().length == length,
          "Cannot write byte array of length %s as fixed[%s]", datum.bytes().length, length);
      encoder.writeFixed(datum.bytes());
    }
  }

  private static class BytesWriter implements ValueWriter<byte[]> {
    private static final BytesWriter INSTANCE = new BytesWriter();

    private BytesWriter() {
    }

    @Override
    public void write(byte[] bytes, Encoder encoder) throws IOException {
      encoder.writeBytes(bytes);
    }
  }

  private static class ByteBufferWriter implements ValueWriter<ByteBuffer> {
    private static final ByteBufferWriter INSTANCE = new ByteBufferWriter();

    private ByteBufferWriter() {
    }

    @Override
    public void write(ByteBuffer bytes, Encoder encoder) throws IOException {
      encoder.writeBytes(bytes);
    }
  }

  private static class DecimalWriter implements ValueWriter<BigDecimal> {
    private final int precision;
    private final int scale;
    private final int length;
    // per-thread scratch buffer for the fixed-length two's-complement encoding
    private final ThreadLocal<byte[]> bytes;

    private DecimalWriter(int precision, int scale) {
      this.precision = precision;
      this.scale = scale;
      this.length = TypeUtil.decimalRequriedBytes(precision);
      this.bytes = ThreadLocal.withInitial(() -> new byte[length]);
    }

    @Override
    public void write(BigDecimal decimal, Encoder encoder) throws IOException {
      Preconditions.checkArgument(decimal.scale() == scale,
          "Cannot write value as decimal(%s,%s), wrong scale: %s", precision, scale, decimal);
      Preconditions.checkArgument(decimal.precision() <= precision,
          "Cannot write value as decimal(%s,%s), too large: %s", precision, scale, decimal);

      // sign-extend the unscaled two's-complement value to the full fixed length
      byte fillByte = (byte) (decimal.signum() < 0 ? 0xFF : 0x00);
      byte[] unscaled = decimal.unscaledValue().toByteArray();
      byte[] buf = bytes.get();
      int offset = length - unscaled.length;

      for (int i = 0; i < length; i += 1) {
        if (i < offset) {
          buf[i] = fillByte;
        } else {
          buf[i] = unscaled[i - offset];
        }
      }

      encoder.writeFixed(buf);
    }
  }

  private static class OptionWriter<T> implements ValueWriter<T> {
    private final int nullIndex;
    private final int valueIndex;
    private final ValueWriter<T> valueWriter;

    private OptionWriter(int nullIndex, ValueWriter<T> valueWriter) {
      this.nullIndex = nullIndex;
      if (nullIndex == 0) {
        this.valueIndex = 1;
      } else if (nullIndex == 1) {
        this.valueIndex = 0;
      } else {
        throw new IllegalArgumentException("Invalid option index: " + nullIndex);
      }
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(T option, Encoder encoder) throws IOException {
      if (option == null) {
        encoder.writeIndex(nullIndex);
      } else {
        encoder.writeIndex(valueIndex);
        valueWriter.write(option, encoder);
      }
    }
  }

  private static class CollectionWriter<T> implements ValueWriter<Collection<T>> {
    private final ValueWriter<T> elementWriter;

    private CollectionWriter(ValueWriter<T> elementWriter) {
      this.elementWriter = elementWriter;
    }

    @Override
    public void write(Collection<T> array, Encoder encoder) throws IOException {
      encoder.writeArrayStart();
      int numElements = array.size();
      encoder.setItemCount(numElements);
      Iterator<T> iter = array.iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        elementWriter.write(iter.next(), encoder);
      }
      encoder.writeArrayEnd();
    }
  }

  private static class ArrayMapWriter<K, V> implements ValueWriter<Map<K, V>> {
    private final ValueWriter<K> keyWriter;
    private final ValueWriter<V> valueWriter;

    private ArrayMapWriter(ValueWriter<K> keyWriter, ValueWriter<V> valueWriter) {
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(Map<K, V> map, Encoder encoder) throws IOException {
      // encoded as an Avro array of key/value pairs, not a native Avro map
      encoder.writeArrayStart();
      int numElements = map.size();
      encoder.setItemCount(numElements);
      Iterator<Map.Entry<K, V>> iter = map.entrySet().iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        Map.Entry<K, V> entry = iter.next();
        keyWriter.write(entry.getKey(), encoder);
        valueWriter.write(entry.getValue(), encoder);
      }
      encoder.writeArrayEnd();
    }
  }

  private static class MapWriter<K, V> implements ValueWriter<Map<K, V>> {
    private final ValueWriter<K> keyWriter;
    private final ValueWriter<V> valueWriter;

    private MapWriter(ValueWriter<K> keyWriter, ValueWriter<V> valueWriter) {
      this.keyWriter = keyWriter;
      this.valueWriter = valueWriter;
    }

    @Override
    public void write(Map<K, V> map, Encoder encoder) throws IOException {
      encoder.writeMapStart();
      int numElements = map.size();
      encoder.setItemCount(numElements);
      Iterator<Map.Entry<K, V>> iter = map.entrySet().iterator();
      for (int i = 0; i < numElements; i += 1) {
        encoder.startItem();
        Map.Entry<K, V> entry = iter.next();
        keyWriter.write(entry.getKey(), encoder);
        valueWriter.write(entry.getValue(), encoder);
      }
      encoder.writeMapEnd();
    }
  }

  /**
   * Base writer for struct-like values: writes each position with the
   * corresponding positional writer.
   */
  public abstract static class StructWriter<S> implements ValueWriter<S> {
    private final ValueWriter<Object>[] writers;

    @SuppressWarnings("unchecked")
    protected StructWriter(List<ValueWriter<?>> writers) {
      this.writers = (ValueWriter<Object>[]) Array.newInstance(ValueWriter.class, writers.size());
      for (int i = 0; i < this.writers.length; i += 1) {
        this.writers[i] = (ValueWriter<Object>) writers.get(i);
      }
    }

    /** Returns the value at {@code pos} in {@code struct}. */
    protected abstract Object get(S struct, int pos);

    public ValueWriter<?> writer(int pos) {
      return writers[pos];
    }

    @Override
    public void write(S row, Encoder encoder) throws IOException {
      for (int i = 0; i < writers.length; i += 1) {
        writers[i].write(get(row, i), encoder);
      }
    }
  }

  private static class RecordWriter extends StructWriter<IndexedRecord> {
    private RecordWriter(List<ValueWriter<?>> writers) {
      super(writers);
    }

    @Override
    protected Object get(IndexedRecord struct, int pos) {
      return struct.get(pos);
    }
  }
}
| 6,369 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/UUIDConversion.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericFixed;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.UUID;
/**
 * Avro {@link Conversion} between 16-byte fixed values and {@link UUID}.
 * <p>
 * The fixed layout is big-endian: the most-significant 8 bytes of the UUID come first,
 * followed by the least-significant 8 bytes.
 */
public class UUIDConversion extends Conversion<UUID> {
  @Override
  public Class<UUID> getConvertedType() {
    return UUID.class;
  }

  @Override
  public String getLogicalTypeName() {
    return LogicalTypes.uuid().getName();
  }

  @Override
  public UUID fromFixed(GenericFixed value, Schema schema, LogicalType type) {
    ByteBuffer bytes = ByteBuffer.wrap(value.bytes()).order(ByteOrder.BIG_ENDIAN);
    // arguments evaluate left to right: first read is the most-significant half
    return new UUID(bytes.getLong(), bytes.getLong());
  }

  @Override
  public GenericFixed toFixed(UUID value, Schema schema, LogicalType type) {
    ByteBuffer bytes = ByteBuffer.allocate(16).order(ByteOrder.BIG_ENDIAN);
    bytes.putLong(value.getMostSignificantBits());
    bytes.putLong(value.getLeastSignificantBits());
    return new GenericData.Fixed(schema, bytes.array());
  }
}
| 6,370 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/avro/LogicalMap.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.Maps;
import org.apache.avro.Conversion;
import org.apache.avro.LogicalType;
import org.apache.avro.Schema;
import java.util.Collection;
import java.util.Map;
import static org.apache.avro.Schema.Type.ARRAY;
/**
 * Avro {@link LogicalType} that marks an array of key/value records as an Iceberg map.
 * <p>
 * The annotated schema must be an array whose element type is a key/value record
 * (see {@code AvroSchemaUtil.isKeyValueSchema}) — presumably used for maps whose keys
 * cannot be represented by Avro's native string-keyed map type (TODO confirm).
 */
public class LogicalMap extends LogicalType {
  // logical type name recorded in the Avro schema
  static final String NAME = "map";

  // the type carries no state, so a single shared instance is used
  private static final LogicalMap INSTANCE = new LogicalMap();

  static LogicalMap get() {
    return INSTANCE;
  }

  private LogicalMap() {
    super(NAME);
  }

  @Override
  public void validate(Schema schema) {
    super.validate(schema);
    // reject any schema that is not an array of key/value records
    Preconditions.checkArgument(schema.getType() == ARRAY,
        "Invalid type for map, must be an array: %s", schema);
    Preconditions.checkArgument(AvroSchemaUtil.isKeyValueSchema(schema.getElementType()),
        "Invalid key-value record: %s", schema.getElementType());
  }
}
| 6,371 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopFileIO.java | package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.FileIO;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
/**
 * {@link FileIO} implementation backed by the Hadoop {@link FileSystem} API.
 * <p>
 * The Hadoop {@link Configuration} is held through {@link SerializableConfiguration} so
 * instances of this class can be serialized.
 */
public class HadoopFileIO implements FileIO {
  private final SerializableConfiguration hadoopConf;

  public HadoopFileIO(Configuration hadoopConf) {
    this.hadoopConf = new SerializableConfiguration(hadoopConf);
  }

  @Override
  public InputFile newInputFile(String path) {
    return HadoopInputFile.fromLocation(path, hadoopConf.get());
  }

  @Override
  public OutputFile newOutputFile(String path) {
    return HadoopOutputFile.fromPath(new Path(path), hadoopConf.get());
  }

  /**
   * Deletes the file at the given path.
   * <p>
   * Deleting a path that does not exist is a no-op, keeping this call idempotent.
   *
   * @throws RuntimeIOException if the file exists but could not be deleted
   */
  @Override
  public void deleteFile(String path) {
    Path toDelete = new Path(path);
    FileSystem fs = Util.getFS(toDelete, hadoopConf.get());
    try {
      // FileSystem.delete signals some failures by returning false instead of throwing;
      // previously that result was ignored and failed deletes were silently swallowed.
      // Treat false as a failure unless the file is already gone.
      if (!fs.delete(toDelete, false /* not recursive */) && fs.exists(toDelete)) {
        throw new RuntimeIOException(
            new IOException("delete returned false"), "Failed to delete file: %s", path);
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to delete file: %s", path);
    }
  }
}
| 6,372 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopStreams.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.io.DelegatingInputStream;
import com.netflix.iceberg.io.DelegatingOutputStream;
import com.netflix.iceberg.io.PositionOutputStream;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
/**
* Convenience methods to get Parquet abstractions for Hadoop data streams.
*
* This class is based on Parquet's HadoopStreams.
*/
/**
 * Convenience methods to wrap Hadoop data streams in Iceberg's stream abstractions.
 *
 * This class is based on Parquet's HadoopStreams.
 */
// note: the previous unused SLF4J Logger field has been removed
class HadoopStreams {

  private HadoopStreams() {
    // static utility class; not meant to be instantiated
  }

  /**
   * Wraps a {@link FSDataInputStream} in a {@link SeekableInputStream} implementation for readers.
   *
   * @param stream a Hadoop FSDataInputStream
   * @return a SeekableInputStream
   */
  static SeekableInputStream wrap(FSDataInputStream stream) {
    return new HadoopSeekableInputStream(stream);
  }

  /**
   * Wraps a {@link FSDataOutputStream} in a {@link PositionOutputStream} implementation for
   * writers.
   *
   * @param stream a Hadoop FSDataOutputStream
   * @return a PositionOutputStream
   */
  static PositionOutputStream wrap(FSDataOutputStream stream) {
    return new HadoopPositionOutputStream(stream);
  }

  /**
   * SeekableInputStream implementation for FSDataInputStream that implements ByteBufferReadable in
   * Hadoop 2.
   * <p>
   * All calls delegate directly to the wrapped stream.
   */
  private static class HadoopSeekableInputStream extends SeekableInputStream
      implements DelegatingInputStream {
    private final FSDataInputStream stream;

    HadoopSeekableInputStream(FSDataInputStream stream) {
      this.stream = stream;
    }

    @Override
    public InputStream getDelegate() {
      return stream;
    }

    @Override
    public void close() throws IOException {
      stream.close();
    }

    @Override
    public long getPos() throws IOException {
      return stream.getPos();
    }

    @Override
    public void seek(long newPos) throws IOException {
      stream.seek(newPos);
    }

    @Override
    public int read() throws IOException {
      return stream.read();
    }

    @Override
    public int read(byte[] b, int off, int len) throws IOException {
      return stream.read(b, off, len);
    }

    // additional entry point (not an override): delegates to Hadoop's ByteBuffer read
    public int read(ByteBuffer buf) throws IOException {
      return stream.read(buf);
    }
  }

  /**
   * PositionOutputStream implementation for FSDataOutputStream.
   * <p>
   * All calls delegate directly to the wrapped stream.
   */
  private static class HadoopPositionOutputStream extends PositionOutputStream
      implements DelegatingOutputStream {
    private final FSDataOutputStream stream;

    // package-private: a public constructor on a private nested class is meaningless
    HadoopPositionOutputStream(FSDataOutputStream stream) {
      this.stream = stream;
    }

    @Override
    public OutputStream getDelegate() {
      return stream;
    }

    @Override
    public long getPos() throws IOException {
      return stream.getPos();
    }

    @Override
    public void write(int b) throws IOException {
      stream.write(b);
    }

    @Override
    public void write(byte[] b) throws IOException {
      stream.write(b);
    }

    @Override
    public void write(byte[] b, int off, int len) throws IOException {
      stream.write(b, off, len);
    }

    @Override
    public void flush() throws IOException {
      stream.flush();
    }

    @Override
    public void close() throws IOException {
      stream.close();
    }
  }
}
| 6,373 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/Util.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
 * Internal Hadoop {@link FileSystem} helpers.
 */
class Util {
  // utility class; not meant to be instantiated
  private Util() {
  }

  /**
   * Returns the {@link FileSystem} that owns the given path, resolved with the given
   * configuration.
   *
   * @param path a Hadoop path
   * @param conf a Hadoop configuration used to resolve the file system
   * @return the file system for the path
   * @throws RuntimeIOException if the file system cannot be resolved
   */
  public static FileSystem getFS(Path path, Configuration conf) {
    try {
      return path.getFileSystem(conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }
}
| 6,374 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopInputFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.SeekableInputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
* {@link InputFile} implementation using the Hadoop {@link FileSystem} API.
* <p>
* This class is based on Parquet's HadoopInputFile.
*/
/**
 * {@link InputFile} implementation using the Hadoop {@link FileSystem} API.
 * <p>
 * This class is based on Parquet's HadoopInputFile.
 */
public class HadoopInputFile implements InputFile {
  private final FileSystem fs;
  private final Path path;
  private final Configuration conf;
  // file status and length are resolved lazily and cached after the first lookup;
  // NOTE(review): the lazy caching is not synchronized — confirm single-threaded use
  private FileStatus stat = null;
  private Long length = null;

  /** Creates an input file from a location string, resolving the FileSystem from conf. */
  public static HadoopInputFile fromLocation(CharSequence location, Configuration conf) {
    Path path = new Path(location.toString());
    return fromPath(path, conf);
  }

  /** Creates an input file with a known length, avoiding a file status lookup for it. */
  public static HadoopInputFile fromLocation(CharSequence location, long length,
                                             Configuration conf) {
    Path path = new Path(location.toString());
    return fromPath(path, length, conf);
  }

  /**
   * Creates an input file from a Hadoop path.
   *
   * @throws RuntimeIOException if the file system cannot be resolved for the path
   */
  public static HadoopInputFile fromPath(Path path, Configuration conf) {
    try {
      FileSystem fs = path.getFileSystem(conf);
      return new HadoopInputFile(fs, path, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }

  /** Creates an input file from a Hadoop path with a known length. */
  public static HadoopInputFile fromPath(Path path, long length, Configuration conf) {
    try {
      FileSystem fs = path.getFileSystem(conf);
      return new HadoopInputFile(fs, path, length, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", path);
    }
  }

  /** Creates an input file from an existing {@link FileStatus}, reusing its path and length. */
  public static HadoopInputFile fromStatus(FileStatus stat, Configuration conf) {
    try {
      FileSystem fs = stat.getPath().getFileSystem(conf);
      return new HadoopInputFile(fs, stat, conf);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", stat.getPath());
    }
  }

  private HadoopInputFile(FileSystem fs, Path path, Configuration conf) {
    this.fs = fs;
    this.path = path;
    this.conf = conf;
  }

  private HadoopInputFile(FileSystem fs, Path path, long length, Configuration conf) {
    this.fs = fs;
    this.path = path;
    this.conf = conf;
    this.length = length;
  }

  private HadoopInputFile(FileSystem fs, FileStatus stat, Configuration conf) {
    this.fs = fs;
    this.path = stat.getPath();
    this.stat = stat;
    this.conf = conf;
    this.length = stat.getLen();
  }

  // fetches and caches the file status on first use
  private FileStatus lazyStat() {
    if (stat == null) {
      try {
        this.stat = fs.getFileStatus(path);
      } catch (IOException e) {
        throw new RuntimeIOException(e, "Failed to get status for file: %s", path);
      }
    }
    return stat;
  }

  @Override
  public long getLength() {
    // if the length was not supplied at construction, resolve it from the file status
    if (length == null) {
      this.length = lazyStat().getLen();
    }
    return length;
  }

  /**
   * Opens a new seekable stream for this file.
   *
   * @throws RuntimeIOException if the file cannot be opened
   */
  @Override
  public SeekableInputStream newStream() {
    try {
      return HadoopStreams.wrap(fs.open(path));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to open input stream for file: %s", path);
    }
  }

  public Configuration getConf() {
    return conf;
  }

  public FileStatus getStat() {
    return lazyStat();
  }

  @Override
  public String location() {
    return path.toString();
  }

  @Override
  public String toString() {
    return path.toString();
  }
}
| 6,375 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopOutputFile.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.io.PositionOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.io.IOException;
/**
* {@link OutputFile} implementation using the Hadoop {@link FileSystem} API.
*/
/**
 * {@link OutputFile} implementation using the Hadoop {@link FileSystem} API.
 */
public class HadoopOutputFile implements OutputFile {
  public static OutputFile fromPath(Path path, Configuration conf) {
    return new HadoopOutputFile(path, conf);
  }

  private final Path path;
  private final Configuration conf;

  private HadoopOutputFile(Path path, Configuration conf) {
    this.path = path;
    this.conf = conf;
  }

  /**
   * Creates the file, failing if it already exists.
   *
   * @throws AlreadyExistsException if the path already exists
   * @throws RuntimeIOException for any other failure while creating the file
   */
  @Override
  public PositionOutputStream create() {
    FileSystem fs = Util.getFS(path, conf);
    try {
      return HadoopStreams.wrap(fs.create(path, false /* do not overwrite */));
    } catch (FileAlreadyExistsException e) {
      throw new AlreadyExistsException(e, "Path already exists: %s", path);
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create file: %s", path);
    }
  }

  /**
   * Creates the file, replacing any existing file at the same path.
   *
   * @throws RuntimeIOException if the file cannot be created
   */
  @Override
  public PositionOutputStream createOrOverwrite() {
    FileSystem fs = Util.getFS(path, conf);
    try {
      return HadoopStreams.wrap(fs.create(path, true /* overwrite */ ));
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to create file: %s", path);
    }
  }

  public Path getPath() {
    return path;
  }

  public Configuration getConf() {
    return conf;
  }

  @Override
  public String location() {
    return path.toString();
  }

  @Override
  public InputFile toInputFile() {
    return HadoopInputFile.fromPath(path, conf);
  }

  @Override
  public String toString() {
    return location();
  }
}
| 6,376 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.FileIO;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.exceptions.ValidationException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.UUID;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
/**
* TableOperations implementation for file systems that support atomic rename.
* <p>
* This maintains metadata in a "metadata" folder under the table location.
*/
/**
 * TableOperations implementation for file systems that support atomic rename.
 * <p>
 * This maintains metadata in a "metadata" folder under the table location. Each commit
 * writes a new metadata file and publishes it with an atomic rename to {@code vN}; a
 * best-effort "version-hint.text" file records the latest known version so readers can
 * start scanning from it.
 */
public class HadoopTableOperations implements TableOperations {
  private static final Logger LOG = LoggerFactory.getLogger(HadoopTableOperations.class);

  private final Configuration conf;
  private final Path location;
  private TableMetadata currentMetadata = null;
  private Integer version = null;
  private boolean shouldRefresh = true;
  private HadoopFileIO defaultFileIo = null;

  protected HadoopTableOperations(Path location, Configuration conf) {
    this.conf = conf;
    this.location = location;
  }

  @Override
  public TableMetadata current() {
    if (shouldRefresh) {
      return refresh();
    }
    return currentMetadata;
  }

  @Override
  public TableMetadata refresh() {
    // start from the cached version when available, otherwise from the hint file
    int ver = version != null ? version : readVersionHint();
    Path metadataFile = metadataFile(ver);
    FileSystem fs = getFS(metadataFile, conf);
    try {
      // don't check if the file exists if version is non-null because it was already checked
      if (version == null && !fs.exists(metadataFile)) {
        if (ver == 0) {
          // no v0 metadata means the table doesn't exist yet
          return null;
        }
        throw new ValidationException("Metadata file is missing: %s", metadataFile);
      }

      // the hint is best-effort, so scan forward to the latest committed version
      while (fs.exists(metadataFile(ver + 1))) {
        ver += 1;
        metadataFile = metadataFile(ver);
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", metadataFile);
    }

    this.version = ver;
    this.currentMetadata = TableMetadataParser.read(this,
        io().newInputFile(metadataFile.toString()));
    this.shouldRefresh = false;
    return currentMetadata;
  }

  @Override
  public void commit(TableMetadata base, TableMetadata metadata) {
    if (base != current()) {
      throw new CommitFailedException("Cannot commit changes based on stale table metadata");
    }
    if (base == metadata) {
      LOG.info("Nothing to commit.");
      return;
    }

    // write the new metadata to a temporary file, then atomically rename it into place
    Path tempMetadataFile = metadataPath(UUID.randomUUID().toString() + getFileExtension(conf));
    TableMetadataParser.write(metadata, io().newOutputFile(tempMetadataFile.toString()));

    int nextVersion = (version != null ? version : 0) + 1;
    Path finalMetadataFile = metadataFile(nextVersion);
    FileSystem fs = getFS(tempMetadataFile, conf);
    try {
      if (fs.exists(finalMetadataFile)) {
        throw new CommitFailedException(
            "Version %d already exists: %s", nextVersion, finalMetadataFile);
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e,
          "Failed to check if next version exists: " + finalMetadataFile);
    }

    try {
      // this rename operation is the atomic commit operation
      if (!fs.rename(tempMetadataFile, finalMetadataFile)) {
        throw new CommitFailedException(
            "Failed to commit changes using rename: %s", finalMetadataFile);
      }
    } catch (IOException e) {
      throw new CommitFailedException(e,
          "Failed to commit changes using rename: %s", finalMetadataFile);
    }

    // update the best-effort version pointer
    writeVersionHint(nextVersion);
    this.shouldRefresh = true;
  }

  @Override
  public FileIO io() {
    if (defaultFileIo == null) {
      defaultFileIo = new HadoopFileIO(conf);
    }
    return defaultFileIo;
  }

  @Override
  public String metadataFileLocation(String fileName) {
    return metadataPath(fileName).toString();
  }

  @Override
  public long newSnapshotId() {
    // NOTE(review): wall-clock time is used as the snapshot id; two commits in the same
    // millisecond would collide — confirm whether uniqueness is required here
    return System.currentTimeMillis();
  }

  // renamed parameter to avoid shadowing the "version" field
  private Path metadataFile(int metadataVersion) {
    return metadataPath("v" + metadataVersion + getFileExtension(conf));
  }

  private Path metadataPath(String filename) {
    return new Path(new Path(location, "metadata"), filename);
  }

  private Path versionHintFile() {
    return metadataPath("version-hint.text");
  }

  private void writeVersionHint(int versionToWrite) {
    Path versionHintFile = versionHintFile();
    FileSystem fs = getFS(versionHintFile, conf);
    try (FSDataOutputStream out = fs.create(versionHintFile, true /* overwrite */ )) {
      out.write(String.valueOf(versionToWrite).getBytes("UTF-8"));
    } catch (IOException e) {
      // the hint is best-effort; refresh() recovers by scanning forward from the hint
      LOG.warn("Failed to update version hint", e);
    }
  }

  private int readVersionHint() {
    Path versionHintFile = versionHintFile();
    try {
      FileSystem fs = Util.getFS(versionHintFile, conf);
      if (!fs.exists(versionHintFile)) {
        return 0;
      }

      try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(versionHintFile)))) {
        // guard against an empty hint file: readLine() returns null at EOF, which
        // previously caused a NullPointerException
        String line = in.readLine();
        return line == null ? 0 : Integer.parseInt(line.trim());
      }
    } catch (IOException e) {
      throw new RuntimeIOException(e, "Failed to get file system for path: %s", versionHintFile);
    }
  }

  protected FileSystem getFS(Path path, Configuration conf) {
    return Util.getFS(path, conf);
  }
}
| 6,377 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/SerializableConfiguration.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.hadoop;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import org.apache.hadoop.conf.Configuration;
/**
* Wraps a {@link Configuration} object in a {@link Serializable} layer.
*/
/**
 * Wraps a {@link Configuration} object in a {@link Serializable} layer.
 * <p>
 * Hadoop's Configuration is not serializable; this wrapper writes the configuration with
 * {@link Configuration#write} during serialization and restores it with
 * {@link Configuration#readFields} during deserialization.
 */
public class SerializableConfiguration implements Serializable {
  // transient because it is serialized manually in writeObject/readObject
  private transient Configuration hadoopConf;

  // fixed typo'd parameter name "hadoopCOnf"
  public SerializableConfiguration(Configuration hadoopConf) {
    this.hadoopConf = hadoopConf;
  }

  private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
    hadoopConf.write(out);
  }

  private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException {
    in.defaultReadObject();
    // start from an empty configuration (no default resources) and read back the fields
    hadoopConf = new Configuration(false);
    hadoopConf.readFields(in);
  }

  /** Returns the wrapped configuration. */
  public Configuration get() {
    return hadoopConf;
  }
}
| 6,378 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/hadoop/HadoopTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.netflix.iceberg.BaseTable;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableOperations;
import com.netflix.iceberg.Tables;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
* Implementation of Iceberg tables that uses the Hadoop FileSystem
* to store metadata and manifests.
*/
/**
 * Implementation of Iceberg tables that uses the Hadoop FileSystem
 * to store metadata and manifests.
 */
public class HadoopTables implements Tables, Configurable {
  private Configuration conf;

  public HadoopTables() {
    this(new Configuration());
  }

  public HadoopTables(Configuration conf) {
    this.conf = conf;
  }

  /**
   * Loads the table location from a FileSystem path location.
   *
   * @param location a path URI (e.g. hdfs:///warehouse/my_table/)
   * @return table implementation
   * @throws NoSuchTableException if no table metadata exists at the location
   */
  @Override
  public Table load(String location) {
    TableOperations ops = newTableOps(location);
    // a null current() means no metadata has ever been committed at this location
    if (ops.current() == null) {
      throw new NoSuchTableException("Table does not exist at location: " + location);
    }

    return new BaseTable(ops, location);
  }

  /**
   * Create a table using the FileSystem implementation resolve from
   * location.
   *
   * @param schema iceberg schema used to create the table
   * @param spec partition specification
   * @param location a path URI (e.g. hdfs:///warehouse/my_table)
   * @return newly created table implementation
   * @throws AlreadyExistsException if table metadata already exists at the location
   */
  @Override
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
                      String location) {
    TableOperations ops = newTableOps(location);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table already exists at location: " + location);
    }

    // committing with a null base writes the first metadata version for the table
    TableMetadata metadata = newTableMetadata(ops, schema, spec, location, properties);
    ops.commit(null, metadata);

    return new BaseTable(ops, location);
  }

  private TableOperations newTableOps(String location) {
    return new HadoopTableOperations(new Path(location), conf);
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}
| 6,379 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynFields.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Joiner;
import com.google.common.base.Objects;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import com.google.common.collect.Sets;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Set;
public class DynFields {
/**
* Convenience wrapper class around {@link java.lang.reflect.Field}.
*
* Allows callers to invoke the wrapped method with all Exceptions wrapped by
* RuntimeException, or with a single Exception catch block.
*/
public static class UnboundField<T> {
private final Field field;
private final String name;
private UnboundField(Field field, String name) {
this.field = field;
this.name = name;
}
@SuppressWarnings("unchecked")
public T get(Object target) {
try {
return (T) field.get(target);
} catch (IllegalAccessException e) {
throw Throwables.propagate(e);
}
}
public void set(Object target, T value) {
try {
field.set(target, value);
} catch (IllegalAccessException e) {
throw Throwables.propagate(e);
}
}
public String toString() {
return Objects.toStringHelper(this)
.add("class", field.getDeclaringClass().toString())
.add("name", name)
.add("type", field.getType())
.toString();
}
/**
* Returns this method as a BoundMethod for the given receiver.
*
* @param target an Object on which to get or set this field
* @return a {@link BoundField} for this field and the target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
*/
public BoundField<T> bind(Object target) {
Preconditions.checkState(!isStatic() || this == AlwaysNull.INSTANCE,
"Cannot bind static field " + name);
Preconditions.checkArgument(
field.getDeclaringClass().isAssignableFrom(target.getClass()),
"Cannot bind field " + name + " to instance of " +
target.getClass());
return new BoundField<>(this, target);
}
/**
* Returns this field as a StaticField.
*
* @return a {@link StaticField} for this field
* @throws IllegalStateException if the method is not static
*/
public StaticField<T> asStatic() {
Preconditions.checkState(isStatic(), "Field " + name + " is not static");
return new StaticField<>(this);
}
/**
* @return whether the field is a static field
*/
public boolean isStatic() {
return Modifier.isStatic(field.getModifiers());
}
/**
* @return whether the field is always null
*/
public boolean isAlwaysNull() {
return this == AlwaysNull.INSTANCE;
}
}
  /**
   * Singleton UnboundField that ignores sets and always returns null from get.
   * <p>
   * Serves as the fallback when {@code Builder#defaultAlwaysNull()} is set and no field
   * implementation is found.
   */
  private static class AlwaysNull extends UnboundField<Void> {
    private static final AlwaysNull INSTANCE = new AlwaysNull();

    private AlwaysNull() {
      // no underlying Field; get/set are overridden so the null field is never used here
      super(null, "AlwaysNull");
    }

    @Override
    public Void get(Object target) {
      return null;
    }

    @Override
    public void set(Object target, Void value) {
      // intentionally a no-op
    }

    public String toString() {
      return "Field(AlwaysNull)";
    }

    @Override
    public boolean isStatic() {
      return true;
    }

    @Override
    public boolean isAlwaysNull() {
      return true;
    }
  }
  /**
   * Accessor for a static field; get and set pass a null receiver to the wrapped field.
   */
  public static class StaticField<T> {
    private final UnboundField<T> field;

    private StaticField(UnboundField<T> field) {
      this.field = field;
    }

    public T get() {
      return field.get(null);
    }

    public void set(T value) {
      field.set(null, value);
    }
  }
  /**
   * Accessor for a field bound to a fixed target instance.
   */
  public static class BoundField<T> {
    private final UnboundField<T> field;
    private final Object target;

    private BoundField(UnboundField<T> field, Object target) {
      this.field = field;
      this.target = target;
    }

    public T get() {
      return field.get(target);
    }

    public void set(T value) {
      field.set(target, value);
    }
  }
  /**
   * Returns a new {@link Builder} for constructing field accessors.
   */
  public static Builder builder() {
    return new Builder();
  }
public static class Builder {
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private UnboundField<?> field = null;
private final Set<String> candidates = Sets.newHashSet();
private boolean defaultAlwaysNull = false;
/**
* Set the {@link ClassLoader} used to lookup classes by name.
* <p>
* If not set, the current thread's ClassLoader is used.
*
* @param loader a ClassLoader
* @return this Builder for method chaining
*/
public Builder loader(ClassLoader loader) {
this.loader = loader;
return this;
}
/**
* Instructs this builder to return AlwaysNull if no implementation is
* found.
*
* @return this Builder for method chaining
*/
public Builder defaultAlwaysNull() {
this.defaultAlwaysNull = true;
return this;
}
/**
* Checks for an implementation, first finding the class by name.
*
* @param className name of a class
* @param fieldName name of the field
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder impl(String className, String fieldName) {
// don't do any work if an implementation has been found
if (field != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
impl(targetClass, fieldName);
} catch (ClassNotFoundException e) {
// not the right implementation
candidates.add(className + "." + fieldName);
}
return this;
}
/**
* Checks for an implementation.
*
* @param targetClass a class instance
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder impl(Class<?> targetClass, String fieldName) {
// don't do any work if an implementation has been found
if (field != null || targetClass == null) {
return this;
}
try {
this.field = new UnboundField<>(
targetClass.getField(fieldName), fieldName);
} catch (NoSuchFieldException e) {
// not the right implementation
candidates.add(targetClass.getName() + "." + fieldName);
}
return this;
}
/**
* Checks for a hidden implementation, first finding the class by name.
*
* @param className name of a class
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder hiddenImpl(String className, String fieldName) {
// don't do any work if an implementation has been found
if (field != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
hiddenImpl(targetClass, fieldName);
} catch (ClassNotFoundException e) {
// not the right implementation
candidates.add(className + "." + fieldName);
}
return this;
}
/**
* Checks for a hidden implementation.
*
* @param targetClass a class instance
* @param fieldName name of a field (different from constructor)
* @return this Builder for method chaining
* @see java.lang.Class#forName(String)
* @see java.lang.Class#getField(String)
*/
public Builder hiddenImpl(Class<?> targetClass, String fieldName) {
// don't do any work if an implementation has been found
if (field != null || targetClass == null) {
return this;
}
try {
Field hidden = targetClass.getDeclaredField(fieldName);
AccessController.doPrivileged(new MakeFieldAccessible(hidden));
this.field = new UnboundField(hidden, fieldName);
} catch (SecurityException | NoSuchFieldException e) {
// unusable
candidates.add(targetClass.getName() + "." + fieldName);
}
return this;
}
/**
* Returns the first valid implementation as a UnboundField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link UnboundField} with a valid implementation
* @throws NoSuchFieldException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <T> UnboundField<T> buildChecked() throws NoSuchFieldException {
if (field != null) {
return (UnboundField<T>) field;
} else if (defaultAlwaysNull) {
return (UnboundField<T>) AlwaysNull.INSTANCE;
} else {
throw new NoSuchFieldException("Cannot find field from candidates: " +
Joiner.on(", ").join(candidates));
}
}
/**
* Returns the first valid implementation as a UnboundField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link UnboundField} with a valid implementation
* @throws RuntimeException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <T> UnboundField<T> build() {
if (field != null) {
return (UnboundField<T>) field;
} else if (defaultAlwaysNull) {
return (UnboundField<T>) AlwaysNull.INSTANCE;
} else {
throw new RuntimeException("Cannot find field from candidates: " +
Joiner.on(", ").join(candidates));
}
}
/**
* Returns the first valid implementation as a BoundMethod or throws a
* NoSuchMethodException if there is none.
*
* @param target an Object on which to get and set the field
* @param <T> Java class stored in the field
* @return a {@link BoundField} with a valid implementation and target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
* @throws NoSuchFieldException if no implementation was found
*/
public <T> BoundField<T> buildChecked(Object target) throws NoSuchFieldException {
return this.<T>buildChecked().bind(target);
}
/**
* Returns the first valid implementation as a BoundMethod or throws a
* RuntimeException if there is none.
*
* @param target an Object on which to get and set the field
* @param <T> Java class stored in the field
* @return a {@link BoundField} with a valid implementation and target
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
* @throws RuntimeException if no implementation was found
*/
public <T> BoundField<T> build(Object target) {
return this.<T>build().bind(target);
}
/**
* Returns the first valid implementation as a StaticField or throws a
* NoSuchFieldException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link StaticField} with a valid implementation
* @throws IllegalStateException if the method is not static
* @throws NoSuchFieldException if no implementation was found
*/
public <T> StaticField<T> buildStaticChecked() throws NoSuchFieldException {
return this.<T>buildChecked().asStatic();
}
/**
* Returns the first valid implementation as a StaticField or throws a
* RuntimeException if there is none.
*
* @param <T> Java class stored in the field
* @return a {@link StaticField} with a valid implementation
* @throws IllegalStateException if the method is not static
* @throws RuntimeException if no implementation was found
*/
public <T> StaticField<T> buildStatic() {
return this.<T>build().asStatic();
}
}
private static class MakeFieldAccessible implements PrivilegedAction<Void> {
private Field hidden;
public MakeFieldAccessible(Field hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
}
| 6,380 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynMethods.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
/**
* Copied from parquet-common
*/
public class DynMethods {
/**
* Convenience wrapper class around {@link java.lang.reflect.Method}.
*
* Allows callers to invoke the wrapped method with all Exceptions wrapped by
* RuntimeException, or with a single Exception catch block.
*/
public static class UnboundMethod {
private final Method method;
private final String name;
private final int argLength;
UnboundMethod(Method method, String name) {
this.method = method;
this.name = name;
this.argLength = (method == null || method.isVarArgs()) ? -1 :
method.getParameterTypes().length;
}
@SuppressWarnings("unchecked")
public <R> R invokeChecked(Object target, Object... args) throws Exception {
try {
if (argLength < 0) {
return (R) method.invoke(target, args);
} else {
return (R) method.invoke(target, Arrays.copyOfRange(args, 0, argLength));
}
} catch (InvocationTargetException e) {
Throwables.propagateIfInstanceOf(e.getCause(), Exception.class);
Throwables.propagateIfInstanceOf(e.getCause(), RuntimeException.class);
throw Throwables.propagate(e.getCause());
}
}
public <R> R invoke(Object target, Object... args) {
try {
return this.invokeChecked(target, args);
} catch (Exception e) {
Throwables.propagateIfInstanceOf(e, RuntimeException.class);
throw Throwables.propagate(e);
}
}
/**
* Returns this method as a BoundMethod for the given receiver.
*
* @param receiver an Object to receive the method invocation
* @return a {@link BoundMethod} for this method and the receiver
* @throws IllegalStateException if the method is static
* @throws IllegalArgumentException if the receiver's class is incompatible
*/
public BoundMethod bind(Object receiver) {
Preconditions.checkState(!isStatic(),
"Cannot bind static method " + method.toGenericString());
Preconditions.checkArgument(
method.getDeclaringClass().isAssignableFrom(receiver.getClass()),
"Cannot bind " + method.toGenericString() + " to instance of " +
receiver.getClass());
return new BoundMethod(this, receiver);
}
/**
* @return whether the method is a static method
*/
public boolean isStatic() {
return Modifier.isStatic(method.getModifiers());
}
/**
* @return whether the method is a noop
*/
public boolean isNoop() {
return this == NOOP;
}
/**
* Returns this method as a StaticMethod.
*
* @return a {@link StaticMethod} for this method
* @throws IllegalStateException if the method is not static
*/
public StaticMethod asStatic() {
Preconditions.checkState(isStatic(), "Method is not static");
return new StaticMethod(this);
}
public String toString() {
return "DynMethods.UnboundMethod(name=" + name +" method=" +
method.toGenericString() + ")";
}
/**
* Singleton {@link UnboundMethod}, performs no operation and returns null.
*/
private static UnboundMethod NOOP = new UnboundMethod(null, "NOOP") {
@Override
public <R> R invokeChecked(Object target, Object... args) throws Exception {
return null;
}
@Override
public BoundMethod bind(Object receiver) {
return new BoundMethod(this, receiver);
}
@Override
public StaticMethod asStatic() {
return new StaticMethod(this);
}
@Override
public boolean isStatic() {
return true;
}
@Override
public String toString() {
return "DynMethods.UnboundMethod(NOOP)";
}
};
}
public static class BoundMethod {
private final UnboundMethod method;
private final Object receiver;
private BoundMethod(UnboundMethod method, Object receiver) {
this.method = method;
this.receiver = receiver;
}
public <R> R invokeChecked(Object... args) throws Exception {
return method.invokeChecked(receiver, args);
}
public <R> R invoke(Object... args) {
return method.invoke(receiver, args);
}
}
public static class StaticMethod {
private final UnboundMethod method;
private StaticMethod(UnboundMethod method) {
this.method = method;
}
public <R> R invokeChecked(Object... args) throws Exception {
return method.invokeChecked(null, args);
}
public <R> R invoke(Object... args) {
return method.invoke(null, args);
}
}
/**
* Constructs a new builder for calling methods dynamically.
*
* @param methodName name of the method the builder will locate
* @return a Builder for finding a method
*/
public static Builder builder(String methodName) {
return new Builder(methodName);
}
  /**
   * Builder that tries a chain of candidate methods (public, hidden, or
   * constructors) and keeps the first one that resolves. Each impl/hiddenImpl
   * call is a no-op once a method has been found, so candidates are tried in
   * registration order. Lookup failures are deliberately swallowed: a missing
   * class or method simply means "not this candidate".
   */
  public static class Builder {
    private final String name;
    private ClassLoader loader = Thread.currentThread().getContextClassLoader();
    private UnboundMethod method = null;

    public Builder(String methodName) {
      this.name = methodName;
    }

    /**
     * Set the {@link ClassLoader} used to lookup classes by name.
     * <p>
     * If not set, the current thread's ClassLoader is used.
     *
     * @param loader a ClassLoader
     * @return this Builder for method chaining
     */
    public Builder loader(ClassLoader loader) {
      this.loader = loader;
      return this;
    }

    /**
     * If no implementation has been found, adds a NOOP method.
     *
     * Note: calls to impl will not match after this method is called!
     *
     * @return this Builder for method chaining
     */
    public Builder orNoop() {
      if (method == null) {
        this.method = UnboundMethod.NOOP;
      }
      return this;
    }

    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * @param className name of a class
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(String className, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        Class<?> targetClass = Class.forName(className, true, loader);
        impl(targetClass, methodName, argClasses);
      } catch (ClassNotFoundException e) {
        // not the right implementation
      }
      return this;
    }

    /**
     * Checks for an implementation, first finding the given class by name.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param className name of a class
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(String className, Class<?>... argClasses) {
      impl(className, name, argClasses);
      return this;
    }

    /**
     * Checks for a method implementation.
     *
     * @param targetClass a class instance
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(Class<?> targetClass, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        // lookup uses methodName, but the stored display name is the builder's
        this.method = new UnboundMethod(
            targetClass.getMethod(methodName, argClasses), name);
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }

    /**
     * Checks for a method implementation.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder impl(Class<?> targetClass, Class<?>... argClasses) {
      impl(targetClass, name, argClasses);
      return this;
    }

    /**
     * Checks for a constructor implementation; the resulting UnboundMethod
     * invokes the constructor (target must be null when invoking).
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the constructor
     * @return this Builder for method chaining
     */
    public Builder ctorImpl(Class<?> targetClass, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        this.method = new DynConstructors.Builder()
            .impl(targetClass, argClasses)
            .buildChecked();
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }

    /**
     * Checks for a constructor implementation, first finding the class by name.
     *
     * @param className name of a class
     * @param argClasses argument classes for the constructor
     * @return this Builder for method chaining
     */
    public Builder ctorImpl(String className, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        this.method = new DynConstructors.Builder()
            .impl(className, argClasses)
            .buildChecked();
      } catch (NoSuchMethodException e) {
        // not the right implementation
      }
      return this;
    }

    /**
     * Checks for a hidden (non-public) implementation, first finding the given
     * class by name.
     *
     * @param className name of a class
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(String className, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        Class<?> targetClass = Class.forName(className, true, loader);
        hiddenImpl(targetClass, methodName, argClasses);
      } catch (ClassNotFoundException e) {
        // not the right implementation
      }
      return this;
    }

    /**
     * Checks for a hidden (non-public) implementation, first finding the given
     * class by name.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param className name of a class
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(String className, Class<?>... argClasses) {
      hiddenImpl(className, name, argClasses);
      return this;
    }

    /**
     * Checks for a hidden (non-public) method implementation.
     *
     * @param targetClass a class instance
     * @param methodName name of a method (different from constructor)
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(Class<?> targetClass, String methodName, Class<?>... argClasses) {
      // don't do any work if an implementation has been found
      if (method != null) {
        return this;
      }

      try {
        Method hidden = targetClass.getDeclaredMethod(methodName, argClasses);
        AccessController.doPrivileged(new MakeAccessible(hidden));
        this.method = new UnboundMethod(hidden, name);
      } catch (SecurityException | NoSuchMethodException e) {
        // unusable or not the right implementation
      }
      return this;
    }

    /**
     * Checks for a hidden (non-public) method implementation.
     *
     * The name passed to the constructor is the method name used.
     *
     * @param targetClass a class instance
     * @param argClasses argument classes for the method
     * @return this Builder for method chaining
     * @see java.lang.Class#forName(String)
     * @see java.lang.Class#getMethod(String, Class[])
     */
    public Builder hiddenImpl(Class<?> targetClass, Class<?>... argClasses) {
      hiddenImpl(targetClass, name, argClasses);
      return this;
    }

    /**
     * Returns the first valid implementation as a UnboundMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @return a {@link UnboundMethod} with a valid implementation
     * @throws NoSuchMethodException if no implementation was found
     */
    public UnboundMethod buildChecked() throws NoSuchMethodException {
      if (method != null) {
        return method;
      } else {
        throw new NoSuchMethodException("Cannot find method: " + name);
      }
    }

    /**
     * Returns the first valid implementation as a UnboundMethod or throws a
     * RuntimeException if there is none.
     *
     * @return a {@link UnboundMethod} with a valid implementation
     * @throws RuntimeException if no implementation was found
     */
    public UnboundMethod build() {
      if (method != null) {
        return method;
      } else {
        throw new RuntimeException("Cannot find method: " + name);
      }
    }

    /**
     * Returns the first valid implementation as a BoundMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @param receiver an Object to receive the method invocation
     * @return a {@link BoundMethod} with a valid implementation and receiver
     * @throws IllegalStateException if the method is static
     * @throws IllegalArgumentException if the receiver's class is incompatible
     * @throws NoSuchMethodException if no implementation was found
     */
    public BoundMethod buildChecked(Object receiver) throws NoSuchMethodException {
      return buildChecked().bind(receiver);
    }

    /**
     * Returns the first valid implementation as a BoundMethod or throws a
     * RuntimeException if there is none.
     *
     * @param receiver an Object to receive the method invocation
     * @return a {@link BoundMethod} with a valid implementation and receiver
     * @throws IllegalStateException if the method is static
     * @throws IllegalArgumentException if the receiver's class is incompatible
     * @throws RuntimeException if no implementation was found
     */
    public BoundMethod build(Object receiver) {
      return build().bind(receiver);
    }

    /**
     * Returns the first valid implementation as a StaticMethod or throws a
     * NoSuchMethodException if there is none.
     *
     * @return a {@link StaticMethod} with a valid implementation
     * @throws IllegalStateException if the method is not static
     * @throws NoSuchMethodException if no implementation was found
     */
    public StaticMethod buildStaticChecked() throws NoSuchMethodException {
      return buildChecked().asStatic();
    }

    /**
     * Returns the first valid implementation as a StaticMethod or throws a
     * RuntimeException if there is none.
     *
     * @return a {@link StaticMethod} with a valid implementation
     * @throws IllegalStateException if the method is not static
     * @throws RuntimeException if no implementation was found
     */
    public StaticMethod buildStatic() {
      return build().asStatic();
    }
  }
private static class MakeAccessible implements PrivilegedAction<Void> {
private Method hidden;
public MakeAccessible(Method hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
}
| 6,381 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynClasses.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Joiner;
import java.util.LinkedHashSet;
import java.util.Set;
public class DynClasses {
public static Builder builder() {
return new Builder();
}
public static class Builder {
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private Class<?> foundClass = null;
private boolean nullOk = false;
private Set<String> classNames = new LinkedHashSet<>();
/**
* Set the {@link ClassLoader} used to lookup classes by name.
* <p>
* If not set, the current thread's ClassLoader is used.
*
* @param loader a ClassLoader
* @return this Builder for method chaining
*/
public Builder loader(ClassLoader loader) {
this.loader = loader;
return this;
}
/**
* Checks for an implementation of the class by name.
*
* @param className name of a class
* @return this Builder for method chaining
*/
public Builder impl(String className) {
classNames.add(className);
if (foundClass != null) {
return this;
}
try {
this.foundClass = Class.forName(className, true, loader);
} catch (ClassNotFoundException e) {
// not the right implementation
}
return this;
}
/**
* Instructs this builder to return null if no class is found, rather than
* throwing an Exception.
*
* @return this Builder for method chaining
*/
public Builder orNull() {
this.nullOk = true;
return this;
}
/**
* Returns the first implementation or throws ClassNotFoundException if
* one was not found.
*
* @param <S> Java superclass
* @return a {@link Class} for the first implementation found
* @throws ClassNotFoundException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <S> Class<? extends S> buildChecked() throws ClassNotFoundException {
if (!nullOk && foundClass == null) {
throw new ClassNotFoundException("Cannot find class; alternatives: " +
Joiner.on(", ").join(classNames));
}
return (Class<? extends S>) foundClass;
}
/**
* Returns the first implementation or throws RuntimeException if one was
* not found.
*
* @param <S> Java superclass
* @return a {@link Class} for the first implementation found
* @throws RuntimeException if no implementation was found
*/
@SuppressWarnings("unchecked")
public <S> Class<? extends S> build() {
if (!nullOk && foundClass == null) {
throw new RuntimeException("Cannot find class; alternatives: " +
Joiner.on(", ").join(classNames));
}
return (Class<? extends S>) foundClass;
}
}
}
| 6,382 |
0 | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg | Create_ds/iceberg/common/src/main/java/com/netflix/iceberg/common/DynConstructors.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg.common;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
/**
* Copied from parquet-common
*/
public class DynConstructors {
  /**
   * Wrapper that exposes a {@link Constructor} through the
   * {@link DynMethods.UnboundMethod} interface so constructors and methods can
   * be used interchangeably; the invocation target must always be null.
   *
   * @param <C> the type constructed by the wrapped constructor
   */
  public static class Ctor<C> extends DynMethods.UnboundMethod {
    private final Constructor<C> ctor;
    private final Class<? extends C> constructed;

    private Ctor(Constructor<C> constructor, Class<? extends C> constructed) {
      super(null, "newInstance");
      this.ctor = constructor;
      this.constructed = constructed;
    }

    public Class<? extends C> getConstructedClass() {
      return constructed;
    }

    public C newInstanceChecked(Object... args) throws Exception {
      try {
        // extra caller args are silently trimmed to the constructor's arity,
        // matching UnboundMethod's invocation behavior
        if (args.length > ctor.getParameterCount()) {
          return ctor.newInstance(Arrays.copyOfRange(args, 0, ctor.getParameterCount()));
        } else {
          return ctor.newInstance(args);
        }
      } catch (InstantiationException | IllegalAccessException e) {
        // rethrown unchanged: these signal reflection failures, not failures
        // thrown by the constructor body, so they are not unwrapped below
        throw e;
      } catch (InvocationTargetException e) {
        // unwrap the reflection wrapper and rethrow the underlying cause
        Throwables.propagateIfInstanceOf(e.getCause(), Exception.class);
        Throwables.propagateIfInstanceOf(e.getCause(), RuntimeException.class);
        throw Throwables.propagate(e.getCause());
      }
    }

    public C newInstance(Object... args) {
      try {
        return newInstanceChecked(args);
      } catch (Exception e) {
        // checked exceptions are wrapped; runtime exceptions pass through
        Throwables.propagateIfInstanceOf(e, RuntimeException.class);
        throw Throwables.propagate(e);
      }
    }

    @Override
    @SuppressWarnings("unchecked")
    public <R> R invoke(Object target, Object... args) {
      Preconditions.checkArgument(target == null,
          "Invalid call to constructor: target must be null");
      return (R) newInstance(args);
    }

    @Override
    @SuppressWarnings("unchecked")
    public <R> R invokeChecked(Object target, Object... args) throws Exception {
      Preconditions.checkArgument(target == null,
          "Invalid call to constructor: target must be null");
      return (R) newInstanceChecked(args);
    }

    @Override
    public DynMethods.BoundMethod bind(Object receiver) {
      // constructors have no receiver; binding is always an error
      throw new IllegalStateException("Cannot bind constructors");
    }

    @Override
    public boolean isStatic() {
      return true;
    }

    @Override
    public String toString() {
      return getClass().getSimpleName() +
          "(constructor=" + ctor + ", class=" + constructed + ")";
    }
  }
public static Builder builder() {
return new Builder();
}
public static Builder builder(Class<?> baseClass) {
return new Builder(baseClass);
}
public static class Builder {
private final Class<?> baseClass;
private ClassLoader loader = Thread.currentThread().getContextClassLoader();
private Ctor ctor = null;
private Map<String, Throwable> problems = new HashMap<String, Throwable>();
public Builder(Class<?> baseClass) {
this.baseClass = baseClass;
}
public Builder() {
this.baseClass = null;
}
/**
* Set the {@link ClassLoader} used to lookup classes by name.
* <p>
* If not set, the current thread's ClassLoader is used.
*
* @param loader a ClassLoader
* @return this Builder for method chaining
*/
public Builder loader(ClassLoader loader) {
this.loader = loader;
return this;
}
public Builder impl(String className, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Class<?> targetClass = Class.forName(className, true, loader);
impl(targetClass, types);
} catch (NoClassDefFoundError | ClassNotFoundException e) {
// cannot load this implementation
problems.put(className, e);
}
return this;
}
public <T> Builder impl(Class<T> targetClass, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
ctor = new Ctor<T>(targetClass.getConstructor(types), targetClass);
} catch (NoSuchMethodException e) {
// not the right implementation
problems.put(methodName(targetClass, types), e);
}
return this;
}
public Builder hiddenImpl(Class<?>... types) {
hiddenImpl(baseClass, types);
return this;
}
@SuppressWarnings("unchecked")
public Builder hiddenImpl(String className, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Class targetClass = Class.forName(className, true, loader);
hiddenImpl(targetClass, types);
} catch (NoClassDefFoundError | ClassNotFoundException e) {
// cannot load this implementation
problems.put(className, e);
}
return this;
}
public <T> Builder hiddenImpl(Class<T> targetClass, Class<?>... types) {
// don't do any work if an implementation has been found
if (ctor != null) {
return this;
}
try {
Constructor<T> hidden = targetClass.getDeclaredConstructor(types);
AccessController.doPrivileged(new MakeAccessible(hidden));
ctor = new Ctor<T>(hidden, targetClass);
} catch (SecurityException e) {
// unusable
problems.put(methodName(targetClass, types), e);
} catch (NoSuchMethodException e) {
// not the right implementation
problems.put(methodName(targetClass, types), e);
}
return this;
}
@SuppressWarnings("unchecked")
public <C> Ctor<C> buildChecked() throws NoSuchMethodException {
if (ctor != null) {
return ctor;
}
throw new NoSuchMethodException("Cannot find constructor for " +
baseClass + "\n" + formatProblems(problems));
}
@SuppressWarnings("unchecked")
public <C> Ctor<C> build() {
if (ctor != null) {
return ctor;
}
throw new RuntimeException("Cannot find constructor for " +
baseClass + "\n" + formatProblems(problems));
}
}
private static class MakeAccessible implements PrivilegedAction<Void> {
private Constructor<?> hidden;
public MakeAccessible(Constructor<?> hidden) {
this.hidden = hidden;
}
@Override
public Void run() {
hidden.setAccessible(true);
return null;
}
}
private static String formatProblems(Map<String, Throwable> problems) {
StringBuilder sb = new StringBuilder();
boolean first = true;
for (Map.Entry<String, Throwable> problem : problems.entrySet()) {
if (first) {
first = false;
} else {
sb.append("\n");
}
sb.append("\tMissing ").append(problem.getKey()).append(" [")
.append(problem.getValue().getClass().getName()).append(": ")
.append(problem.getValue().getMessage()).append("]");
}
return sb.toString();
}
/**
 * Builds a constructor-signature key like {@code "com.example.Foo(int,java.lang.String)"}
 * used to identify a candidate in the problems map.
 */
private static String methodName(Class<?> targetClass, Class<?>... types) {
  StringBuilder sb = new StringBuilder(targetClass.getName()).append("(");
  for (int i = 0; i < types.length; i++) {
    if (i > 0) {
      sb.append(",");
    }
    sb.append(types[i].getName());
  }
  sb.append(")");
  return sb.toString();
}
}
| 6,383 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/HiveTableBaseTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStore;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.RetryingHMSHandler;
import org.apache.hadoop.hive.metastore.TServerSocketKeepAlive;
import org.apache.hadoop.hive.metastore.TSetIpAddressProcessor;
import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;
import org.apache.thrift.transport.TTransportException;
import org.apache.thrift.transport.TTransportFactory;
import org.junit.After;
import org.junit.Before;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.stream.Collectors;
import static com.netflix.iceberg.PartitionSpec.builderFor;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
import static java.nio.file.Files.createTempDirectory;
import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute;
import static java.nio.file.attribute.PosixFilePermissions.fromString;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.AUTO_CREATE_ALL;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.COMPACTOR_WORKER_THREADS;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CONNECT_URL_KEY;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.HIVE_TXN_MANAGER;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.SCHEMA_VERIFICATION;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.THRIFT_URIS;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.WAREHOUSE;
/**
 * Base class for Hive metastore integration tests: boots an embedded Derby-backed
 * metastore, a Thrift server, and a client, and creates one Iceberg test table.
 */
class HiveTableBaseTest {
  static final String DB_NAME = "hivedb";
  static final String TABLE_NAME = "tbl";

  // schema the test table is created with: a single required long column
  static final Schema schema = new Schema(Types.StructType.of(
      required(1, "id", Types.LongType.get())).fields());

  // expected schema after tests add the optional "data" column
  static final Schema altered = new Schema(Types.StructType.of(
      required(1, "id", Types.LongType.get()),
      optional(2, "data", Types.LongType.get())).fields());

  private static final PartitionSpec partitionSpec = builderFor(schema).identity("id").build();

  Configuration hiveConf;
  HiveMetaStoreClient metastoreClient;

  // scratch directory holding the Derby db, warehouse files, and catalogs
  private File hiveLocalDir;
  // single thread running the embedded Thrift metastore server
  private ExecutorService executorService;
  private TServer server;

  /**
   * Creates the Derby schema, starts the Thrift metastore server, connects a client,
   * and creates the test database plus an Iceberg table partitioned by "id".
   */
  @Before
  public void setup() throws IOException,
                             TException,
                             InvocationTargetException,
                             NoSuchMethodException,
                             IllegalAccessException,
                             NoSuchFieldException, SQLException {
    this.executorService = Executors.newSingleThreadExecutor();
    hiveLocalDir = createTempDirectory("hive", asFileAttribute(fromString("rwxrwxrwx"))).toFile();
    setupDB("jdbc:derby:" + getDerbyPath() + ";create=true");

    this.server = thriftServer();
    executorService.submit(() -> server.serve());

    this.metastoreClient = new HiveMetaStoreClient(this.hiveConf);
    createIfNotExistsCatalog("hive");
    this.metastoreClient.createDatabase(new Database(DB_NAME, "description", getDBPath(), new HashMap<>()));
    new HiveTables(this.hiveConf).create(schema, partitionSpec, DB_NAME, TABLE_NAME);
  }

  @After
  public void cleanup() {
    if (server != null) {
      server.stop();
    }
    executorService.shutdown();
    if (hiveLocalDir != null) {
      // File.delete() only removes empty directories; the scratch dir contains the
      // Derby database and warehouse files, so delete its contents bottom-up first
      deleteRecursively(hiveLocalDir);
    }
  }

  // removes a file, or a directory and everything under it
  private static void deleteRecursively(File file) {
    File[] children = file.listFiles();
    if (children != null) {
      for (File child : children) {
        deleteRecursively(child);
      }
    }
    file.delete();
  }

  /**
   * Builds the HiveConf for the embedded metastore: Derby-backed, warehouse under
   * the scratch dir, Thrift on the given port.
   */
  private HiveConf hiveConf(Configuration conf, int port) throws IOException {
    // redirect derby.log into the scratch dir instead of the working directory
    File derbyLogFile = new File(hiveLocalDir, "derby.log");
    derbyLogFile.createNewFile();
    System.setProperty("derby.stream.error.file", derbyLogFile.getPath());
    final HiveConf hiveConf = new HiveConf(conf, this.getClass());
    // Setting AUTO_CREATE_ALL in hadoop config somehow still reverts to false.
    hiveConf.set(SCHEMA_VERIFICATION.getVarname(), "false");
    hiveConf.set(THRIFT_URIS.getVarname(), "thrift://localhost:" + port);
    hiveConf.set(WAREHOUSE.getVarname(), "file:" + hiveLocalDir.getAbsolutePath());
    hiveConf.set(WAREHOUSE.getHiveName(), "file:" + hiveLocalDir.getAbsolutePath());
    hiveConf.set(CONNECT_URL_KEY.getVarname(), "jdbc:derby:" + getDerbyPath() + ";create=true");
    hiveConf.set(AUTO_CREATE_ALL.getVarname(), "true");
    hiveConf.set(HIVE_TXN_MANAGER.getVarname(), "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
    hiveConf.set(COMPACTOR_INITIATOR_ON.getVarname(), "true");
    hiveConf.set(COMPACTOR_WORKER_THREADS.getVarname(), "1");
    hiveConf.set(HIVE_SUPPORT_CONCURRENCY.getVarname(), "true");
    return hiveConf;
  }

  private String getDerbyPath() {
    final File metastore_db = new File(hiveLocalDir, "metastore_db");
    return metastore_db.getPath();
  }

  /**
   * Creates (but does not start) a Thrift metastore server bound to an ephemeral port;
   * also initializes {@link #hiveConf} to point at that port.
   */
  private TServer thriftServer() throws IOException,
                                        TTransportException,
                                        MetaException,
                                        InvocationTargetException,
                                        NoSuchMethodException,
                                        IllegalAccessException,
                                        NoSuchFieldException {
    // port 0 lets the OS choose a free port; the real port is read back below
    final TServerSocketKeepAlive socket = new TServerSocketKeepAlive(new TServerSocket(0));
    this.hiveConf = hiveConf(new Configuration(), socket.getServerSocket().getLocalPort());
    HiveMetaStore.HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", hiveConf);
    IHMSHandler handler = RetryingHMSHandler.getProxy(hiveConf, baseHandler, true);
    final TTransportFactory transportFactory = new TTransportFactory();
    final TSetIpAddressProcessor<IHMSHandler> processor = new TSetIpAddressProcessor<>(handler);
    TThreadPoolServer.Args args = new TThreadPoolServer.Args(socket)
        .processor(processor)
        .transportFactory(transportFactory)
        .protocolFactory(new TBinaryProtocol.Factory())
        .minWorkerThreads(3)
        .maxWorkerThreads(5);
    return new TThreadPoolServer(args);
  }

  /**
   * Creates the metastore schema in Derby by running the bundled DDL script.
   * The connection and reader are closed even if the script fails.
   */
  private void setupDB(String dbURL) throws SQLException, IOException {
    URL hiveSqlScript = getClass().getClassLoader().getResource("hive-schema-3.1.0.derby.sql");
    // try-with-resources: the original leaked both the JDBC connection and the reader
    try (Connection connection = DriverManager.getConnection(dbURL);
         Reader reader = new BufferedReader(new FileReader(new File(hiveSqlScript.getFile())))) {
      ScriptRunner scriptRunner = new ScriptRunner(connection, true, true);
      scriptRunner.runScript(reader);
    }
  }

  // Hive 3 requires a catalog; create the default "hive" catalog if missing
  private void createIfNotExistsCatalog(String catalogName) throws TException {
    try {
      metastoreClient.getCatalog(catalogName);
    } catch (NoSuchObjectException e) {
      String catalogPath = Paths.get(hiveLocalDir.getAbsolutePath(), catalogName + ".catalog").toString();
      metastoreClient.createCatalog(new Catalog(catalogName, catalogPath));
    }
  }

  private String getDBPath() {
    return Paths.get(hiveLocalDir.getAbsolutePath(), DB_NAME + ".db").toAbsolutePath().toString();
  }

  String getTableBasePath(String tableName) {
    return Paths.get(getDBPath(), tableName).toAbsolutePath().toString();
  }

  String getTableLocation(String tableName) {
    return new Path("file", null, Paths.get(getTableBasePath(tableName), "empty").toString()).toString();
  }

  String metadataLocation(String tableName) {
    return Paths.get(getTableBasePath(tableName), "metadata").toString();
  }

  // all files under the table's metadata directory
  private List<String> metadataFiles(String tableName) {
    return Arrays.stream(new File(metadataLocation(tableName)).listFiles())
        .map(File::getAbsolutePath)
        .collect(Collectors.toList());
  }

  List<String> metadataVersionFiles(String tableName) {
    return filterByExtension(tableName, getFileExtension(hiveConf));
  }

  List<String> manifestFiles(String tableName) {
    return filterByExtension(tableName, ".avro");
  }

  private List<String> filterByExtension(String tableName, String extension) {
    return metadataFiles(tableName)
        .stream()
        .filter(f -> f.endsWith(extension))
        .collect(Collectors.toList());
  }
}
| 6,384 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/HiveTablesTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import static com.netflix.iceberg.BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE;
import static com.netflix.iceberg.BaseMetastoreTableOperations.METADATA_LOCATION_PROP;
import static com.netflix.iceberg.BaseMetastoreTableOperations.TABLE_TYPE_PROP;
/**
 * Integration tests for {@link HiveTables}: create, schema evolution, and commit
 * conflict detection against the embedded metastore set up by the base class.
 */
public class HiveTablesTest extends HiveTableBaseTest {
  @Test
  public void testCreate() throws TException {
    // the base class created the table; it should be visible in the hive metastore
    final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
    // check parameters are in expected state: marked as an Iceberg table
    final Map<String, String> parameters = table.getParameters();
    Assert.assertNotNull(parameters);
    Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(parameters.get(TABLE_TYPE_PROP)));
    Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(table.getTableType()));
    // Ensure the table is pointing to empty location
    Assert.assertEquals(getTableLocation(TABLE_NAME) , table.getSd().getLocation());
    // Ensure it is stored as unpartitioned table in hive (Iceberg owns partitioning).
    Assert.assertEquals(0 , table.getPartitionKeysSize());
    // Only 1 snapshotFile Should exist and no manifests should exist
    Assert.assertEquals(1, metadataVersionFiles(TABLE_NAME).size());
    Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
    final com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
    // Iceberg schema should match the loaded table
    Assert.assertEquals(schema.asStruct(), icebergTable.schema().asStruct());
  }

  @Test
  public void testExistingTableUpdate() throws TException {
    com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
    // add a column, then reload to verify the commit was persisted
    icebergTable.updateSchema().addColumn("data", Types.LongType.get()).commit();
    icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
    // Only 2 snapshotFile Should exist (one per commit) and no manifests should exist
    Assert.assertEquals(2, metadataVersionFiles(TABLE_NAME).size());
    Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());
    Assert.assertEquals(altered.asStruct(), icebergTable.schema().asStruct());
    // the hive column list must be kept in sync with the iceberg schema
    final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
    final List<String> hiveColumns = table.getSd().getCols().stream().map(f -> f.getName()).collect(Collectors.toList());
    final List<String> icebergColumns = altered.columns().stream().map(f -> f.name()).collect(Collectors.toList());
    Assert.assertEquals(icebergColumns, hiveColumns);
  }

  @Test(expected = CommitFailedException.class)
  public void testFailure() throws TException {
    com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
    final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);
    // change the metadata pointer behind iceberg's back to simulate a concurrent commit;
    // the next schema commit must detect the stale pointer and fail
    final String dummyLocation = "dummylocation";
    table.getParameters().put(METADATA_LOCATION_PROP, dummyLocation);
    metastoreClient.alter_table(DB_NAME, TABLE_NAME, table);
    icebergTable.updateSchema()
        .addColumn("data", Types.LongType.get())
        .commit();
  }
}
| 6,385 |
0 | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/test/java/com/netflix/iceberg/hive/ScriptRunner.java | /*
*
* Copyright 2004 Clinton Begin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Slightly modified version of the com.ibatis.common.jdbc.ScriptRunner class
* from the iBATIS Apache project. Only removed dependency on Resource class
* and a constructor.
*/
package com.netflix.iceberg.hive;
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.sql.Connection;
import java.sql.Statement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.ResultSetMetaData;
/**
* Tool to run database scripts
*/
/**
 * Tool to run database scripts.
 *
 * <p>Reads SQL statements from a {@link Reader}, splits them on the configured
 * delimiter (default {@code ";"}), and executes them on the supplied connection.
 */
public class ScriptRunner {

  private static final String DEFAULT_DELIMITER = ";";

  private Connection connection;

  private boolean stopOnError;   // true: rethrow statement failures; false: log and continue
  private boolean autoCommit;    // desired auto-commit mode while the script runs

  private PrintWriter logWriter = new PrintWriter(System.out);
  private PrintWriter errorLogWriter = new PrintWriter(System.err);

  private String delimiter = DEFAULT_DELIMITER;
  private boolean fullLineDelimiter = false;  // true: a line must equal the delimiter exactly

  /**
   * Default constructor
   */
  public ScriptRunner(Connection connection, boolean autoCommit,
                      boolean stopOnError) {
    this.connection = connection;
    this.autoCommit = autoCommit;
    this.stopOnError = stopOnError;
  }

  public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
    this.delimiter = delimiter;
    this.fullLineDelimiter = fullLineDelimiter;
  }

  /**
   * Setter for logWriter property
   *
   * @param logWriter
   *          - the new value of the logWriter property
   */
  public void setLogWriter(PrintWriter logWriter) {
    this.logWriter = logWriter;
  }

  /**
   * Setter for errorLogWriter property
   *
   * @param errorLogWriter
   *          - the new value of the errorLogWriter property
   */
  public void setErrorLogWriter(PrintWriter errorLogWriter) {
    this.errorLogWriter = errorLogWriter;
  }

  /**
   * Runs an SQL script (read in using the Reader parameter)
   *
   * @param reader
   *          - the source of the script
   */
  public void runScript(Reader reader) throws IOException, SQLException {
    try {
      boolean originalAutoCommit = connection.getAutoCommit();
      try {
        if (originalAutoCommit != this.autoCommit) {
          connection.setAutoCommit(this.autoCommit);
        }
        runScript(connection, reader);
      } finally {
        // always restore the connection's original auto-commit mode
        connection.setAutoCommit(originalAutoCommit);
      }
    } catch (IOException e) {
      throw e;
    } catch (SQLException e) {
      throw e;
    } catch (Exception e) {
      throw new RuntimeException("Error running script. Cause: " + e, e);
    }
  }

  /**
   * Runs an SQL script (read in using the Reader parameter) using the
   * connection passed in
   *
   * @param conn
   *          - the connection to use for the script
   * @param reader
   *          - the source of the script
   * @throws SQLException
   *           if any SQL errors occur
   * @throws IOException
   *           if there is an error reading from the Reader
   */
  private void runScript(Connection conn, Reader reader) throws IOException,
      SQLException {
    StringBuilder command = null;
    try {
      LineNumberReader lineReader = new LineNumberReader(reader);
      String line;
      while ((line = lineReader.readLine()) != null) {
        if (command == null) {
          command = new StringBuilder();
        }
        String trimmedLine = line.trim();
        if (trimmedLine.startsWith("--")) {
          // echo SQL comments to the log
          println(trimmedLine);
        } else if (trimmedLine.length() < 1
            || trimmedLine.startsWith("//")) {
          // skip blank lines and // comments
          // (a second identical "--" branch in the original was unreachable and was removed)
        } else if (!fullLineDelimiter
            && trimmedLine.endsWith(getDelimiter())
            || fullLineDelimiter
            && trimmedLine.equals(getDelimiter())) {
          // delimiter reached: finish accumulating and execute the statement
          command.append(line.substring(0, line.lastIndexOf(getDelimiter())));
          command.append(" ");
          Statement statement = conn.createStatement();

          println(command);

          boolean hasResults = false;
          if (stopOnError) {
            hasResults = statement.execute(command.toString());
          } else {
            try {
              statement.execute(command.toString());
            } catch (SQLException e) {
              e.fillInStackTrace();
              printlnError("Error executing: " + command);
              printlnError(e);
            }
          }

          if (autoCommit && !conn.getAutoCommit()) {
            conn.commit();
          }

          ResultSet rs = statement.getResultSet();
          if (hasResults && rs != null) {
            ResultSetMetaData md = rs.getMetaData();
            int cols = md.getColumnCount();
            // JDBC column indexes are 1-based; the original started at 0,
            // which throws on the first column and skips the last one
            for (int i = 1; i <= cols; i++) {
              String name = md.getColumnLabel(i);
              print(name + "\t");
            }
            println("");
            while (rs.next()) {
              for (int i = 1; i <= cols; i++) {
                String value = rs.getString(i);
                print(value + "\t");
              }
              println("");
            }
          }

          command = null;
          try {
            statement.close();
          } catch (Exception e) {
            // Ignore to workaround a bug in Jakarta DBCP
          }
          Thread.yield();
        } else {
          // mid-statement line: keep accumulating
          command.append(line);
          command.append(" ");
        }
      }
      if (!autoCommit) {
        conn.commit();
      }
    } catch (SQLException | IOException e) {
      e.fillInStackTrace();
      printlnError("Error executing: " + command);
      printlnError(e);
      throw e;
    } finally {
      // rolling back in auto-commit mode is a SQLException per the JDBC spec,
      // so only roll back an open transaction
      if (!conn.getAutoCommit()) {
        conn.rollback();
      }
      flush();
    }
  }

  private String getDelimiter() {
    return delimiter;
  }

  private void print(Object o) {
    if (logWriter != null) {
      // the original checked logWriter but wrote to System.out, ignoring setLogWriter
      logWriter.print(o);
    }
  }

  private void println(Object o) {
    if (logWriter != null) {
      logWriter.println(o);
    }
  }

  private void printlnError(Object o) {
    if (errorLogWriter != null) {
      errorLogWriter.println(o);
    }
  }

  private void flush() {
    if (logWriter != null) {
      logWriter.flush();
    }
    if (errorLogWriter != null) {
      errorLogWriter.flush();
    }
  }
}
| 6,386 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.google.common.base.Splitter;
import com.netflix.iceberg.BaseMetastoreTableOperations;
import com.netflix.iceberg.BaseMetastoreTables;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;
import org.apache.thrift.transport.TTransportException;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.CLIENT_SOCKET_TIMEOUT;
import static org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars.THRIFT_URIS;
/**
 * {@link BaseMetastoreTables} implementation backed by the Hive metastore.
 *
 * <p>Table identifiers are of the form {@code "database.table"}.
 */
public class HiveTables extends BaseMetastoreTables {
  // splits "db.table" into at most two parts; table names may not contain further dots
  private static final Splitter DOT = Splitter.on('.').limit(2);
  private Configuration conf;

  public HiveTables(Configuration conf) {
    super(conf);
    this.conf = conf;
  }

  @Override
  public Table create(Schema schema, String tableIdentifier) {
    return create(schema, PartitionSpec.unpartitioned(), tableIdentifier);
  }

  /**
   * Creates a table from a {@code "database.table"} identifier.
   *
   * @throws UnsupportedOperationException if the identifier is not of that form
   */
  @Override
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties, String tableIdentifier) {
    List<String> parts = DOT.splitToList(tableIdentifier);
    if (parts.size() == 2) {
      return create(schema, spec, properties, parts.get(0), parts.get(1));
    }
    throw new UnsupportedOperationException("Could not parse table identifier: " + tableIdentifier);
  }

  /**
   * Loads a table from a {@code "database.table"} identifier.
   *
   * @throws UnsupportedOperationException if the identifier is not of that form
   */
  @Override
  public Table load(String tableIdentifier) {
    List<String> parts = DOT.splitToList(tableIdentifier);
    if (parts.size() == 2) {
      return load(parts.get(0), parts.get(1));
    }
    throw new UnsupportedOperationException("Could not parse table identifier: " + tableIdentifier);
  }

  @Override
  public BaseMetastoreTableOperations newTableOps(Configuration conf, String database, String table) {
    return new HiveTableOperations(conf, getClient(), database, table);
  }

  // opens a new thrift connection to the metastore configured by THRIFT_URIS
  private ThriftHiveMetastore.Client getClient() {
    final URI metastoreUri = URI.create(MetastoreConf.getAsString(conf, THRIFT_URIS));
    final int socketTimeOut = (int) MetastoreConf.getTimeVar(conf, CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
    TTransport transport = new TSocket(metastoreUri.getHost(), metastoreUri.getPort(), socketTimeOut);
    try {
      transport.open();
    } catch (TTransportException e) {
      // keep the transport failure as the cause; the original dropped it
      throw new RuntimeException(
          "failed to open socket for " + metastoreUri + " with timeoutMillis " + socketTimeOut, e);
    }
    return new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
  }
}
| 6,387 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTableOperations.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.google.common.collect.Lists;
import com.netflix.iceberg.BaseMetastoreTableOperations;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.SerdeType;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.UnlockRequest;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import static java.lang.String.format;
/**
* TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to
* avoid code duplication between this class and Metacat Tables.
*/
public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final Logger LOG = LoggerFactory.getLogger(HiveTableOperations.class);
private final ThriftHiveMetastore.Client metaStoreClient;
private final String database;
private final String tableName;
protected HiveTableOperations(Configuration conf, ThriftHiveMetastore.Client metaStoreClient, String database, String table) {
super(conf);
this.metaStoreClient = metaStoreClient;
this.database = database;
this.tableName = table;
}
@Override
public TableMetadata refresh() {
String metadataLocation = null;
try {
final Table table = metaStoreClient.get_table(database, tableName);
String tableType = table.getParameters().get(TABLE_TYPE_PROP);
if (tableType == null || !tableType.equalsIgnoreCase(ICEBERG_TABLE_TYPE_VALUE)) {
throw new IllegalArgumentException(format("Invalid tableName, not Iceberg: %s.%s", database, table));
}
metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
if (metadataLocation == null) {
throw new IllegalArgumentException(format("%s.%s is missing %s property", database, tableName, METADATA_LOCATION_PROP));
}
} catch (NoSuchObjectException e) {
if (currentMetadataLocation() != null) {
throw new NoSuchTableException(format("No such table: %s.%s", database, tableName));
}
} catch (TException e) {
throw new RuntimeException(format("Failed to get table info from metastore %s.%s", database, tableName));
}
refreshFromMetadataLocation(metadataLocation);
return current();
}
@Override
public void commit(TableMetadata base, TableMetadata metadata) {
// if the metadata is already out of date, reject it
if (base != current()) {
throw new CommitFailedException(format("stale table metadata for %s.%s", database, tableName));
}
// if the metadata is not changed, return early
if (base == metadata) {
LOG.info("Nothing to commit.");
return;
}
String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
boolean threw = true;
Optional<Long> lockId = Optional.empty();
try {
lockId = Optional.of(acquireLock());
// TODO add lock heart beating for cases where default lock timeout is too low.
Table tbl;
if (base != null) {
tbl = metaStoreClient.get_table(database, tableName);
} else {
final long currentTimeMillis = System.currentTimeMillis();
tbl = new Table(tableName,
database,
System.getProperty("user.name"),
(int) currentTimeMillis / 1000,
(int) currentTimeMillis / 1000,
Integer.MAX_VALUE,
storageDescriptor(metadata.schema()),
Collections.emptyList(),
new HashMap<>(),
null,
null,
ICEBERG_TABLE_TYPE_VALUE);
}
tbl.setSd(storageDescriptor(metadata.schema())); // set to pickup any schema changes
final String metadataLocation = tbl.getParameters().get(METADATA_LOCATION_PROP);
if (!Objects.equals(currentMetadataLocation(), metadataLocation)) {
throw new CommitFailedException(format("metadataLocation = %s is not same as table metadataLocation %s for %s.%s",
currentMetadataLocation(), metadataLocation, database, tableName));
}
setParameters(newMetadataLocation, tbl);
if (base != null) {
metaStoreClient.alter_table(database, tableName, tbl);
} else {
metaStoreClient.create_table(tbl);
}
threw = false;
} catch (TException | UnknownHostException e) {
throw new RuntimeException(format("Metastore operation failed for %s.%s", database, tableName), e);
} finally {
if (threw) {
// if anything went wrong, clean up the uncommitted metadata file
io().deleteFile(newMetadataLocation);
}
unlock(lockId);
}
requestRefresh();
}
private void setParameters(String newMetadataLocation, Table tbl) {
Map<String, String> parameters = tbl.getParameters();
if (parameters == null) {
parameters = new HashMap<>();
}
parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
}
tbl.setParameters(parameters);
}
private StorageDescriptor storageDescriptor(Schema schema) {
final StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(columns(schema));
storageDescriptor.setLocation(hiveTableLocation());
storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileInputFormat");
storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileOutputFormat");
SerDeInfo serDeInfo = new SerDeInfo();
serDeInfo.setSerdeType(SerdeType.HIVE);
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
storageDescriptor.setSerdeInfo(serDeInfo);
return storageDescriptor;
}
private final List<FieldSchema> columns(Schema schema) {
return schema.columns().stream().map(col -> new FieldSchema(col.name(), HiveTypeConverter.convert(col.type()), "")).collect(Collectors.toList());
}
private long acquireLock() throws UnknownHostException, TException {
final LockComponent lockComponent = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, database);
lockComponent.setTablename(tableName);
final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent),
System.getProperty("user.name"),
InetAddress.getLocalHost().getHostName());
LockResponse lockResponse = metaStoreClient.lock(lockRequest);
LockState state = lockResponse.getState();
long lockId = lockResponse.getLockid();
//TODO add timeout
while (state.equals(LockState.WAITING)) {
lockResponse = metaStoreClient.check_lock(new CheckLockRequest(lockResponse.getLockid()));
state = lockResponse.getState();
}
if (!state.equals(LockState.ACQUIRED)) {
throw new CommitFailedException(format("Could not acquire the lock on %s.%s, " +
"lock request ended in state %s", database, tableName, state));
}
return lockId;
}
private void unlock(Optional<Long> lockId) {
if (lockId.isPresent()) {
try {
metaStoreClient.unlock(new UnlockRequest(lockId.get()));
} catch (TException e) {
throw new RuntimeException(format("Failed to unlock %s.%s", database, tableName) , e);
}
}
}
}
| 6,388 |
0 | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg | Create_ds/iceberg/hive/src/main/java/com/netflix/iceberg/hive/HiveTypeConverter.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hive;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import java.util.stream.Collectors;
import static java.lang.String.format;
/**
 * Maps Iceberg {@link Type}s to their Hive DDL type name strings.
 */
public final class HiveTypeConverter {
  // static utility class; never instantiated
  private HiveTypeConverter() {
  }

  /**
   * Returns the Hive type string for the given Iceberg type.
   *
   * @param type an Iceberg type
   * @return the equivalent Hive DDL type name
   * @throws UnsupportedOperationException for types with no Hive equivalent (e.g. time)
   */
  public static String convert(Type type) {
    switch (type.typeId()) {
      case BOOLEAN:
        return "boolean";
      case INTEGER:
        return "int";
      case LONG:
        return "bigint";
      case FLOAT:
        return "float";
      case DOUBLE:
        return "double";
      case DATE:
        return "date";
      case TIME:
        throw new UnsupportedOperationException("Hive does not support time fields");
      case TIMESTAMP:
        return "timestamp";
      case STRING:
      case UUID:
        // UUIDs have no native Hive type; they are stored as strings
        return "string";
      case FIXED:
      case BINARY:
        // fixed-length and variable-length binary both map to Hive binary
        return "binary";
      case DECIMAL:
        final Types.DecimalType decimal = (Types.DecimalType) type;
        return format("decimal(%s,%s)", decimal.precision(), decimal.scale()); //TODO may be just decimal?
      case STRUCT:
        // render each field as name:type, comma-separated inside struct<...>
        final String fieldList = type.asStructType().fields().stream()
            .map(field -> format("%s:%s", field.name(), convert(field.type())))
            .collect(Collectors.joining(","));
        return format("struct<%s>", fieldList);
      case LIST:
        return format("array<%s>", convert(type.asListType().elementType()));
      case MAP:
        final Types.MapType map = type.asMapType();
        return format("map<%s,%s>", convert(map.keyType()), convert(map.valueType()));
      default:
        throw new UnsupportedOperationException(type +" is not supported");
    }
  }
}
| 6,389 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/TestHelpers.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.expressions.BoundPredicate;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.ExpressionVisitors;
import com.netflix.iceberg.expressions.UnboundPredicate;
import org.junit.Assert;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
public class TestHelpers {
public static <T> T assertAndUnwrap(Expression expr, Class<T> expected) {
Assert.assertTrue("Expression should have expected type: " + expected,
expected.isInstance(expr));
return expected.cast(expr);
}
@SuppressWarnings("unchecked")
public static <T> BoundPredicate<T> assertAndUnwrap(Expression expr) {
Assert.assertTrue("Expression should be a bound predicate: " + expr,
expr instanceof BoundPredicate);
return (BoundPredicate<T>) expr;
}
@SuppressWarnings("unchecked")
public static <T> UnboundPredicate<T> assertAndUnwrapUnbound(Expression expr) {
Assert.assertTrue("Expression should be an unbound predicate: " + expr,
expr instanceof UnboundPredicate);
return (UnboundPredicate<T>) expr;
}
public static void assertAllReferencesBound(String message, Expression expr) {
ExpressionVisitors.visit(expr, new CheckReferencesBound(message));
}
@SuppressWarnings("unchecked")
public static <T> T roundTripSerialize(T type) throws IOException, ClassNotFoundException {
ByteArrayOutputStream bytes = new ByteArrayOutputStream();
try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
out.writeObject(type);
}
try (ObjectInputStream in = new ObjectInputStream(
new ByteArrayInputStream(bytes.toByteArray()))) {
return (T) in.readObject();
}
}
private static class CheckReferencesBound extends ExpressionVisitors.ExpressionVisitor<Void> {
private final String message;
public CheckReferencesBound(String message) {
this.message = message;
}
@Override
public <T> Void predicate(UnboundPredicate<T> pred) {
Assert.fail(message + ": Found unbound predicate: " + pred);
return null;
}
}
/**
* Implements {@link StructLike#get} for passing data in tests.
*/
public static class Row implements StructLike {
public static Row of(Object... values) {
return new Row(values);
}
private final Object[] values;
private Row(Object... values) {
this.values = values;
}
@Override
public int size() {
return values.length;
}
@Override
@SuppressWarnings("unchecked")
public <T> T get(int pos, Class<T> javaClass) {
return javaClass.cast(values[pos]);
}
@Override
public <T> void set(int pos, T value) {
throw new UnsupportedOperationException("Setting values is not supported");
}
}
/**
* A convenience method to avoid a large number of @Test(expected=...) tests
* @param message A String message to describe this assertion
* @param expected An Exception class that the Runnable should throw
* @param containedInMessage A String that should be contained by the thrown
* exception's message
* @param callable A Callable that is expected to throw the exception
*/
public static void assertThrows(String message,
Class<? extends Exception> expected,
String containedInMessage,
Callable callable) {
try {
callable.call();
Assert.fail("No exception was thrown (" + message + "), expected: " +
expected.getName());
} catch (Exception actual) {
handleException(message, expected, containedInMessage, actual);
}
}
/**
* A convenience method to avoid a large number of @Test(expected=...) tests
* @param message A String message to describe this assertion
* @param expected An Exception class that the Runnable should throw
* @param containedInMessage A String that should be contained by the thrown
* exception's message
* @param runnable A Runnable that is expected to throw the runtime exception
*/
public static void assertThrows(String message,
Class<? extends Exception> expected,
String containedInMessage,
Runnable runnable) {
try {
runnable.run();
Assert.fail("No exception was thrown (" + message + "), expected: " +
expected.getName());
} catch (Exception actual) {
handleException(message, expected, containedInMessage, actual);
}
}
private static void handleException(String message,
Class<? extends Exception> expected,
String containedInMessage,
Exception actual) {
try {
Assert.assertEquals(message, expected, actual.getClass());
Assert.assertTrue(
"Expected exception message (" + containedInMessage + ") missing: " +
actual.getMessage(),
actual.getMessage().contains(containedInMessage)
);
} catch (AssertionError e) {
e.addSuppressed(actual);
throw e;
}
}
public static class TestFieldSummary implements ManifestFile.PartitionFieldSummary {
private final boolean containsNull;
private final ByteBuffer lowerBound;
private final ByteBuffer upperBound;
public TestFieldSummary(boolean containsNull, ByteBuffer lowerBound, ByteBuffer upperBound) {
this.containsNull = containsNull;
this.lowerBound = lowerBound;
this.upperBound = upperBound;
}
@Override
public boolean containsNull() {
return containsNull;
}
@Override
public ByteBuffer lowerBound() {
return lowerBound;
}
@Override
public ByteBuffer upperBound() {
return upperBound;
}
@Override
public ManifestFile.PartitionFieldSummary copy() {
return this;
}
}
public static class TestManifestFile implements ManifestFile {
private final String path;
private final long length;
private final int specId;
private final Long snapshotId;
private final Integer addedFiles;
private final Integer existingFiles;
private final Integer deletedFiles;
private final List<PartitionFieldSummary> partitions;
public TestManifestFile(String path, long length, int specId, Long snapshotId,
Integer addedFiles, Integer existingFiles, Integer deletedFiles,
List<PartitionFieldSummary> partitions) {
this.path = path;
this.length = length;
this.specId = specId;
this.snapshotId = snapshotId;
this.addedFiles = addedFiles;
this.existingFiles = existingFiles;
this.deletedFiles = deletedFiles;
this.partitions = partitions;
}
@Override
public String path() {
return path;
}
@Override
public long length() {
return length;
}
@Override
public int partitionSpecId() {
return specId;
}
@Override
public Long snapshotId() {
return snapshotId;
}
@Override
public Integer addedFilesCount() {
return addedFiles;
}
@Override
public Integer existingFilesCount() {
return existingFiles;
}
@Override
public Integer deletedFilesCount() {
return deletedFiles;
}
@Override
public List<PartitionFieldSummary> partitions() {
return partitions;
}
@Override
public ManifestFile copy() {
return this;
}
}
public static class TestDataFile implements DataFile {
private final String path;
private final StructLike partition;
private final long recordCount;
private final Map<Integer, Long> valueCounts;
private final Map<Integer, Long> nullValueCounts;
private final Map<Integer, ByteBuffer> lowerBounds;
private final Map<Integer, ByteBuffer> upperBounds;
public TestDataFile(String path, StructLike partition, long recordCount) {
this(path, partition, recordCount, null, null, null, null);
}
public TestDataFile(String path, StructLike partition, long recordCount,
Map<Integer, Long> valueCounts,
Map<Integer, Long> nullValueCounts,
Map<Integer, ByteBuffer> lowerBounds,
Map<Integer, ByteBuffer> upperBounds) {
this.path = path;
this.partition = partition;
this.recordCount = recordCount;
this.valueCounts = valueCounts;
this.nullValueCounts = nullValueCounts;
this.lowerBounds = lowerBounds;
this.upperBounds = upperBounds;
}
@Override
public CharSequence path() {
return path;
}
@Override
public FileFormat format() {
return FileFormat.fromFileName(path());
}
@Override
public StructLike partition() {
return partition;
}
@Override
public long recordCount() {
return recordCount;
}
@Override
public long fileSizeInBytes() {
return 0;
}
@Override
public long blockSizeInBytes() {
return 0;
}
@Override
public Integer fileOrdinal() {
return null;
}
@Override
public List<Integer> sortColumns() {
return null;
}
@Override
public Map<Integer, Long> columnSizes() {
return null;
}
@Override
public Map<Integer, Long> valueCounts() {
return valueCounts;
}
@Override
public Map<Integer, Long> nullValueCounts() {
return nullValueCounts;
}
@Override
public Map<Integer, ByteBuffer> lowerBounds() {
return lowerBounds;
}
@Override
public Map<Integer, ByteBuffer> upperBounds() {
return upperBounds;
}
@Override
public DataFile copy() {
return this;
}
}
}
| 6,390 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/TestPartitionPaths.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.TestHelpers.Row;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.transforms.Transform;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
/**
 * Tests for {@link PartitionSpec#partitionToPath}: expected key formatting and
 * URL-style escaping of values.
 */
public class TestPartitionPaths {
  private static final Schema SCHEMA = new Schema(
      Types.NestedField.required(1, "id", Types.IntegerType.get()),
      Types.NestedField.optional(2, "data", Types.StringType.get()),
      Types.NestedField.optional(3, "ts", Types.TimestampType.withoutZone())
  );

  @Test
  @SuppressWarnings("unchecked")
  public void testPartitionPath() {
    // partition by hour(ts), then bucket(id, 10)
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA)
        .hour("ts")
        .bucket("id", 10)
        .build();

    Transform hourTransform = spec.getFieldBySourceId(3).transform();
    Transform bucketTransform = spec.getFieldBySourceId(1).transform();

    Literal<Long> timestamp =
        Literal.of("2017-12-01T10:12:55.038194").to(Types.TimestampType.withoutZone());
    Object hourValue = hourTransform.apply(timestamp.value());
    Object bucketValue = bucketTransform.apply(1);

    Row partitionKey = Row.of(hourValue, bucketValue);

    Assert.assertEquals("Should produce expected partition key",
        "ts_hour=2017-12-01-10/id_bucket=" + bucketValue, spec.partitionToPath(partitionKey));
  }

  @Test
  public void testEscapedStrings() {
    PartitionSpec spec = PartitionSpec.builderFor(SCHEMA)
        .identity("data")
        .truncate("data", 10)
        .build();

    // slashes in values must be escaped so they do not create extra path levels
    Assert.assertEquals("Should escape / as %2F",
        "data=a%2Fb%2Fc%2Fd/data_trunc=a%2Fb%2Fc%2Fd",
        spec.partitionToPath(Row.of("a/b/c/d", "a/b/c/d")));
  }
}
| 6,391 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestReadabilityChecks.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.Schema;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for {@link CheckCompatibility}: verifies that write- and
 * read-compatibility checks report the expected errors for type promotion,
 * required/optional mismatches, missing fields, and field reordering.
 */
public class TestReadabilityChecks {
  private static final Type.PrimitiveType[] PRIMITIVES = new Type.PrimitiveType[] {
      Types.BooleanType.get(),
      Types.IntegerType.get(),
      Types.LongType.get(),
      Types.FloatType.get(),
      Types.DoubleType.get(),
      Types.DateType.get(),
      Types.TimeType.get(),
      Types.TimestampType.withoutZone(),
      Types.TimestampType.withZone(),
      Types.StringType.get(),
      Types.UUIDType.get(),
      Types.FixedType.ofLength(3),
      Types.FixedType.ofLength(4),
      Types.BinaryType.get(),
      Types.DecimalType.of(9, 2),
      Types.DecimalType.of(11, 2),
      Types.DecimalType.of(9, 3)
  };

  @Test
  public void testPrimitiveTypes() {
    // every primitive pair: promotion must match TypeUtil.isPromotionAllowed,
    // and no primitive may be read as a struct, list, or map
    for (Type.PrimitiveType from : PRIMITIVES) {
      Schema fromSchema = new Schema(required(1, "from_field", from));
      for (Type.PrimitiveType to : PRIMITIVES) {
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(
            new Schema(required(1, "to_field", to)), fromSchema);
        if (TypeUtil.isPromotionAllowed(from, to)) {
          Assert.assertEquals("Should produce 0 error messages", 0, errors.size());
        } else {
          Assert.assertEquals("Should produce 1 error message", 1, errors.size());
          Assert.assertTrue("Should complain that promotion is not allowed",
              errors.get(0).contains("cannot be promoted to"));
        }
      }
      {
        Schema structSchema = new Schema(required(1, "struct_field", Types.StructType.of(
            required(2, "from", from))
        ));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(structSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to struct is not allowed",
            errors.get(0).contains("cannot be read as a struct"));
      }
      {
        Schema listSchema = new Schema(required(1, "list_field", Types.ListType.ofRequired(2, from)));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(listSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to list is not allowed",
            errors.get(0).contains("cannot be read as a list"));
      }
      {
        Schema mapSchema = new Schema(required(1, "map_field",
            Types.MapType.ofRequired(2, 3, Types.StringType.get(), from)));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(mapSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to map is not allowed",
            errors.get(0).contains("cannot be read as a map"));
      }
      {
        Schema mapSchema = new Schema(required(1, "map_field",
            Types.MapType.ofRequired(2, 3, from, Types.StringType.get())));
        List<String> errors = CheckCompatibility.writeCompatibilityErrors(mapSchema, fromSchema);
        Assert.assertEquals("Should produce 1 error message", 1, errors.size());
        Assert.assertTrue("Should complain that primitive to map is not allowed",
            errors.get(0).contains("cannot be read as a map"));
      }
    }
  }

  @Test
  public void testRequiredSchemaField() {
    Schema write = new Schema(optional(1, "from_field", Types.IntegerType.get()));
    Schema read = new Schema(required(1, "to_field", Types.IntegerType.get()));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required column is optional",
        errors.get(0).contains("should be required, but is optional"));
  }

  @Test
  public void testMissingSchemaField() {
    Schema write = new Schema(required(0, "other_field", Types.IntegerType.get()));
    Schema read = new Schema(required(1, "to_field", Types.IntegerType.get()));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required column is missing",
        errors.get(0).contains("is required, but is missing"));
  }

  @Test
  public void testRequiredStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.IntegerType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required field is optional",
        errors.get(0).contains("should be required, but is optional"));
  }

  @Test
  public void testMissingRequiredStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(2, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.IntegerType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that a required field is missing",
        errors.get(0).contains("is required, but is missing"));
  }

  @Test
  public void testMissingOptionalStructField() {
    // missing optional fields are allowed: readers fill them with null
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "to_field", Types.IntegerType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce no error messages", 0, errors.size());
  }

  @Test
  public void testIncompatibleStructField() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.FloatType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to float"));
  }

  @Test
  public void testIncompatibleStructAndPrimitive() {
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "from_field", Types.StringType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StringType.get()));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("struct cannot be read as a string"));
  }

  @Test
  public void testMultipleErrors() {
    // required field is optional and cannot be promoted to the read type
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        optional(1, "from_field", Types.IntegerType.get())
    )));
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "to_field", Types.FloatType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    // fixed message: this case expects two errors, not one
    Assert.assertEquals("Should produce 2 error messages", 2, errors.size());
    Assert.assertTrue("Should complain that a required field is optional",
        errors.get(0).contains("should be required, but is optional"));
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(1).contains("cannot be promoted to float"));
  }

  @Test
  public void testRequiredMapValue() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofRequired(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that values are optional",
        errors.get(0).contains("values should be required, but are optional"));
  }

  @Test
  public void testIncompatibleMapKey() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.IntegerType.get(), Types.StringType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.DoubleType.get(), Types.StringType.get()
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to double"));
  }

  @Test
  public void testIncompatibleMapValue() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.DoubleType.get()
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to double"));
  }

  @Test
  public void testIncompatibleMapAndPrimitive() {
    Schema write = new Schema(required(0, "map_field", Types.MapType.ofOptional(
        1, 2, Types.StringType.get(), Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "map_field", Types.StringType.get()));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("map cannot be read as a string"));
  }

  @Test
  public void testRequiredListElement() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.ListType.ofRequired(
        1, Types.IntegerType.get()
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain that elements are optional",
        errors.get(0).contains("elements should be required, but are optional"));
  }

  @Test
  public void testIncompatibleListElement() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.StringType.get()
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("cannot be promoted to string"));
  }

  @Test
  public void testIncompatibleListAndPrimitive() {
    Schema write = new Schema(required(0, "list_field", Types.ListType.ofOptional(
        1, Types.IntegerType.get()
    )));
    Schema read = new Schema(required(0, "list_field", Types.StringType.get()));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about incompatible types",
        errors.get(0).contains("list cannot be read as a string"));
  }

  @Test
  public void testStructWriteReordering() {
    // writes should not reorder fields
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "field_a", Types.IntegerType.get()),
        required(2, "field_b", Types.IntegerType.get())
    )));
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "field_b", Types.IntegerType.get()),
        required(1, "field_a", Types.IntegerType.get())
    )));
    List<String> errors = CheckCompatibility.writeCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce 1 error message", 1, errors.size());
    Assert.assertTrue("Should complain about field_b before field_a",
        errors.get(0).contains("field_b is out of order, before field_a"));
  }

  @Test
  public void testStructReadReordering() {
    // reads should allow reordering
    Schema read = new Schema(required(0, "nested", Types.StructType.of(
        required(1, "field_a", Types.IntegerType.get()),
        required(2, "field_b", Types.IntegerType.get())
    )));
    Schema write = new Schema(required(0, "nested", Types.StructType.of(
        required(2, "field_b", Types.IntegerType.get()),
        required(1, "field_a", Types.IntegerType.get())
    )));
    List<String> errors = CheckCompatibility.readCompatibilityErrors(read, write);
    Assert.assertEquals("Should produce no error messages", 0, errors.size());
  }
}
| 6,392 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestComparableComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.junit.Assert;
import org.junit.Test;
import java.util.Comparator;
/**
* This tests the Comparator returned by ComparableLiteral, which is used for most types.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
public class TestComparableComparator {
  @Test
  public void testNaturalOrder() {
    // comparator from a long literal uses Long's natural (signed) ordering
    Comparator<Long> comparator = Literal.of(34L).comparator();

    Assert.assertTrue("Should use the natural order for non-null values",
        comparator.compare(33L, 34L) < 0);
    Assert.assertTrue("Should use signed ordering",
        comparator.compare(33L, -34L) > 0);
  }

  @Test
  public void testNullHandling() {
    Comparator<Long> comparator = Literal.of(34L).comparator();

    // nulls sort before all non-null values and are equal to each other
    Assert.assertTrue("null comes before non-null", comparator.compare(null, 34L) < 0);
    Assert.assertTrue("null comes before non-null", comparator.compare(34L, null) > 0);
    Assert.assertEquals("null equals null", 0, comparator.compare(null, null));
  }
}
| 6,393 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestSerializableTypes.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.TestHelpers;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestSerializableTypes {
@Test
public void testIdentityTypes() throws Exception {
// these types make a strong guarantee than equality, instances are identical
Type[] identityPrimitives = new Type[] {
Types.BooleanType.get(),
Types.IntegerType.get(),
Types.LongType.get(),
Types.FloatType.get(),
Types.DoubleType.get(),
Types.DateType.get(),
Types.TimeType.get(),
Types.TimestampType.withoutZone(),
Types.TimestampType.withZone(),
Types.StringType.get(),
Types.UUIDType.get(),
Types.BinaryType.get()
};
for (Type type : identityPrimitives) {
Assert.assertSame("Serialization result should be identical to starting type",
type, TestHelpers.roundTripSerialize(type));
}
}
@Test
public void testEqualTypes() throws Exception {
Type[] equalityPrimitives = new Type[] {
Types.DecimalType.of(9, 3),
Types.DecimalType.of(11, 0),
Types.FixedType.ofLength(4),
Types.FixedType.ofLength(34)
};
for (Type type : equalityPrimitives) {
Assert.assertEquals("Serialization result should be equal to starting type",
type, TestHelpers.roundTripSerialize(type));
}
}
@Test
public void testStructs() throws Exception {
Types.StructType struct = Types.StructType.of(
Types.NestedField.required(34, "Name!", Types.StringType.get()),
Types.NestedField.optional(35, "col", Types.DecimalType.of(38, 2)));
Type copy = TestHelpers.roundTripSerialize(struct);
Assert.assertEquals("Struct serialization should be equal to starting type", struct, copy);
Type stringType = copy.asNestedType().asStructType().fieldType("Name!");
Assert.assertSame("Struct serialization should preserve identity type",
Types.StringType.get(), stringType);
Type decimalType = copy.asNestedType().asStructType().field(35).type();
Assert.assertEquals("Struct serialization should support id lookup",
Types.DecimalType.of(38, 2), decimalType);
}
@Test
public void testMaps() throws Exception {
Type[] maps = new Type[] {
Types.MapType.ofOptional(1, 2, Types.StringType.get(), Types.LongType.get()),
Types.MapType.ofRequired(4, 5, Types.StringType.get(), Types.LongType.get())
};
for (Type map : maps) {
Type copy = TestHelpers.roundTripSerialize(map);
Assert.assertEquals("Map serialization should be equal to starting type", map, copy);
Assert.assertSame("Map serialization should preserve identity type",
Types.LongType.get(), map.asNestedType().asMapType().valueType());
}
}
@Test
public void testLists() throws Exception {
Type[] maps = new Type[] {
Types.ListType.ofOptional(2, Types.DoubleType.get()),
Types.ListType.ofRequired(5, Types.DoubleType.get())
};
for (Type list : maps) {
Type copy = TestHelpers.roundTripSerialize(list);
Assert.assertEquals("List serialization should be equal to starting type", list, copy);
Assert.assertSame("List serialization should preserve identity type",
Types.DoubleType.get(), list.asNestedType().asListType().elementType());
}
}
@Test
public void testSchema() throws Exception {
  // Round-trips a schema covering nested structs, maps (including a
  // struct-keyed map), and lists through Java serialization.
  // Bug fix: the last top-level field previously reused id 8, which collides
  // with nested field 8 ("feature1"); field ids must be unique, so it is
  // now 24 (next unused id).
  Schema schema = new Schema(
      required(1, "id", Types.IntegerType.get()),
      optional(2, "data", Types.StringType.get()),
      optional(3, "preferences", Types.StructType.of(
          required(8, "feature1", Types.BooleanType.get()),
          optional(9, "feature2", Types.BooleanType.get())
      )),
      required(4, "locations", Types.MapType.ofRequired(10, 11,
          Types.StringType.get(),
          Types.StructType.of(
              required(12, "lat", Types.FloatType.get()),
              required(13, "long", Types.FloatType.get())
          ))),
      optional(5, "points", Types.ListType.ofOptional(14,
          Types.StructType.of(
              required(15, "x", Types.LongType.get()),
              required(16, "y", Types.LongType.get())
          ))),
      required(6, "doubles", Types.ListType.ofRequired(17,
          Types.DoubleType.get()
      )),
      optional(7, "properties", Types.MapType.ofOptional(18, 19,
          Types.StringType.get(),
          Types.StringType.get()
      )),
      required(24, "complex_key_map", Types.MapType.ofOptional(20, 21,
          Types.StructType.of(
              required(22, "x", Types.LongType.get()),
              optional(23, "y", Types.LongType.get())),
          Types.StringType.get()))
  );
  // Typo fix: "serialziation" -> "serialization" in the assertion message.
  Assert.assertEquals("Schema serialization should be equal to starting schema",
      schema.asStruct(), TestHelpers.roundTripSerialize(schema).asStruct());
}
}
| 6,394 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestCharSeqComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.apache.avro.util.Utf8;
import org.junit.Assert;
import org.junit.Test;
import java.util.Comparator;
/**
* Tests the comparator returned by CharSequence literals.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
public class TestCharSeqComparator {
  @Test
  public void testStringAndUtf8() {
    String javaString = "abc";
    Utf8 avroString = new Utf8("abc");

    // A comparator obtained from either representation must treat the two
    // encodings of the same text as equal.
    Comparator<CharSequence> fromString = Literal.of(javaString).comparator();
    Assert.assertEquals("Should consider String and Utf8 equal",
        0, fromString.compare(javaString, avroString));

    Comparator<CharSequence> fromUtf8 = Literal.of(avroString).comparator();
    Assert.assertEquals("Should consider String and Utf8 equal",
        0, fromUtf8.compare(javaString, avroString));
  }

  @Test
  public void testSeqLength() {
    String shorter = "abc";
    String longer = "abcd";
    Comparator<CharSequence> cmp = Literal.of(shorter).comparator();

    // Sanity check that String.compareTo gives the same result
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        shorter.compareTo(longer) < 0);
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        longer.compareTo(shorter) > 0);

    // The comparator must agree with String.compareTo in both directions.
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        cmp.compare(shorter, longer) < 0);
    Assert.assertTrue("When one string is a substring of the other, the longer is greater",
        cmp.compare(longer, shorter) > 0);
  }

  @Test
  public void testCharOrderBeforeLength() {
    // "adc" > "abcd": the first differing character decides, not the length.
    String shortButGreater = "adc";
    String longButLesser = "abcd";
    Comparator<CharSequence> cmp = Literal.of(shortButGreater).comparator();

    // Sanity check that String.compareTo gives the same result
    Assert.assertTrue("First difference takes precedence over length",
        shortButGreater.compareTo(longButLesser) > 0);
    Assert.assertTrue("First difference takes precedence over length",
        longButLesser.compareTo(shortButGreater) < 0);

    // The comparator must agree with String.compareTo in both directions.
    Assert.assertTrue("First difference takes precedence over length",
        cmp.compare(shortButGreater, longButLesser) > 0);
    Assert.assertTrue("First difference takes precedence over length",
        cmp.compare(longButLesser, shortButGreater) < 0);
  }

  @Test
  public void testNullHandling() {
    // nulls sort first and compare equal to each other.
    String nonNull = "abc";
    Comparator<CharSequence> cmp = Literal.of(nonNull).comparator();
    Assert.assertTrue("null comes before non-null", cmp.compare(null, nonNull) < 0);
    Assert.assertTrue("null comes before non-null", cmp.compare(nonNull, null) > 0);
    Assert.assertEquals("null equals null", 0, cmp.compare(null, null));
  }
}
| 6,395 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/types/TestBinaryComparator.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.types;
import com.netflix.iceberg.expressions.Literal;
import org.junit.Assert;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.Comparator;
/**
* Tests the comparator returned by binary and fixed literals.
* <p>
* The tests use assertTrue instead of assertEquals because the return value is not necessarily one
* of {-1, 0, 1}. It is also more clear to compare the return value to 0 because the same operation
* can be used: a < b is equivalent to compare(a, b) < 0.
*/
public class TestBinaryComparator {
  @Test
  public void testBinaryUnsignedComparator() {
    // 0xFF (-1 as a signed byte) must sort after 0x01 because the comparator
    // treats bytes as unsigned values.
    ByteBuffer smaller = ByteBuffer.wrap(new byte[] { 1, 1, 2 });
    ByteBuffer larger = ByteBuffer.wrap(new byte[] { 1, -1, 2 });

    Comparator<ByteBuffer> cmp = Literal.of(smaller).comparator();
    Assert.assertTrue("Negative bytes should sort after positive bytes",
        cmp.compare(smaller, larger) < 0);
  }

  @Test
  public void testFixedUnsignedComparator() {
    // Same unsigned ordering check, but through a fixed-length literal.
    ByteBuffer smaller = ByteBuffer.wrap(new byte[] { 1, 1, 2 });
    ByteBuffer larger = ByteBuffer.wrap(new byte[] { 1, -1, 2 });

    Literal<ByteBuffer> fixedLit = Literal.of(smaller).to(Types.FixedType.ofLength(3));
    Comparator<ByteBuffer> cmp = fixedLit.comparator();
    Assert.assertTrue("Negative bytes should sort after positive bytes",
        cmp.compare(smaller, larger) < 0);
  }

  @Test
  public void testNullHandling() {
    // nulls sort first and compare equal to each other.
    ByteBuffer nonNull = ByteBuffer.allocate(0);
    Comparator<ByteBuffer> cmp = Literal.of(nonNull).comparator();
    Assert.assertTrue("null comes before non-null", cmp.compare(null, nonNull) < 0);
    Assert.assertTrue("null comes before non-null", cmp.compare(nonNull, null) > 0);
    Assert.assertEquals("null equals null", 0, cmp.compare(null, null));
  }
}
| 6,396 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestBucketing.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.google.common.base.Charsets;
import com.google.common.hash.HashFunction;
import com.google.common.hash.Hashing;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.apache.avro.util.Utf8;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Random;
import java.util.UUID;
public class TestBucketing {
  // Reference hash used by the spec for bucket transforms (32-bit Murmur3).
  private static final HashFunction MURMUR3 = Hashing.murmur3_32();

  // Private UUID(byte[]) constructor, captured reflectively so tests can build
  // a UUID from raw bytes without the digesting done by UUID.nameUUIDFromBytes.
  private static Constructor<UUID> uuidBytesConstructor;

  // NOTE(review): method name has a typo ("Construtor"); harmless because
  // JUnit invokes it via @BeforeClass, not by name.
  @BeforeClass
  public static void getUUIDConstrutor() {
    try {
      uuidBytesConstructor = UUID.class.getDeclaredConstructor(byte[].class);
      uuidBytesConstructor.setAccessible(true);
    } catch (NoSuchMethodException e) {
      throw new RuntimeException(e);
    }
  }

  // Re-seeded before each test so results are reproducible and order-independent.
  private Random testRandom = null;

  @Before
  public void initRandom() {
    // reinitialize random for each test to avoid dependence on run order
    this.testRandom = new Random(314358);
  }

  /**
   * Checks bucket hash values against the worked examples from the Iceberg
   * spec for every supported primitive type.
   */
  @Test
  public void testSpecValues() {
    // NOTE(review): the message mentions hash(true) but the code hashes the
    // int 1 — presumably booleans hash via their int value; confirm.
    Assert.assertEquals("Spec example: hash(true) = 1392991556",
        1392991556, Bucket.<Integer>get(Types.IntegerType.get(), 100).hash(1));
    Assert.assertEquals("Spec example: hash(34) = 2017239379",
        2017239379, Bucket.<Integer>get(Types.IntegerType.get(), 100).hash(34));
    // ints and longs with the same value must produce the same hash
    Assert.assertEquals("Spec example: hash(34L) = 2017239379",
        2017239379, Bucket.<Long>get(Types.LongType.get(), 100).hash(34L));
    // NOTE(review): messages say 17.11F/17.11D but the values hashed are
    // 1.0F/1.0D — the message literals look stale; confirm against the spec.
    Assert.assertEquals("Spec example: hash(17.11F) = -142385009",
        -142385009, new Bucket.BucketFloat(100).hash(1.0F));
    Assert.assertEquals("Spec example: hash(17.11D) = -142385009",
        -142385009, new Bucket.BucketDouble(100).hash(1.0D));
    // NOTE(review): this decimal assertion is duplicated verbatim below.
    Assert.assertEquals("Spec example: hash(decimal2(14.20)) = -500754589",
        -500754589,
        Bucket.<BigDecimal>get(Types.DecimalType.of(9,2), 100).hash(new BigDecimal("14.20")));
    Assert.assertEquals("Spec example: hash(decimal2(14.20)) = -500754589",
        -500754589,
        Bucket.<BigDecimal>get(Types.DecimalType.of(9,2), 100).hash(new BigDecimal("14.20")));
    // date/time/timestamp values are hashed through their underlying int/long
    // representations, parsed here via Literal conversions
    Literal<Integer> date = Literal.of("2017-11-16").to(Types.DateType.get());
    Assert.assertEquals("Spec example: hash(2017-11-16) = -653330422",
        -653330422,
        Bucket.<Integer>get(Types.DateType.get(), 100).hash(date.value()));
    Literal<Long> timeValue = Literal.of("22:31:08").to(Types.TimeType.get());
    Assert.assertEquals("Spec example: hash(22:31:08) = -662762989",
        -662762989,
        Bucket.<Long>get(Types.TimeType.get(), 100).hash(timeValue.value()));
    Literal<Long> timestampVal = Literal.of("2017-11-16T22:31:08")
        .to(Types.TimestampType.withoutZone());
    Assert.assertEquals("Spec example: hash(2017-11-16T22:31:08) = -2047944441",
        -2047944441,
        Bucket.<Long>get(Types.TimestampType.withoutZone(), 100).hash(timestampVal.value()));
    // same instant expressed with a zone offset must hash identically (UTC storage)
    Literal<Long> timestamptzVal = Literal.of("2017-11-16T14:31:08-08:00")
        .to(Types.TimestampType.withZone());
    Assert.assertEquals("Spec example: hash(2017-11-16T14:31:08-08:00) = -2047944441",
        -2047944441,
        Bucket.<Long>get(Types.TimestampType.withZone(), 100).hash(timestamptzVal.value()));
    // String and Utf8 representations of the same text must hash identically
    Assert.assertEquals("Spec example: hash(\"iceberg\") = 1210000089",
        1210000089, Bucket.<String>get(Types.StringType.get(), 100).hash("iceberg"));
    Assert.assertEquals("Spec example: hash(\"iceberg\") = 1210000089",
        1210000089, Bucket.<Utf8>get(Types.StringType.get(), 100).hash(new Utf8("iceberg")));
    Literal<UUID> uuid = Literal.of("f79c3e09-677c-4bbd-a479-3f349cb785e7")
        .to(Types.UUIDType.get());
    Assert.assertEquals("Spec example: hash(f79c3e09-677c-4bbd-a479-3f349cb785e7) = 1488055340",
        1488055340, Bucket.<UUID>get(Types.UUIDType.get(), 100).hash(uuid.value()));
    // NOTE(review): this binary assertion is duplicated verbatim below.
    ByteBuffer bytes = ByteBuffer.wrap(new byte[] {0, 1, 2, 3});
    Assert.assertEquals("Spec example: hash([00 01 02 03]) = -188683207",
        -188683207, Bucket.<ByteBuffer>get(Types.BinaryType.get(), 100).hash(bytes));
    Assert.assertEquals("Spec example: hash([00 01 02 03]) = -188683207",
        -188683207, Bucket.<ByteBuffer>get(Types.BinaryType.get(), 100).hash(bytes));
  }

  @Test
  public void testInteger() {
    int num = testRandom.nextInt();
    // ints hash through their 64-bit little-endian encoding, hence the
    // widening cast and the 8-byte buffer
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putLong((long) num);
    Bucket<Integer> bucketFunc = Bucket.get(Types.IntegerType.get(), 100);
    Assert.assertEquals("Integer hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testLong() {
    long num = testRandom.nextLong();
    // longs hash through their 64-bit little-endian encoding
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putLong(num);
    Bucket<Long> bucketFunc = Bucket.get(Types.LongType.get(), 100);
    Assert.assertEquals("Long hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testIntegerTypePromotion() {
    // the same numeric value must land in the same bucket whether typed as
    // int or long, so promoting a column's type does not break partitioning
    Bucket<Integer> bucketInts = Bucket.get(Types.IntegerType.get(), 100);
    Bucket<Long> bucketLongs = Bucket.get(Types.LongType.get(), 100);
    int r = testRandom.nextInt();
    Assert.assertEquals("Integer and Long bucket results should match",
        bucketInts.apply(r), bucketLongs.apply((long) r));
  }

  @Test
  public void testFloat() {
    float num = testRandom.nextFloat();
    // floats hash through their 64-bit (double) little-endian encoding,
    // hence the widening cast and putDouble
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putDouble((double) num);
    Bucket<Float> bucketFunc = new Bucket.BucketFloat(100);
    Assert.assertEquals("Float hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testDouble() {
    double num = testRandom.nextDouble();
    // doubles hash through their 64-bit little-endian encoding
    ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.order(ByteOrder.LITTLE_ENDIAN);
    buffer.putDouble(num);
    Bucket<Double> bucketFunc = new Bucket.BucketDouble(100);
    Assert.assertEquals("Double hash should match hash of little-endian bytes",
        hashBytes(buffer.array()), bucketFunc.hash(num));
  }

  @Test
  public void testFloatTypePromotion() {
    // float and double representations of the same value must bucket identically
    Bucket<Float> bucketFloats = new Bucket.BucketFloat(100);
    Bucket<Double> bucketDoubles = new Bucket.BucketDouble(100);
    float r = testRandom.nextFloat();
    Assert.assertEquals("Float and Double bucket results should match",
        bucketFloats.apply(r), bucketDoubles.apply((double) r));
  }

  @Test
  public void testDecimal() {
    double num = testRandom.nextDouble();
    BigDecimal decimal = BigDecimal.valueOf(num);
    // decimals hash over the minimal two's-complement bytes of the unscaled value
    byte[] unscaledBytes = decimal.unscaledValue().toByteArray();
    Bucket<BigDecimal> bucketFunc = Bucket.get(Types.DecimalType.of(9, 2), 100);
    Assert.assertEquals("Decimal hash should match hash of backing bytes",
        hashBytes(unscaledBytes), bucketFunc.hash(decimal));
  }

  @Test
  public void testString() {
    String string = "string to test murmur3 hash";
    // strings hash over their UTF-8 encoding
    byte[] asBytes = string.getBytes(Charsets.UTF_8);
    Bucket<CharSequence> bucketFunc = Bucket.get(Types.StringType.get(), 100);
    Assert.assertEquals("String hash should match hash of UTF-8 bytes",
        hashBytes(asBytes), bucketFunc.hash(string));
  }

  @Test
  public void testUtf8() {
    // Avro's Utf8 must hash the same as the equivalent String (UTF-8 bytes)
    Utf8 utf8 = new Utf8("string to test murmur3 hash");
    byte[] asBytes = utf8.toString().getBytes(Charsets.UTF_8);
    Bucket<CharSequence> bucketFunc = Bucket.get(Types.StringType.get(), 100);
    Assert.assertEquals("String hash should match hash of UTF-8 bytes",
        hashBytes(asBytes), bucketFunc.hash(utf8));
  }

  @Test
  public void testByteBufferOnHeap() {
    byte[] bytes = randomBytes(128);
    // wrap a sub-range [5, 105) so the hash must respect position/limit,
    // not the whole backing array
    ByteBuffer buffer = ByteBuffer.wrap(bytes, 5, 100);
    Bucket<ByteBuffer> bucketFunc = Bucket.get(Types.BinaryType.get(), 100);
    Assert.assertEquals(
        "HeapByteBuffer hash should match hash for correct slice",
        hashBytes(bytes, 5, 100), bucketFunc.hash(buffer));
    // verify that the buffer was not modified
    Assert.assertEquals("Buffer position should not change", 5, buffer.position());
    Assert.assertEquals("Buffer limit should not change", 105, buffer.limit());
  }

  @Test
  public void testByteBufferOffHeap() {
    byte[] bytes = randomBytes(128);
    ByteBuffer buffer = ByteBuffer.allocateDirect(128);
    // copy to the middle of the off-heap buffer
    buffer.position(5);
    buffer.limit(105);
    buffer.mark();
    buffer.put(bytes, 5, 100);
    buffer.reset();
    Bucket<ByteBuffer> bucketFunc = Bucket.get(Types.BinaryType.get(), 100);
    Assert.assertEquals(
        "DirectByteBuffer hash should match hash for correct slice",
        hashBytes(bytes, 5, 100), bucketFunc.hash(buffer));
    // verify that the buffer was not modified
    Assert.assertEquals("Buffer position should not change", 5, buffer.position());
    Assert.assertEquals("Buffer limit should not change", 105, buffer.limit());
  }

  @Test
  public void testUUIDHash() {
    byte[] uuidBytes = randomBytes(16);
    // build the UUID via the captured private constructor so its backing
    // bytes are exactly uuidBytes
    UUID uuid = newUUID(uuidBytes);
    Bucket<UUID> bucketFunc = Bucket.get(Types.UUIDType.get(), 100);
    Assert.assertEquals("UUID hash should match hash of backing bytes",
        hashBytes(uuidBytes), bucketFunc.hash(uuid));
  }

  // Returns `length` random bytes from the per-test seeded generator.
  private byte[] randomBytes(int length) {
    byte[] bytes = new byte[length];
    testRandom.nextBytes(bytes);
    return bytes;
  }

  // Reference hash of a whole byte array.
  private int hashBytes(byte[] bytes) {
    return hashBytes(bytes, 0, bytes.length);
  }

  // Reference hash of a byte-array slice, computed with the spec's Murmur3-32.
  private int hashBytes(byte[] bytes, int offset, int length) {
    return MURMUR3.hashBytes(bytes, offset, length).asInt();
  }

  /**
   * This method returns a UUID for the bytes in the array without modification.
   * @param bytes a 16-byte array
   * @return a UUID for the bytes
   */
  private static UUID newUUID(byte[] bytes) {
    try {
      return uuidBytesConstructor.newInstance((Object) bytes);
    } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
      throw new RuntimeException(e);
    }
  }
}
| 6,397 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTruncate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
public class TestTruncate {
  @Test
  public void testTruncateInteger() {
    // Truncation rounds toward negative infinity to the nearest multiple of
    // the width, so negatives with a remainder move down a full step.
    Truncate<Integer> trunc = Truncate.get(Types.IntegerType.get(), 10);
    int[][] cases = {
        {0, 0}, {1, 0}, {5, 0}, {9, 0},
        {10, 10}, {11, 10},
        {-1, -10}, {-5, -10}, {-10, -10}, {-11, -20}
    };
    for (int[] c : cases) {
      Assert.assertEquals(c[1], (int) trunc.apply(c[0]));
    }
  }

  @Test
  public void testTruncateLong() {
    // Same floor-to-multiple behavior as ints, for long values.
    Truncate<Long> trunc = Truncate.get(Types.LongType.get(), 10);
    long[][] cases = {
        {0L, 0L}, {1L, 0L}, {5L, 0L}, {9L, 0L},
        {10L, 10L}, {11L, 10L},
        {-1L, -10L}, {-5L, -10L}, {-10L, -10L}, {-11L, -20L}
    };
    for (long[] c : cases) {
      Assert.assertEquals(c[1], (long) trunc.apply(c[0]));
    }
  }

  @Test
  public void testTruncateDecimal() {
    // decimal truncation works by applying the decimal scale to the width: 10 scale 2 = 0.10
    Truncate<BigDecimal> trunc = Truncate.get(Types.DecimalType.of(9, 2), 10);
    String[][] cases = {
        {"12.34", "12.30"},
        {"12.30", "12.30"},
        {"12.29", "12.20"},
        {"0.05", "0.00"},
        {"-0.05", "-0.10"}
    };
    for (String[] c : cases) {
      Assert.assertEquals(new BigDecimal(c[1]), trunc.apply(new BigDecimal(c[0])));
    }
  }

  @Test
  public void testTruncateString() {
    // String truncation keeps the first `length` characters; shorter inputs
    // pass through unchanged.
    Truncate<String> trunc = Truncate.get(Types.StringType.get(), 5);
    Assert.assertEquals("Should truncate strings longer than length",
        "abcde", trunc.apply("abcdefg"));
    Assert.assertEquals("Should not pad strings shorter than length",
        "abc", trunc.apply("abc"));
  }

  @Test
  public void testTruncateByteBuffer() throws Exception {
    // Binary truncation keeps the first `length` bytes; shorter inputs pass
    // through unchanged.
    Truncate<ByteBuffer> trunc = Truncate.get(Types.BinaryType.get(), 4);
    ByteBuffer longInput = ByteBuffer.wrap("abcdefg".getBytes("UTF-8"));
    Assert.assertEquals("Should truncate binary longer than length",
        ByteBuffer.wrap("abcd".getBytes("UTF-8")), trunc.apply(longInput));
    ByteBuffer shortInput = ByteBuffer.wrap("abc".getBytes("UTF-8"));
    Assert.assertEquals("Should not pad binary shorter than length",
        ByteBuffer.wrap("abc".getBytes("UTF-8")), trunc.apply(shortInput));
  }
}
| 6,398 |
0 | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg | Create_ds/iceberg/api/src/test/java/com/netflix/iceberg/transforms/TestTimestamps.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.transforms;
import com.netflix.iceberg.expressions.Literal;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
public class TestTimestamps {
  @Test
  public void testTimestampWithoutZoneToHumanString() {
    Types.TimestampType type = Types.TimestampType.withoutZone();
    Literal<Integer> ts = Literal.of("2017-12-01T10:12:55.038194").to(type);

    // Each time-based transform should render its result at the matching
    // granularity.
    Transform<Integer, Integer> year = Transforms.year(type);
    assertHumanString(year, ts.value(), "2017");
    Transform<Integer, Integer> month = Transforms.month(type);
    assertHumanString(month, ts.value(), "2017-12");
    Transform<Integer, Integer> day = Transforms.day(type);
    assertHumanString(day, ts.value(), "2017-12-01");
    Transform<Integer, Integer> hour = Transforms.hour(type);
    assertHumanString(hour, ts.value(), "2017-12-01-10");
  }

  @Test
  public void testTimestampWithZoneToHumanString() {
    Types.TimestampType type = Types.TimestampType.withZone();
    Literal<Integer> ts = Literal.of("2017-12-01T10:12:55.038194-08:00").to(type);

    Transform<Integer, Integer> year = Transforms.year(type);
    assertHumanString(year, ts.value(), "2017");
    Transform<Integer, Integer> month = Transforms.month(type);
    assertHumanString(month, ts.value(), "2017-12");
    Transform<Integer, Integer> day = Transforms.day(type);
    assertHumanString(day, ts.value(), "2017-12-01");
    // the hour is 18 because the value is always UTC
    Transform<Integer, Integer> hour = Transforms.hour(type);
    assertHumanString(hour, ts.value(), "2017-12-01-18");
  }

  @Test
  public void testNullHumanString() {
    // null inputs render as the literal string "null" for every granularity.
    Types.TimestampType type = Types.TimestampType.withZone();
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.year(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.month(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.day(type).toHumanString(null));
    Assert.assertEquals("Should produce \"null\" for null",
        "null", Transforms.hour(type).toHumanString(null));
  }

  // Applies the transform to `value` and checks the human-readable rendering.
  private static void assertHumanString(Transform<Integer, Integer> transform,
                                        Integer value, String expected) {
    Assert.assertEquals("Should produce the correct Human string",
        expected, transform.toHumanString(transform.apply(value)));
  }
}
| 6,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.